Return-Path:
Delivered-To: apmail-lucene-java-commits-archive@www.apache.org
Received: (qmail 32610 invoked from network); 29 Oct 2009 21:30:45 -0000
Received: from hermes.apache.org (HELO mail.apache.org) (140.211.11.3)
by minotaur.apache.org with SMTP; 29 Oct 2009 21:30:45 -0000
Received: (qmail 43335 invoked by uid 500); 29 Oct 2009 21:30:45 -0000
Delivered-To: apmail-lucene-java-commits-archive@lucene.apache.org
Received: (qmail 43261 invoked by uid 500); 29 Oct 2009 21:30:45 -0000
Mailing-List: contact java-commits-help@lucene.apache.org; run by ezmlm
Precedence: bulk
List-Help: <mailto:java-commits-help@lucene.apache.org>
List-Unsubscribe: <mailto:java-commits-unsubscribe@lucene.apache.org>
List-Post: <mailto:java-commits@lucene.apache.org>
List-Id: <java-commits.lucene.apache.org>
Reply-To: java-dev@lucene.apache.org
Delivered-To: mailing list java-commits@lucene.apache.org
Received: (qmail 43252 invoked by uid 99); 29 Oct 2009 21:30:45 -0000
Received: from nike.apache.org (HELO nike.apache.org) (192.87.106.230)
by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 29 Oct 2009 21:30:45 +0000
X-ASF-Spam-Status: No, hits=-2000.0 required=10.0
tests=ALL_TRUSTED
X-Spam-Check-By: apache.org
Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4)
by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 29 Oct 2009 21:30:29 +0000
Received: by eris.apache.org (Postfix, from userid 65534)
id C5D5323888DD; Thu, 29 Oct 2009 21:30:06 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: svn commit: r831094 [2/8] - in /lucene/java/branches/flex_1458: ./
contrib/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/
contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/
contrib/analyzers/common/src/java/org/apache/l...
Date: Thu, 29 Oct 2009 21:29:56 -0000
To: java-commits@lucene.apache.org
From: markrmiller@apache.org
X-Mailer: svnmailer-1.0.8
Message-Id: <20091029213006.C5D5323888DD@eris.apache.org>
X-Virus-Checked: Checked by ClamAV on apache.org
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java Thu Oct 29 21:29:47 2009
@@ -41,8 +41,8 @@
*/
public class QueryScorer implements Scorer {
private float totalScore;
- private Set foundTerms;
- private Map fieldWeightedSpanTerms;
+ private Set<String> foundTerms;
+ private Map<String,WeightedSpanTerm> fieldWeightedSpanTerms;
private float maxTermWeight;
private int position = -1;
private String defaultField;
@@ -103,10 +103,10 @@
* @param weightedTerms an array of pre-created {@link WeightedSpanTerm}s
*/
public QueryScorer(WeightedSpanTerm[] weightedTerms) {
- this.fieldWeightedSpanTerms = new HashMap(weightedTerms.length);
+ this.fieldWeightedSpanTerms = new HashMap<String,WeightedSpanTerm>(weightedTerms.length);
for (int i = 0; i < weightedTerms.length; i++) {
- WeightedSpanTerm existingTerm = (WeightedSpanTerm) fieldWeightedSpanTerms.get(weightedTerms[i].term);
+ WeightedSpanTerm existingTerm = fieldWeightedSpanTerms.get(weightedTerms[i].term);
if ((existingTerm == null) ||
(existingTerm.weight < weightedTerms[i].weight)) {
@@ -149,7 +149,7 @@
WeightedSpanTerm weightedSpanTerm;
- if ((weightedSpanTerm = (WeightedSpanTerm) fieldWeightedSpanTerms.get(
+ if ((weightedSpanTerm = fieldWeightedSpanTerms.get(
termText)) == null) {
return 0;
}
@@ -194,7 +194,7 @@
* @return WeightedSpanTerm for token
*/
public WeightedSpanTerm getWeightedSpanTerm(String token) {
- return (WeightedSpanTerm) fieldWeightedSpanTerms.get(token);
+ return fieldWeightedSpanTerms.get(token);
}
/**
@@ -232,7 +232,7 @@
* @see org.apache.lucene.search.highlight.Scorer#startFragment(org.apache.lucene.search.highlight.TextFragment)
*/
public void startFragment(TextFragment newFragment) {
- foundTerms = new HashSet();
+ foundTerms = new HashSet<String>();
totalScore = 0;
}
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java Thu Oct 29 21:29:47 2009
@@ -93,13 +93,13 @@
*/
public static final WeightedTerm[] getTerms(Query query, boolean prohibited, String fieldName)
{
- HashSet terms=new HashSet();
+ HashSet<WeightedTerm> terms=new HashSet<WeightedTerm>();
if(fieldName!=null)
{
fieldName= StringHelper.intern(fieldName);
}
getTerms(query,terms,prohibited,fieldName);
- return (WeightedTerm[]) terms.toArray(new WeightedTerm[0]);
+ return terms.toArray(new WeightedTerm[0]);
}
/**
@@ -115,7 +115,7 @@
}
//fieldname MUST be interned prior to this call
- private static final void getTerms(Query query, HashSet terms,boolean prohibited, String fieldName)
+ private static final void getTerms(Query query, HashSet<WeightedTerm> terms,boolean prohibited, String fieldName)
{
try
{
@@ -126,11 +126,11 @@
getTermsFromFilteredQuery((FilteredQuery)query, terms,prohibited, fieldName);
else
{
- HashSet nonWeightedTerms=new HashSet();
+ HashSet<Term> nonWeightedTerms=new HashSet<Term>();
query.extractTerms(nonWeightedTerms);
- for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext();)
+ for (Iterator<Term> iter = nonWeightedTerms.iterator(); iter.hasNext();)
{
- Term term = (Term) iter.next();
+ Term term = iter.next();
if((fieldName==null)||(term.field()==fieldName))
{
terms.add(new WeightedTerm(query.getBoost(),term.text()));
@@ -155,7 +155,7 @@
* something common which would allow access to child queries so what follows here are query-specific
* implementations for accessing embedded query elements.
*/
- private static final void getTermsFromBooleanQuery(BooleanQuery query, HashSet terms, boolean prohibited, String fieldName)
+ private static final void getTermsFromBooleanQuery(BooleanQuery query, HashSet<WeightedTerm> terms, boolean prohibited, String fieldName)
{
BooleanClause[] queryClauses = query.getClauses();
for (int i = 0; i < queryClauses.length; i++)
@@ -164,7 +164,7 @@
getTerms(queryClauses[i].getQuery(), terms, prohibited, fieldName);
}
}
- private static void getTermsFromFilteredQuery(FilteredQuery query, HashSet terms, boolean prohibited, String fieldName)
+ private static void getTermsFromFilteredQuery(FilteredQuery query, HashSet<WeightedTerm> terms, boolean prohibited, String fieldName)
{
getTerms(query.getQuery(),terms,prohibited,fieldName);
}
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java Thu Oct 29 21:29:47 2009
@@ -35,11 +35,11 @@
public class QueryTermScorer implements Scorer {
TextFragment currentTextFragment = null;
- HashSet uniqueTermsInFragment;
+ HashSet<String> uniqueTermsInFragment;
float totalScore = 0;
float maxTermWeight = 0;
- private HashMap termsToFind;
+ private HashMap<String,WeightedTerm> termsToFind;
private TermAttribute termAtt;
@@ -77,9 +77,9 @@
}
public QueryTermScorer(WeightedTerm[] weightedTerms) {
- termsToFind = new HashMap();
+ termsToFind = new HashMap<String,WeightedTerm>();
for (int i = 0; i < weightedTerms.length; i++) {
- WeightedTerm existingTerm = (WeightedTerm) termsToFind
+ WeightedTerm existingTerm = termsToFind
.get(weightedTerms[i].term);
if ((existingTerm == null)
|| (existingTerm.weight < weightedTerms[i].weight)) {
@@ -107,7 +107,7 @@
* .lucene.search.highlight.TextFragment)
*/
public void startFragment(TextFragment newFragment) {
- uniqueTermsInFragment = new HashSet();
+ uniqueTermsInFragment = new HashSet<String>();
currentTextFragment = newFragment;
totalScore = 0;
@@ -120,7 +120,7 @@
public float getTokenScore() {
String termText = termAtt.term();
- WeightedTerm queryTerm = (WeightedTerm) termsToFind.get(termText);
+ WeightedTerm queryTerm = termsToFind.get(termText);
if (queryTerm == null) {
// not a query term - return
return 0;
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java Thu Oct 29 21:29:47 2009
@@ -73,11 +73,11 @@
WeightedSpanTerm wSpanTerm = queryScorer.getWeightedSpanTerm(termAtt.term());
if (wSpanTerm != null) {
- List positionSpans = wSpanTerm.getPositionSpans();
+ List<PositionSpan> positionSpans = wSpanTerm.getPositionSpans();
for (int i = 0; i < positionSpans.size(); i++) {
- if (((PositionSpan) positionSpans.get(i)).start == position) {
- waitForPos = ((PositionSpan) positionSpans.get(i)).end + 1;
+ if (positionSpans.get(i).start == position) {
+ waitForPos = positionSpans.get(i).end + 1;
break;
}
}
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java Thu Oct 29 21:29:47 2009
@@ -59,7 +59,7 @@
public static TokenStream getAnyTokenStream(IndexReader reader, int docId, String field, Document doc, Analyzer analyzer) throws IOException{
TokenStream ts=null;
- TermFreqVector tfv=(TermFreqVector) reader.getTermFreqVector(docId,field);
+ TermFreqVector tfv = reader.getTermFreqVector(docId,field);
if(tfv!=null)
{
if(tfv instanceof TermPositionVector)
@@ -89,7 +89,7 @@
{
TokenStream ts=null;
- TermFreqVector tfv=(TermFreqVector) reader.getTermFreqVector(docId,field);
+ TermFreqVector tfv = reader.getTermFreqVector(docId,field);
if(tfv!=null)
{
if(tfv instanceof TermPositionVector)
@@ -171,7 +171,7 @@
totalTokens+=freq[t];
}
Token tokensInOriginalOrder[]=new Token[totalTokens];
- ArrayList unsortedTokens = null;
+ ArrayList<Token> unsortedTokens = null;
for (int t = 0; t < freq.length; t++)
{
TermVectorOffsetInfo[] offsets=tpv.getOffsets(t);
@@ -191,7 +191,7 @@
//tokens NOT stored with positions or not guaranteed contiguous - must add to list and sort later
if(unsortedTokens==null)
{
- unsortedTokens=new ArrayList();
+ unsortedTokens=new ArrayList<Token>();
}
for (int tp = 0; tp < offsets.length; tp++)
{
@@ -216,14 +216,10 @@
}
}
//If the field has been stored without position data we must perform a sort
- if(unsortedTokens!=null)
- {
- tokensInOriginalOrder=(Token[]) unsortedTokens.toArray(new Token[unsortedTokens.size()]);
- Arrays.sort(tokensInOriginalOrder, new Comparator(){
- public int compare(Object o1, Object o2)
- {
- Token t1=(Token) o1;
- Token t2=(Token) o2;
+ if(unsortedTokens!=null) {
+ tokensInOriginalOrder= unsortedTokens.toArray(new Token[unsortedTokens.size()]);
+ Arrays.sort(tokensInOriginalOrder, new Comparator<Token>(){
+ public int compare(Token t1, Token t2) {
if(t1.startOffset()>t2.endOffset())
return 1;
if(t1.startOffset()<t2.startOffset())
return -1;
return 0;
}});
}
[... intervening diff content, through the WeightedSpanTerm.java file header, was swallowed by the archive's angle-bracket stripping ...]
+ private List<PositionSpan> positionSpans = new ArrayList<PositionSpan>();
/**
* @param weight
@@ -36,7 +36,7 @@
*/
public WeightedSpanTerm(float weight, String term) {
super(weight, term);
- this.positionSpans = new ArrayList();
+ this.positionSpans = new ArrayList<PositionSpan>();
}
/**
@@ -61,10 +61,10 @@
// where kept in some sort of priority queue - that way this method
// could
// bail early without checking each PositionSpan.
- Iterator positionSpanIt = positionSpans.iterator();
+ Iterator<PositionSpan> positionSpanIt = positionSpans.iterator();
while (positionSpanIt.hasNext()) {
- PositionSpan posSpan = (PositionSpan) positionSpanIt.next();
+ PositionSpan posSpan = positionSpanIt.next();
if (((position >= posSpan.start) && (position <= posSpan.end))) {
return true;
@@ -74,7 +74,7 @@
return false;
}
- public void addPositionSpans(List positionSpans) {
+ public void addPositionSpans(List<PositionSpan> positionSpans) {
this.positionSpans.addAll(positionSpans);
}
@@ -86,7 +86,7 @@
this.positionSensitive = positionSensitive;
}
- public List getPositionSpans() {
+ public List<PositionSpan> getPositionSpans() {
return positionSpans;
}
}
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java Thu Oct 29 21:29:47 2009
@@ -47,7 +47,7 @@
private String fieldName;
private TokenStream tokenStream;
- private Map readers = new HashMap(10); // Map
+ private Map<String,IndexReader> readers = new HashMap<String,IndexReader>(10);
private String defaultField;
private boolean expandMultiTermQuery;
private boolean cachedTokenStream;
@@ -63,11 +63,9 @@
}
private void closeReaders() {
- Collection readerSet = readers.values();
- Iterator it = readerSet.iterator();
+ Collection<IndexReader> readerSet = readers.values();
- while (it.hasNext()) {
- IndexReader reader = (IndexReader) it.next();
+ for (final IndexReader reader : readerSet) {
try {
reader.close();
} catch (IOException e) {
@@ -85,7 +83,7 @@
* Map to place created WeightedSpanTerms in
* @throws IOException
*/
- private void extract(Query query, Map terms) throws IOException {
+ private void extract(Query query, Map<String,WeightedSpanTerm> terms) throws IOException {
if (query instanceof BooleanQuery) {
BooleanClause[] queryClauses = ((BooleanQuery) query).getClauses();
@@ -137,8 +135,8 @@
} else if (query instanceof FilteredQuery) {
extract(((FilteredQuery) query).getQuery(), terms);
} else if (query instanceof DisjunctionMaxQuery) {
- for (Iterator iterator = ((DisjunctionMaxQuery) query).iterator(); iterator.hasNext();) {
- extract((Query) iterator.next(), terms);
+ for (Iterator<Query> iterator = ((DisjunctionMaxQuery) query).iterator(); iterator.hasNext();) {
+ extract(iterator.next(), terms);
}
} else if (query instanceof MultiTermQuery && expandMultiTermQuery) {
MultiTermQuery mtq = ((MultiTermQuery)query);
@@ -163,7 +161,7 @@
}
} else if (query instanceof MultiPhraseQuery) {
final MultiPhraseQuery mpq = (MultiPhraseQuery) query;
- final List termArrays = mpq.getTermArrays();
+ final List<Term[]> termArrays = mpq.getTermArrays();
final int[] positions = mpq.getPositions();
if (positions.length > 0) {
@@ -174,14 +172,14 @@
}
}
- final List[] disjunctLists = new List[maxPosition + 1];
+ final List<SpanQuery>[] disjunctLists = new List[maxPosition + 1];
int distinctPositions = 0;
for (int i = 0; i < termArrays.size(); ++i) {
- final Term[] termArray = (Term[]) termArrays.get(i);
- List disjuncts = disjunctLists[positions[i]];
+ final Term[] termArray = termArrays.get(i);
+ List<SpanQuery> disjuncts = disjunctLists[positions[i]];
if (disjuncts == null) {
- disjuncts = (disjunctLists[positions[i]] = new ArrayList(termArray.length));
+ disjuncts = (disjunctLists[positions[i]] = new ArrayList<SpanQuery>(termArray.length));
++distinctPositions;
}
for (int j = 0; j < termArray.length; ++j) {
@@ -193,9 +191,9 @@
int position = 0;
final SpanQuery[] clauses = new SpanQuery[distinctPositions];
for (int i = 0; i < disjunctLists.length; ++i) {
- List disjuncts = disjunctLists[i];
+ List<SpanQuery> disjuncts = disjunctLists[i];
if (disjuncts != null) {
- clauses[position++] = new SpanOrQuery((SpanQuery[]) disjuncts
+ clauses[position++] = new SpanOrQuery(disjuncts
.toArray(new SpanQuery[disjuncts.size()]));
} else {
++positionGaps;
@@ -221,20 +219,19 @@
* SpanQuery to extract Terms from
* @throws IOException
*/
- private void extractWeightedSpanTerms(Map terms, SpanQuery spanQuery) throws IOException {
- Set nonWeightedTerms = new HashSet();
+ private void extractWeightedSpanTerms(Map<String,WeightedSpanTerm> terms, SpanQuery spanQuery) throws IOException {
+ Set<Term> nonWeightedTerms = new HashSet<Term>();
spanQuery.extractTerms(nonWeightedTerms);
- Set fieldNames;
+ Set<String> fieldNames;
if (fieldName == null) {
- fieldNames = new HashSet();
- for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext();) {
- Term queryTerm = (Term) iter.next();
+ fieldNames = new HashSet<String>();
+ for (final Term queryTerm : nonWeightedTerms) {
fieldNames.add(queryTerm.field());
}
} else {
- fieldNames = new HashSet(1);
+ fieldNames = new HashSet<String>(1);
fieldNames.add(fieldName);
}
// To support the use of the default field name
@@ -242,11 +239,9 @@
fieldNames.add(defaultField);
}
- Iterator it = fieldNames.iterator();
- List spanPositions = new ArrayList();
+ List<PositionSpan> spanPositions = new ArrayList<PositionSpan>();
- while (it.hasNext()) {
- String field = (String) it.next();
+ for (final String field : fieldNames) {
IndexReader reader = getReaderForField(field);
Spans spans = spanQuery.getSpans(reader);
@@ -263,11 +258,10 @@
return;
}
- for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext();) {
- Term queryTerm = (Term) iter.next();
+ for (final Term queryTerm : nonWeightedTerms) {
if (fieldNameComparator(queryTerm.field())) {
- WeightedSpanTerm weightedSpanTerm = (WeightedSpanTerm) terms.get(queryTerm.text());
+ WeightedSpanTerm weightedSpanTerm = terms.get(queryTerm.text());
if (weightedSpanTerm == null) {
weightedSpanTerm = new WeightedSpanTerm(spanQuery.getBoost(), queryTerm.text());
@@ -292,12 +286,11 @@
* Query to extract Terms from
* @throws IOException
*/
- private void extractWeightedTerms(Map terms, Query query) throws IOException {
- Set nonWeightedTerms = new HashSet();
+ private void extractWeightedTerms(Map<String,WeightedSpanTerm> terms, Query query) throws IOException {
+ Set<Term> nonWeightedTerms = new HashSet<Term>();
query.extractTerms(nonWeightedTerms);
- for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext();) {
- Term queryTerm = (Term) iter.next();
+ for (final Term queryTerm : nonWeightedTerms) {
if (fieldNameComparator(queryTerm.field())) {
WeightedSpanTerm weightedSpanTerm = new WeightedSpanTerm(query.getBoost(), queryTerm.text());
@@ -320,7 +313,7 @@
tokenStream = new CachingTokenFilter(tokenStream);
cachedTokenStream = true;
}
- IndexReader reader = (IndexReader) readers.get(field);
+ IndexReader reader = readers.get(field);
if (reader == null) {
MemoryIndex indexer = new MemoryIndex();
indexer.addField(field, tokenStream);
@@ -345,7 +338,7 @@
* @return Map containing WeightedSpanTerms
* @throws IOException
*/
- public Map getWeightedSpanTerms(Query query, TokenStream tokenStream)
+ public Map<String,WeightedSpanTerm> getWeightedSpanTerms(Query query, TokenStream tokenStream)
throws IOException {
return getWeightedSpanTerms(query, tokenStream, null);
}
@@ -364,7 +357,7 @@
* @return Map containing WeightedSpanTerms
* @throws IOException
*/
- public Map getWeightedSpanTerms(Query query, TokenStream tokenStream,
+ public Map<String,WeightedSpanTerm> getWeightedSpanTerms(Query query, TokenStream tokenStream,
String fieldName) throws IOException {
if (fieldName != null) {
this.fieldName = StringHelper.intern(fieldName);
@@ -372,7 +365,7 @@
this.fieldName = null;
}
- Map terms = new PositionCheckingMap();
+ Map<String,WeightedSpanTerm> terms = new PositionCheckingMap<String>();
this.tokenStream = tokenStream;
try {
extract(query, terms);
@@ -400,7 +393,7 @@
* @return Map of WeightedSpanTerms with quasi tf/idf scores
* @throws IOException
*/
- public Map getWeightedSpanTermsWithScores(Query query, TokenStream tokenStream, String fieldName,
+ public Map<String,WeightedSpanTerm> getWeightedSpanTermsWithScores(Query query, TokenStream tokenStream, String fieldName,
IndexReader reader) throws IOException {
if (fieldName != null) {
this.fieldName = StringHelper.intern(fieldName);
@@ -409,16 +402,16 @@
}
this.tokenStream = tokenStream;
- Map terms = new PositionCheckingMap();
+ Map<String,WeightedSpanTerm> terms = new PositionCheckingMap<String>();
extract(query, terms);
int totalNumDocs = reader.numDocs();
- Set weightedTerms = terms.keySet();
- Iterator it = weightedTerms.iterator();
+ Set<String> weightedTerms = terms.keySet();
+ Iterator<String> it = weightedTerms.iterator();
try {
while (it.hasNext()) {
- WeightedSpanTerm weightedSpanTerm = (WeightedSpanTerm) terms.get(it.next());
+ WeightedSpanTerm weightedSpanTerm = terms.get(it.next());
int docFreq = reader.docFreq(new Term(fieldName, weightedSpanTerm.term));
// docFreq counts deletes
if(totalNumDocs < docFreq) {
@@ -440,21 +433,21 @@
* This class makes sure that if both position sensitive and insensitive
* versions of the same term are added, the position insensitive one wins.
*/
- static private class PositionCheckingMap extends HashMap {
+ static private class PositionCheckingMap<K> extends HashMap<K,WeightedSpanTerm> {
public void putAll(Map m) {
- Iterator it = m.entrySet().iterator();
+ Iterator<Map.Entry<K,WeightedSpanTerm>> it = m.entrySet().iterator();
while (it.hasNext()) {
- Map.Entry entry = (java.util.Map.Entry) it.next();
+ Map.Entry entry = it.next();
this.put(entry.getKey(), entry.getValue());
}
}
- public Object put(Object key, Object value) {
- Object prev = super.put(key, value);
+ public WeightedSpanTerm put(K key, WeightedSpanTerm value) {
+ WeightedSpanTerm prev = super.put(key, value);
if (prev == null) return prev;
- WeightedSpanTerm prevTerm = (WeightedSpanTerm)prev;
- WeightedSpanTerm newTerm = (WeightedSpanTerm)value;
+ WeightedSpanTerm prevTerm = prev;
+ WeightedSpanTerm newTerm = value;
if (!prevTerm.positionSensitive) {
newTerm.positionSensitive = false;
}
Propchange: lucene/java/branches/flex_1458/contrib/highlighter/src/test/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Thu Oct 29 21:29:47 2009
@@ -0,0 +1,4 @@
+/lucene/java/branches/lucene_2_4/contrib/highlighter/src/test:748824
+/lucene/java/branches/lucene_2_9/contrib/highlighter/src/test:817269-818600,825998,826775,829134,829816,829881
+/lucene/java/branches/lucene_2_9_back_compat_tests/contrib/highlighter/src/test:818601-821336
+/lucene/java/trunk/contrib/highlighter/src/test:829439-830804
Modified: lucene/java/branches/flex_1458/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/java/branches/flex_1458/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Thu Oct 29 21:29:47 2009
@@ -46,6 +46,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexReader;
@@ -60,6 +61,7 @@
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiSearcher;
import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
@@ -88,6 +90,7 @@
private IndexReader reader;
static final String FIELD_NAME = "contents";
+ private static final String NUMERIC_FIELD_NAME = "nfield";
private Query query;
RAMDirectory ramDir;
public IndexSearcher searcher = null;
@@ -302,6 +305,30 @@
numHighlights == 4);
}
+
+ public void testNumericRangeQuery() throws Exception {
+ // doesn't currently highlight, but make sure it doesn't cause exception either
+ query = NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true);
+ searcher = new IndexSearcher(ramDir, true);
+ hits = searcher.search(query, 100);
+ int maxNumFragmentsRequired = 2;
+
+ QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
+ Highlighter highlighter = new Highlighter(this, scorer);
+
+ for (int i = 0; i < hits.totalHits; i++) {
+ String text = searcher.doc(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME);
+ TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+
+ highlighter.setTextFragmenter(new SimpleFragmenter(40));
+
+ String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
+ "...");
+ //System.out.println("\t" + result);
+ }
+
+
+ }
public void testSimpleQueryScorerPhraseHighlighting2() throws Exception {
doSearching("\"text piece long\"~5");
@@ -1617,7 +1644,26 @@
for (int i = 0; i < texts.length; i++) {
addDoc(writer, texts[i]);
}
-
+ Document doc = new Document();
+ NumericField nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
+ nfield.setIntValue(1);
+ doc.add(nfield);
+ writer.addDocument(doc, analyzer);
+ nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
+ nfield.setIntValue(3);
+ doc = new Document();
+ doc.add(nfield);
+ writer.addDocument(doc, analyzer);
+ nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
+ nfield.setIntValue(5);
+ doc = new Document();
+ doc.add(nfield);
+ writer.addDocument(doc, analyzer);
+ nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
+ nfield.setIntValue(7);
+ doc = new Document();
+ doc.add(nfield);
+ writer.addDocument(doc, analyzer);
writer.optimize();
writer.close();
reader = IndexReader.open(ramDir, true);
Modified: lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (original)
+++ lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java Thu Oct 29 21:29:47 2009
@@ -48,7 +48,6 @@
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.index.TermRef;
import org.apache.lucene.index.TermVectorMapper;
-import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.Collector;
@@ -65,7 +64,7 @@
* Overview
*
* This class is a replacement/substitute for a large subset of
- * {@link org.apache.lucene.store.RAMDirectory} functionality. It is designed to
+ * {@link RAMDirectory} functionality. It is designed to
* enable maximum efficiency for on-the-fly matchmaking combining structured and
* fuzzy fulltext search in realtime streaming applications such as Nux XQuery based XML
* message queues, publish-subscribe systems for Blogs/newsfeeds, text chat, data acquisition and
@@ -183,10 +182,10 @@
public class MemoryIndex implements Serializable {
/** info for each field: Map */
- private final HashMap fields = new HashMap();
+ private final HashMap<String,Info> fields = new HashMap<String,Info>();
/** fields sorted ascending by fieldName; lazily computed on demand */
- private transient Map.Entry[] sortedFields;
+ private transient Map.Entry<String,Info>[] sortedFields;
/** pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
private final int stride;
@@ -278,13 +277,13 @@
* the keywords to generate tokens for
* @return the corresponding token stream
*/
- public TokenStream keywordTokenStream(final Collection keywords) {
+ public <T> TokenStream keywordTokenStream(final Collection<T> keywords) {
// TODO: deprecate & move this method into AnalyzerUtil?
if (keywords == null)
throw new IllegalArgumentException("keywords must not be null");
return new TokenStream() {
- private Iterator iter = keywords.iterator();
+ private Iterator<T> iter = keywords.iterator();
private int start = 0;
private TermAttribute termAtt = addAttribute(TermAttribute.class);
private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@@ -292,7 +291,7 @@
public boolean incrementToken() {
if (!iter.hasNext()) return false;
- Object obj = iter.next();
+ T obj = iter.next();
if (obj == null)
throw new IllegalArgumentException("keyword must not be null");
@@ -343,7 +342,7 @@
if (fields.get(fieldName) != null)
throw new IllegalArgumentException("field must not be added more than once");
- HashMap terms = new HashMap();
+ HashMap<String,ArrayIntList> terms = new HashMap<String,ArrayIntList>();
int numTokens = 0;
int numOverlapTokens = 0;
int pos = -1;
@@ -363,7 +362,7 @@
numOverlapTokens++;
pos += posIncr;
- ArrayIntList positions = (ArrayIntList) terms.get(term);
+ ArrayIntList positions = terms.get(term);
if (positions == null) { // term not seen before
positions = new ArrayIntList(stride);
terms.put(term, positions);
@@ -479,21 +478,19 @@
if (sortedFields != null) size += VM.sizeOfObjectArray(sortedFields.length);
size += VM.sizeOfHashMap(fields.size());
- Iterator iter = fields.entrySet().iterator();
- while (iter.hasNext()) { // for each Field Info
- Map.Entry entry = (Map.Entry) iter.next();
- Info info = (Info) entry.getValue();
+ for (Map.Entry<String,Info> entry : fields.entrySet()) { // for each Field Info
+ Info info = entry.getValue();
size += VM.sizeOfObject(2*INT + 3*PTR); // Info instance vars
if (info.sortedTerms != null) size += VM.sizeOfObjectArray(info.sortedTerms.length);
int len = info.terms.size();
size += VM.sizeOfHashMap(len);
- Iterator iter2 = info.terms.entrySet().iterator();
+ Iterator<Map.Entry<String,ArrayIntList>> iter2 = info.terms.entrySet().iterator();
while (--len >= 0) { // for each term
- Map.Entry e = (Map.Entry) iter2.next();
+ Map.Entry<String,ArrayIntList> e = iter2.next();
size += VM.sizeOfObject(PTR + 3*INT); // assumes substring() memory overlay
// size += STR + 2 * ((String) e.getKey()).length();
- ArrayIntList positions = (ArrayIntList) e.getValue();
+ ArrayIntList positions = e.getValue();
size += VM.sizeOfArrayIntList(positions.size());
}
}
@@ -510,13 +507,13 @@
}
/** returns a view of the given map's entries, sorted ascending by key */
- private static Map.Entry[] sort(HashMap map) {
+ private static <K,V> Map.Entry<K,V>[] sort(HashMap<K,V> map) {
int size = map.size();
- Map.Entry[] entries = new Map.Entry[size];
+ Map.Entry<K,V>[] entries = new Map.Entry[size];
- Iterator iter = map.entrySet().iterator();
+ Iterator<Map.Entry<K,V>> iter = map.entrySet().iterator();
for (int i=0; i < size; i++) {
- entries[i] = (Map.Entry) iter.next();
+ entries[i] = iter.next();
}
if (size > 1) Arrays.sort(entries, termComparator);
@@ -536,18 +533,18 @@
int sumTerms = 0;
for (int i=0; i < sortedFields.length; i++) {
- Map.Entry entry = sortedFields[i];
- String fieldName = (String) entry.getKey();
- Info info = (Info) entry.getValue();
+ Map.Entry entry = sortedFields[i];
+ String fieldName = entry.getKey();
+ Info info = entry.getValue();
info.sortTerms();
result.append(fieldName + ":\n");
int numChars = 0;
int numPositions = 0;
for (int j=0; j < info.sortedTerms.length; j++) {
- Map.Entry e = info.sortedTerms[j];
- String term = (String) e.getKey();
- ArrayIntList positions = (ArrayIntList) e.getValue();
+ Map.Entry e = info.sortedTerms[j];
+ String term = e.getKey();
+ ArrayIntList positions = e.getValue();
result.append("\t'" + term + "':" + numPositions(positions) + ":");
result.append(positions.toString(stride)); // ignore offsets
result.append("\n");
@@ -585,10 +582,10 @@
* Term strings and their positions for this field: Map
*/
- private final HashMap terms;
+ private final HashMap terms;
/** Terms sorted ascending by term text; computed on demand */
- private transient Map.Entry[] sortedTerms;
+ private transient Map.Entry[] sortedTerms;
/** Number of added tokens for this field */
private final int numTokens;
@@ -604,7 +601,7 @@
private static final long serialVersionUID = 2882195016849084649L;
- public Info(HashMap terms, int numTokens, int numOverlapTokens, float boost) {
+ public Info(HashMap terms, int numTokens, int numOverlapTokens, float boost) {
this.terms = terms;
this.numTokens = numTokens;
this.numOverlapTokens = numOverlapTokens;
@@ -625,12 +622,12 @@
/** note that the frequency can be calculated as numPosition(getPositions(x)) */
public ArrayIntList getPositions(String term) {
- return (ArrayIntList) terms.get(term);
+ return terms.get(term);
}
/** note that the frequency can be calculated as numPosition(getPositions(x)) */
public ArrayIntList getPositions(int pos) {
- return (ArrayIntList) sortedTerms[pos].getValue();
+ return sortedTerms[pos].getValue();
}
public float getBoost() {
@@ -745,11 +742,11 @@
protected void finalize() {}
private Info getInfo(String fieldName) {
- return (Info) fields.get(fieldName);
+ return fields.get(fieldName);
}
private Info getInfo(int pos) {
- return (Info) sortedFields[pos].getValue();
+ return sortedFields[pos].getValue();
}
public Bits getDeletedDocs() {
@@ -1024,7 +1021,7 @@
Info info = getInfo(j);
if (i >= info.sortedTerms.length) return null;
// if (DEBUG) System.err.println("TermEnum.term: " + i + ", " + info.sortedTerms[i].getKey());
- return createTerm(info, j, (String) info.sortedTerms[i].getKey());
+ return createTerm(info, j, info.sortedTerms[i].getKey());
}
public int docFreq() {
@@ -1044,7 +1041,7 @@
// Assertion: sortFields has already been called before
Term template = info.template;
if (template == null) { // not yet cached?
- String fieldName = (String) sortedFields[pos].getKey();
+ String fieldName = sortedFields[pos].getKey();
template = new Term(fieldName);
info.template = template;
}
@@ -1159,10 +1156,9 @@
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVectors");
TermFreqVector[] vectors = new TermFreqVector[fields.size()];
// if (vectors.length == 0) return null;
- Iterator iter = fields.keySet().iterator();
+ Iterator iter = fields.keySet().iterator();
for (int i=0; i < vectors.length; i++) {
- String fieldName = (String) iter.next();
- vectors[i] = getTermFreqVector(docNumber, fieldName);
+ vectors[i] = getTermFreqVector(docNumber, iter.next());
}
return vectors;
}
@@ -1172,9 +1168,8 @@
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVectors");
// if (vectors.length == 0) return null;
- for (Iterator iterator = fields.keySet().iterator(); iterator.hasNext();)
+ for (final String fieldName : fields.keySet())
{
- String fieldName = (String) iterator.next();
getTermFreqVector(docNumber, fieldName, mapper);
}
}
@@ -1190,7 +1185,7 @@
mapper.setExpectations(field, info.sortedTerms.length, stride != 1, true);
for (int i = info.sortedTerms.length; --i >=0;){
- ArrayIntList positions = (ArrayIntList) info.sortedTerms[i].getValue();
+ ArrayIntList positions = info.sortedTerms[i].getValue();
int size = positions.size();
org.apache.lucene.index.TermVectorOffsetInfo[] offsets =
new org.apache.lucene.index.TermVectorOffsetInfo[size / stride];
@@ -1200,9 +1195,9 @@
int end = positions.get(j+1);
offsets[k] = new org.apache.lucene.index.TermVectorOffsetInfo(start, end);
}
- mapper.map((String)info.sortedTerms[i].getKey(),
- numPositions((ArrayIntList) info.sortedTerms[i].getValue()),
- offsets, ((ArrayIntList) info.sortedTerms[i].getValue()).toArray(stride));
+ mapper.map(info.sortedTerms[i].getKey(),
+ numPositions(info.sortedTerms[i].getValue()),
+ offsets, (info.sortedTerms[i].getValue()).toArray(stride));
}
}
@@ -1214,7 +1209,7 @@
return new TermPositionVector() {
- private final Map.Entry[] sortedTerms = info.sortedTerms;
+ private final Map.Entry[] sortedTerms = info.sortedTerms;
public String getField() {
return fieldName;
@@ -1227,7 +1222,7 @@
public String[] getTerms() {
String[] terms = new String[sortedTerms.length];
for (int i=sortedTerms.length; --i >= 0; ) {
- terms[i] = (String) sortedTerms[i].getKey();
+ terms[i] = sortedTerms[i].getKey();
}
return terms;
}
@@ -1235,7 +1230,7 @@
public int[] getTermFrequencies() {
int[] freqs = new int[sortedTerms.length];
for (int i=sortedTerms.length; --i >= 0; ) {
- freqs[i] = numPositions((ArrayIntList) sortedTerms[i].getValue());
+ freqs[i] = numPositions(sortedTerms[i].getValue());
}
return freqs;
}
@@ -1255,14 +1250,14 @@
// lucene >= 1.4.3
public int[] getTermPositions(int index) {
- return ((ArrayIntList) sortedTerms[index].getValue()).toArray(stride);
+ return sortedTerms[index].getValue().toArray(stride);
}
// lucene >= 1.9 (remove this method for lucene-1.4.3)
public org.apache.lucene.index.TermVectorOffsetInfo[] getOffsets(int index) {
if (stride == 1) return null; // no offsets stored
- ArrayIntList positions = (ArrayIntList) sortedTerms[index].getValue();
+ ArrayIntList positions = sortedTerms[index].getValue();
int size = positions.size();
org.apache.lucene.index.TermVectorOffsetInfo[] offsets =
new org.apache.lucene.index.TermVectorOffsetInfo[size / stride];
@@ -1363,7 +1358,7 @@
throw new UnsupportedOperationException();
}
- protected void doCommit(Map commitUserData) {
+ protected void doCommit(Map commitUserData) {
if (DEBUG) System.err.println("MemoryIndexReader.doCommit");
}
@@ -1372,16 +1367,16 @@
}
// lucene >= 1.9 (remove this method for lucene-1.4.3)
- public Collection getFieldNames(FieldOption fieldOption) {
+ public Collection getFieldNames(FieldOption fieldOption) {
if (DEBUG) System.err.println("MemoryIndexReader.getFieldNamesOption");
if (fieldOption == FieldOption.UNINDEXED)
- return Collections.EMPTY_SET;
+ return Collections.emptySet();
if (fieldOption == FieldOption.INDEXED_NO_TERMVECTOR)
- return Collections.EMPTY_SET;
+ return Collections.emptySet();
if (fieldOption == FieldOption.TERMVECTOR_WITH_OFFSET && stride == 1)
- return Collections.EMPTY_SET;
+ return Collections.emptySet();
if (fieldOption == FieldOption.TERMVECTOR_WITH_POSITION_OFFSET && stride == 1)
- return Collections.EMPTY_SET;
+ return Collections.emptySet();
return Collections.unmodifiableSet(fields.keySet());
}
Modified: lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java Thu Oct 29 21:29:47 2009
@@ -21,13 +21,13 @@
import java.io.Reader;
import java.io.StringReader;
import java.util.Arrays;
-import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
@@ -72,11 +72,8 @@
/** "\\s+"
; Divides text at whitespaces (Character.isWhitespace(c)) */
public static final Pattern WHITESPACE_PATTERN = Pattern.compile("\\s+");
- private static final Set EXTENDED_ENGLISH_STOP_WORDS;
- static {
- EXTENDED_ENGLISH_STOP_WORDS = new HashSet();
-
- EXTENDED_ENGLISH_STOP_WORDS.addAll(Arrays.asList(new String[] {
+ private static final CharArraySet EXTENDED_ENGLISH_STOP_WORDS =
+ CharArraySet.unmodifiableSet(new CharArraySet(Arrays.asList(
"a", "about", "above", "across", "adj", "after", "afterwards",
"again", "against", "albeit", "all", "almost", "alone", "along",
"already", "also", "although", "always", "among", "amongst", "an",
@@ -117,8 +114,8 @@
"whomever", "whomsoever", "whose", "whosoever", "why", "will",
"with", "within", "without", "would", "xsubj", "xcal", "xauthor",
"xother ", "xnote", "yet", "you", "your", "yours", "yourself",
- "yourselves"}));
- }
+ "yourselves"
+ ), true));
/**
* A lower-casing word analyzer with English stop words (can be shared
@@ -139,7 +136,7 @@
private final Pattern pattern;
private final boolean toLowerCase;
- private final Set stopWords;
+ private final Set> stopWords;
private final Version matchVersion;
@@ -162,7 +159,7 @@
* or other stop words
* lists .
*/
- public PatternAnalyzer(Version matchVersion, Pattern pattern, boolean toLowerCase, Set stopWords) {
+ public PatternAnalyzer(Version matchVersion, Pattern pattern, boolean toLowerCase, Set> stopWords) {
if (pattern == null)
throw new IllegalArgumentException("pattern must not be null");
@@ -313,15 +310,7 @@
if (input != null) input.close();
}
}
-
- /** somewhat oversized to minimize hash collisions */
- private static Set makeStopSet(Set stopWords) {
- Set stops = new HashSet(stopWords.size() * 2, 0.3f);
- stops.addAll(stopWords);
- return stops;
-// return Collections.unmodifiableSet(stops);
- }
-
+
///////////////////////////////////////////////////////////////////////////////
// Nested classes:
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java Thu Oct 29 21:29:47 2009
@@ -103,7 +103,7 @@
}
}
- private boolean isOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Set segmentsToOptimize) throws IOException {
+ private boolean isOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Set segmentsToOptimize) throws IOException {
final int numSegments = infos.size();
int numToOptimize = 0;
SegmentInfo optimizeInfo = null;
@@ -128,7 +128,7 @@
}
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize) throws IOException {
+ public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize) throws IOException {
assert maxNumSegments > 0;
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java Thu Oct 29 21:29:47 2009
@@ -56,8 +56,7 @@
Similarity s = null;
if (!args[1].equals("-n")) {
try {
- Class simClass = Class.forName(args[1]);
- s = (Similarity)simClass.newInstance();
+ s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
} catch (Exception e) {
System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
e.printStackTrace(System.err);
@@ -146,7 +145,7 @@
if (sim == null)
reader.setNorm(d, fieldName, fakeNorms[0]);
else
- reader.setNorm(d, fieldName, sim.encodeNorm(sim.lengthNorm(fieldName, termCounts[d])));
+ reader.setNorm(d, fieldName, Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d])));
}
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java Thu Oct 29 21:29:47 2009
@@ -78,14 +78,14 @@
for (int x = 2; x < args.length; x++) {
segs.add(args[x]);
}
- is.remove((String[]) segs.toArray(new String[0]));
+ is.remove(segs.toArray(new String[0]));
} else {
File targetDir = new File(args[1]);
List segs = new ArrayList();
for (int x = 2; x < args.length; x++) {
segs.add(args[x]);
}
- is.split(targetDir, (String[]) segs.toArray(new String[0]));
+ is.split(targetDir, segs.toArray(new String[0]));
}
}
@@ -137,9 +137,8 @@
SegmentInfo info = getInfo(n);
destInfos.add(info);
// now copy files over
- List files = info.files();
- for (int x = 0; x < files.size(); x++) {
- String srcName = (String) files.get(x);
+ List files = info.files();
+ for (final String srcName : files) {
File srcFile = new File(dir, srcName);
File destFile = new File(destDir, srcName);
copyFile(srcFile, destFile);
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java Thu Oct 29 21:29:47 2009
@@ -158,7 +158,7 @@
if (indexes.size() == 1) {
input = indexes.get(0);
} else {
- input = new MultiReader((IndexReader[])indexes.toArray(new IndexReader[indexes.size()]));
+ input = new MultiReader(indexes.toArray(new IndexReader[indexes.size()]));
}
splitter.split(input, dirs, seq);
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java Thu Oct 29 21:29:47 2009
@@ -70,13 +70,13 @@
}
/** Instance reused to save garbage collector some time */
- private List/**/ tokens;
+ private List tokens;
/** Instance reused to save garbage collector some time */
- private List/**/ positions;
+ private List positions;
/** Instance reused to save garbage collector some time */
- private List/**/ frequencies;
+ private List frequencies;
/**
@@ -92,9 +92,9 @@
private void build(IndexReader indexReader, String field, TermVectorMapper mapper, int documentNumber) throws IOException {
if (tokens == null) {
- tokens = new ArrayList/**/(500);
- positions = new ArrayList/**/(500);
- frequencies = new ArrayList/**/(500);
+ tokens = new ArrayList(500);
+ positions = new ArrayList(500);
+ frequencies = new ArrayList(500);
} else {
tokens.clear();
frequencies.clear();
@@ -138,7 +138,7 @@
mapper.setDocumentNumber(documentNumber);
mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions());
for (int i = 0; i < tokens.size(); i++) {
- mapper.map((String) tokens.get(i), ((Integer) frequencies.get(i)).intValue(), (TermVectorOffsetInfo[]) null, (int[]) positions.get(i));
+ mapper.map(tokens.get(i), frequencies.get(i).intValue(), (TermVectorOffsetInfo[]) null, positions.get(i));
}
}
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java Thu Oct 29 21:29:47 2009
@@ -87,6 +87,11 @@
}
}
}
+
+ while (tiq.size() != 0) {
+ TermInfo termInfo = tiq.pop();
+ System.out.println(termInfo.term + " " + termInfo.docFreq);
+ }
reader.close();
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/LengthNormModifier.java Thu Oct 29 21:29:47 2009
@@ -58,8 +58,7 @@
Similarity s = null;
try {
- Class simClass = Class.forName(args[1]);
- s = (Similarity)simClass.newInstance();
+ s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
} catch (Exception e) {
System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
e.printStackTrace(System.err);
@@ -142,7 +141,7 @@
reader = IndexReader.open(dir, false);
for (int d = 0; d < termCounts.length; d++) {
if (! reader.isDeleted(d)) {
- byte norm = sim.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
+ byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
reader.setNorm(d, fieldName, norm);
}
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java Thu Oct 29 21:29:47 2009
@@ -17,7 +17,6 @@
package org.apache.lucene.misc;
-import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.index.FieldInvertState;
@@ -51,10 +50,10 @@
private int ln_max = 1;
private float ln_steep = 0.5f;
- private Map ln_mins = new HashMap(7);
- private Map ln_maxs = new HashMap(7);
- private Map ln_steeps = new HashMap(7);
- private Map ln_overlaps = new HashMap(7);
+ private Map ln_maxs = new HashMap(7);
+ private Map ln_mins = new HashMap(7);
+ private Map ln_steeps = new HashMap(7);
+ private Map ln_overlaps = new HashMap(7);
private float tf_base = 0.0f;
private float tf_min = 0.0f;
@@ -139,7 +138,7 @@
final int numTokens;
boolean overlaps = discountOverlaps;
if (ln_overlaps.containsKey(fieldName)) {
- overlaps = ((Boolean)ln_overlaps.get(fieldName)).booleanValue();
+ overlaps = ln_overlaps.get(fieldName).booleanValue();
}
if (overlaps)
numTokens = state.getLength() - state.getNumOverlap();
@@ -173,13 +172,13 @@
float s = ln_steep;
if (ln_mins.containsKey(fieldName)) {
- l = ((Number)ln_mins.get(fieldName)).intValue();
+ l = ln_mins.get(fieldName).intValue();
}
if (ln_maxs.containsKey(fieldName)) {
- h = ((Number)ln_maxs.get(fieldName)).intValue();
+ h = ln_maxs.get(fieldName).intValue();
}
if (ln_steeps.containsKey(fieldName)) {
- s = ((Number)ln_steeps.get(fieldName)).floatValue();
+ s = ln_steeps.get(fieldName).floatValue();
}
return (float)
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java Thu Oct 29 21:29:47 2009
@@ -23,7 +23,6 @@
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.ParseException;
@@ -76,8 +75,8 @@
* @throws ParseException
*/
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
- List tlist = new ArrayList();
- List wlist = new ArrayList();
+ List tlist = new ArrayList();
+ List wlist = new ArrayList();
/* somewhat a hack: find/store wildcard chars
* in order to put them back after analyzing */
boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*"));
@@ -145,8 +144,8 @@
/* if wlist contains one wildcard, it must be at the end, because:
* 1) wildcards are not allowed in 1st position of a term by QueryParser
* 2) if wildcard was *not* in end, there would be *two* or more tokens */
- return super.getWildcardQuery(field, (String) tlist.get(0)
- + (((String) wlist.get(0)).toString()));
+ return super.getWildcardQuery(field, tlist.get(0)
+ + wlist.get(0).toString());
} else {
/* we should never get here! if so, this method was called
* with a termStr containing no wildcard ... */
@@ -157,9 +156,9 @@
* with wildcards put back in postion */
StringBuilder sb = new StringBuilder();
for (int i = 0; i < tlist.size(); i++) {
- sb.append((String) tlist.get(i));
+ sb.append( tlist.get(i));
if (wlist != null && wlist.size() > i) {
- sb.append((String) wlist.get(i));
+ sb.append(wlist.get(i));
}
}
return super.getWildcardQuery(field, sb.toString());
@@ -188,7 +187,7 @@
protected Query getPrefixQuery(String field, String termStr) throws ParseException {
// get Analyzer from superclass and tokenize the term
TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
- List tlist = new ArrayList();
+ List tlist = new ArrayList();
TermAttribute termAtt = source.addAttribute(TermAttribute.class);
while (true) {
@@ -207,7 +206,7 @@
}
if (tlist.size() == 1) {
- return super.getPrefixQuery(field, (String) tlist.get(0));
+ return super.getPrefixQuery(field, tlist.get(0));
} else {
/* this means that the analyzer used either added or consumed
* (common for a stemmer) tokens, and we can't build a PrefixQuery */
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java Thu Oct 29 21:29:47 2009
@@ -62,7 +62,7 @@
*
*/
public class ComplexPhraseQueryParser extends QueryParser {
- private ArrayList/**/complexPhrases = null;
+ private ArrayList complexPhrases = null;
private boolean isPass2ResolvingPhrases;
@@ -102,7 +102,7 @@
// First pass - parse the top-level query recording any PhraseQuerys
// which will need to be resolved
- complexPhrases = new ArrayList/**/();
+ complexPhrases = new ArrayList();
Query q = super.parse(query);
// Perform second pass, using this QueryParser to parse any nested
@@ -110,8 +110,8 @@
// set of syntax restrictions (i.e. all fields must be same)
isPass2ResolvingPhrases = true;
try {
- for (Iterator iterator = complexPhrases.iterator(); iterator.hasNext();) {
- currentPhraseQuery = (ComplexPhraseQuery) iterator.next();
+ for (Iterator iterator = complexPhrases.iterator(); iterator.hasNext();) {
+ currentPhraseQuery = iterator.next();
// in each phrase, now parse the contents between quotes as a
// separate parse operation
currentPhraseQuery.parsePhraseElements(this);
@@ -247,10 +247,10 @@
}
if (qc instanceof BooleanQuery) {
- ArrayList sc = new ArrayList();
+ ArrayList sc = new ArrayList();
addComplexPhraseClause(sc, (BooleanQuery) qc);
if (sc.size() > 0) {
- allSpanClauses[i] = (SpanQuery) sc.get(0);
+ allSpanClauses[i] = sc.get(0);
} else {
// Insert fake term e.g. phrase query was for "Fred Smithe*" and
// there were no "Smithe*" terms - need to
@@ -278,14 +278,14 @@
// Complex case - we have mixed positives and negatives in the
// sequence.
// Need to return a SpanNotQuery
- ArrayList positiveClauses = new ArrayList();
+ ArrayList positiveClauses = new ArrayList();
for (int j = 0; j < allSpanClauses.length; j++) {
if (!bclauses[j].getOccur().equals(BooleanClause.Occur.MUST_NOT)) {
positiveClauses.add(allSpanClauses[j]);
}
}
- SpanQuery[] includeClauses = (SpanQuery[]) positiveClauses
+ SpanQuery[] includeClauses = positiveClauses
.toArray(new SpanQuery[positiveClauses.size()]);
SpanQuery include = null;
@@ -304,9 +304,9 @@
return snot;
}
- private void addComplexPhraseClause(List spanClauses, BooleanQuery qc) {
- ArrayList ors = new ArrayList();
- ArrayList nots = new ArrayList();
+ private void addComplexPhraseClause(List spanClauses, BooleanQuery qc) {
+ ArrayList ors = new ArrayList();
+ ArrayList nots = new ArrayList();
BooleanClause[] bclauses = qc.getClauses();
// For all clauses e.g. one* two~
@@ -314,7 +314,7 @@
Query childQuery = bclauses[i].getQuery();
// select the list to which we will add these options
- ArrayList chosenList = ors;
+ ArrayList chosenList = ors;
if (bclauses[i].getOccur() == BooleanClause.Occur.MUST_NOT) {
chosenList = nots;
}
@@ -336,12 +336,12 @@
if (ors.size() == 0) {
return;
}
- SpanOrQuery soq = new SpanOrQuery((SpanQuery[]) ors
+ SpanOrQuery soq = new SpanOrQuery(ors
.toArray(new SpanQuery[ors.size()]));
if (nots.size() == 0) {
spanClauses.add(soq);
} else {
- SpanOrQuery snqs = new SpanOrQuery((SpanQuery[]) nots
+ SpanOrQuery snqs = new SpanOrQuery(nots
.toArray(new SpanQuery[nots.size()]));
SpanNotQuery snq = new SpanNotQuery(soq, snqs);
spanClauses.add(snq);
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/FastCharStream.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/FastCharStream.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/FastCharStream.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/FastCharStream.java Thu Oct 29 21:29:47 2009
@@ -18,7 +18,6 @@
* limitations under the License.
*/
-import org.apache.lucene.queryParser.*;
import java.io.*;
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java Thu Oct 29 21:29:47 2009
@@ -8,7 +8,6 @@
import java.util.Date;
import java.util.List;
import java.util.Locale;
-import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
@@ -247,20 +246,13 @@
return locale;
}
- /**
- * @deprecated use {@link #addClause(List, int, int, Query)} instead.
- */
- protected void addClause(Vector clauses, int conj, int modifier, Query q) {
- addClause((List) clauses, conj, modifier, q);
- }
-
- protected void addClause(List clauses, int conj, int modifier, Query q) {
+ protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+ BooleanClause c = clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -270,7 +262,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+ BooleanClause c = clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -357,12 +349,12 @@
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
if (posincrAtt.getPositionIncrement() == 1 && multiTerms.size() > 0) {
- mpq.add((Term[])multiTerms.toArray(new Term[0]));
+ mpq.add(multiTerms.toArray(new Term[0]));
multiTerms.clear();
}
multiTerms.add(new Term(field, termAtt.term()));
}
- mpq.add((Term[])multiTerms.toArray(new Term[0]));
+ mpq.add(multiTerms.toArray(new Term[0]));
return mpq;
}
}
@@ -436,27 +428,8 @@
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getBooleanQuery(List)} instead
*/
- protected Query getBooleanQuery(Vector clauses) throws ParseException
- {
- return getBooleanQuery((List) clauses, false);
- }
-
- /**
- * Factory method for generating query, given a set of clauses.
- * By default creates a boolean query composed of clauses passed in.
- *
- * Can be overridden by extending classes, to modify query being
- * returned.
- *
- * @param clauses List that contains {@link BooleanClause} instances
- * to join.
- *
- * @return Resulting {@link Query} object.
- * @exception ParseException throw in overridden method to disallow
- */
- protected Query getBooleanQuery(List clauses) throws ParseException
+ protected Query getBooleanQuery(List clauses) throws ParseException
{
return getBooleanQuery(clauses, false);
}
@@ -474,36 +447,15 @@
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
- */
- protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
- throws ParseException
- {
- return getBooleanQuery((List) clauses, disableCoord);
- }
-
- /**
- * Factory method for generating query, given a set of clauses.
- * By default creates a boolean query composed of clauses passed in.
- *
- * Can be overridden by extending classes, to modify query being
- * returned.
- *
- * @param clauses List that contains {@link BooleanClause} instances
- * to join.
- * @param disableCoord true if coord scoring should be disabled.
- *
- * @return Resulting {@link Query} object.
- * @exception ParseException throw in overridden method to disallow
*/
- protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.get(i));
+ query.add(clauses.get(i));
}
return query;
}
@@ -705,7 +657,7 @@
}
final public Query Query(String field) throws ParseException {
- List clauses = new ArrayList();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@@ -757,7 +709,7 @@
}
final public Query andExpression(String field) throws ParseException {
- List clauses = new ArrayList();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
q = Clause(field);
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj Thu Oct 29 21:29:47 2009
@@ -32,7 +32,6 @@
import java.util.Date;
import java.util.List;
import java.util.Locale;
-import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
@@ -271,20 +270,13 @@
return locale;
}
- /**
- * @deprecated use {@link #addClause(List, int, int, Query)} instead.
- */
- protected void addClause(Vector clauses, int conj, int modifier, Query q) {
- addClause((List) clauses, conj, modifier, q);
- }
-
- protected void addClause(List clauses, int conj, int modifier, Query q) {
+ protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+ BooleanClause c = clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -294,7 +286,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+ BooleanClause c = clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -381,12 +373,12 @@
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
if (posincrAtt.getPositionIncrement() == 1 && multiTerms.size() > 0) {
- mpq.add((Term[])multiTerms.toArray(new Term[0]));
+ mpq.add(multiTerms.toArray(new Term[0]));
multiTerms.clear();
}
multiTerms.add(new Term(field, termAtt.term()));
}
- mpq.add((Term[])multiTerms.toArray(new Term[0]));
+ mpq.add(multiTerms.toArray(new Term[0]));
return mpq;
}
}
@@ -460,27 +452,8 @@
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getBooleanQuery(List)} instead
*/
- protected Query getBooleanQuery(Vector clauses) throws ParseException
- {
- return getBooleanQuery((List) clauses, false);
- }
-
- /**
- * Factory method for generating query, given a set of clauses.
- * By default creates a boolean query composed of clauses passed in.
- *
- * Can be overridden by extending classes, to modify query being
- * returned.
- *
- * @param clauses List that contains {@link BooleanClause} instances
- * to join.
- *
- * @return Resulting {@link Query} object.
- * @exception ParseException throw in overridden method to disallow
- */
- protected Query getBooleanQuery(List clauses) throws ParseException
+ protected Query getBooleanQuery(List clauses) throws ParseException
{
return getBooleanQuery(clauses, false);
}
@@ -498,36 +471,15 @@
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
- */
- protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
- throws ParseException
- {
- return getBooleanQuery((List) clauses, disableCoord);
- }
-
- /**
- * Factory method for generating query, given a set of clauses.
- * By default creates a boolean query composed of clauses passed in.
- *
- * Can be overridden by extending classes, to modify query being
- * returned.
- *
- * @param clauses List that contains {@link BooleanClause} instances
- * to join.
- * @param disableCoord true if coord scoring should be disabled.
- *
- * @return Resulting {@link Query} object.
- * @exception ParseException throw in overridden method to disallow
*/
- protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.get(i));
+ query.add(clauses.get(i));
}
return query;
}
@@ -763,7 +715,7 @@
Query Query(String field) :
{
- List clauses = new ArrayList();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@@ -790,7 +742,7 @@
Query andExpression(String field) :
{
- List clauses = new ArrayList();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
}
Modified: lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java (original)
+++ lucene/java/branches/flex_1458/contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java Thu Oct 29 21:29:47 2009
@@ -7,7 +7,6 @@
import java.util.Date;
import java.util.List;
import java.util.Locale;
-import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.*;
Modified: lucene/java/branches/flex_1458/contrib/queryparser/src/java/overview.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/queryparser/src/java/overview.html?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/queryparser/src/java/overview.html (original)
+++ lucene/java/branches/flex_1458/contrib/queryparser/src/java/overview.html Thu Oct 29 21:29:47 2009
@@ -29,9 +29,8 @@
-It's currently divided in 3 main packages:
+It's currently divided in 2 main packages:
-- {@link org.apache.lucene.messages}: it contains the API to defined lazily loaded messages. This message API is used by the new query parser to support localized messages.
- {@link org.apache.lucene.queryParser.core}: it contains the query parser API classes, which should be extended by query parser implementations.
- {@link org.apache.lucene.queryParser.standard}: it contains the current Lucene query parser implementation using the new query parser API.
Modified: lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (original)
+++ lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java Thu Oct 29 21:29:47 2009
@@ -97,7 +97,7 @@
*
* @param a optional analyzer used to parse the users query else {@link StandardAnalyzer} is used
*
- * @param field optional field name to search in or null if you want the default of "contents"
+ * @param f optional field name to search in or null if you want the default of "contents"
*
* @param boost optional boost applied to synonyms else no boost is applied
*
Modified: lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/package.html
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/package.html?rev=831094&r1=831093&r2=831094&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/package.html (original)
+++ lucene/java/branches/flex_1458/contrib/wordnet/src/java/org/apache/lucene/wordnet/package.html Thu Oct 29 21:29:47 2009
@@ -21,8 +21,14 @@
- This package uses synonyms defined by WordNet to build a
- Lucene index storing them, which in turn can be used for query expansion.
+ This package uses synonyms defined by WordNet.
+ There are two methods: query expansion and analysis.
+
+ Both methods first require you to download the WordNet prolog database
+ Inside this archive is a file named wn_s.pl, which contains the WordNet synonyms.
+
+ Query Expansion Method
+ This method creates Lucene index storing the synonyms, which in turn can be used for query expansion.
You normally run {@link org.apache.lucene.wordnet.Syns2Index} once to build the query index/"database", and then call
{@link org.apache.lucene.wordnet.SynExpand#expand SynExpand.expand(...)} to expand a query.
@@ -31,12 +37,21 @@
Instructions
- - Download the WordNet prolog database , gunzip, untar etc.
- Invoke Syn2Index as appropriate to build a synonym index.
- It'll take 2 arguments, the path to wn_s.pl from that WordNet download, and the index name.
+ It'll take 2 arguments, the path to wn_s.pl from the WordNet download, and the index name.
- Update your UI so that as appropriate you call SynExpand.expand(...) to expand user queries with synonyms.
+
+ Analysis Method
+ This method injects additional synonym tokens for tokens from a child {@link org.apache.lucene.analysis.TokenStream}.
+
+ Instructions
+
+ - Create a {@link org.apache.lucene.wordnet.SynonymMap}, passing in the path to wn_s.pl
+
- Add a {@link org.apache.lucene.wordnet.SynonymTokenFilter} to your analyzer. Note: SynonymTokenFilter should be after LowerCaseFilter,
+ because it expects terms to already be in lowercase.
+