lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mharw...@apache.org
Subject svn commit: r432042 - in /lucene/java/trunk/contrib/highlighter/src: java/org/apache/lucene/search/highlight/Highlighter.java test/org/apache/lucene/search/highlight/HighlighterTest.java
Date Wed, 16 Aug 2006 21:42:18 GMT
Author: mharwood
Date: Wed Aug 16 14:42:18 2006
New Revision: 432042

URL: http://svn.apache.org/viewvc?rev=432042&view=rev
Log:
Fix for http://issues.apache.org/jira/browse/LUCENE-645 with added JUnit tests for this bug
and a related problem where the last fragment can be huge when highlighting huge documents. 

Modified:
    lucene/java/trunk/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
    lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java

Modified: lucene/java/trunk/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java?rev=432042&r1=432041&r2=432042&view=diff
==============================================================================
--- lucene/java/trunk/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
(original)
+++ lucene/java/trunk/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
Wed Aug 16 14:42:18 2006
@@ -21,6 +21,7 @@
 import java.util.Iterator;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.PriorityQueue;
 
@@ -221,8 +222,8 @@
 			textFragmenter.start(text);
 
 			TokenGroup tokenGroup=new TokenGroup();
-
-			while ((token = tokenStream.next()) != null)
+			token = tokenStream.next();
+			while ((token!= null)&&(token.startOffset()<maxDocBytesToAnalyze))
 			{
 				if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct(token)))
 				{
@@ -251,12 +252,13 @@
 					}
 				}
 
-        tokenGroup.addToken(token,fragmentScorer.getTokenScore(token));
+				tokenGroup.addToken(token,fragmentScorer.getTokenScore(token));
 
-				if(lastEndOffset>maxDocBytesToAnalyze)
-				{
-					break;
-				}
+//				if(lastEndOffset>maxDocBytesToAnalyze)
+//				{
+//					break;
+//				}
+				token = tokenStream.next();
 			}
 			currentFrag.setScore(fragmentScorer.getFragmentScore());
 
@@ -274,9 +276,18 @@
 				lastEndOffset=Math.max(lastEndOffset,endOffset);
 			}
 
-			// append text after end of last token
-//			if (lastEndOffset < text.length())
-//				newText.append(encoder.encodeText(text.substring(lastEndOffset)));
+			//Test what remains of the original text beyond the point where we stopped analyzing 
+			if (
+//					if there is text beyond the last token considered..
+					(lastEndOffset < text.length()) 
+					&&
+//					and that text is not too large...
+					(text.length()<maxDocBytesToAnalyze)
+				)				
+			{
+				//append it to the last fragment
+				newText.append(encoder.encodeText(text.substring(lastEndOffset)));
+			}
 
 			currentFrag.textEndPos = newText.length();
 

Modified: lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=432042&r1=432041&r2=432042&view=diff
==============================================================================
--- lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
(original)
+++ lucene/java/trunk/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
Wed Aug 16 14:42:18 2006
@@ -44,6 +44,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RangeFilter;
 import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.spans.SpanNearQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
@@ -155,6 +156,17 @@
 		//Currently highlights "John" and "Kennedy" separately
 		assertTrue("Failed to find correct number of highlights " + numHighlights + " found", numHighlights
== 2);
 	}
+
+	public void testOffByOne() throws IOException 
+	{
+	    TermQuery query= new TermQuery( new Term( "data", "help" ));
+	    Highlighter hg = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer( query ));
+	    hg.setTextFragmenter( new NullFragmenter() );
+
+	    String match = null;
+	    match = hg.getBestFragment( new StandardAnalyzer(), "data", "help me [54-65]");
+	    assertEquals("<B>help</B> me [54-65]", match);
+	} 	
 	public void testGetBestFragmentsFilteredQuery() throws Exception
 	{
 		RangeFilter rf=new RangeFilter("contents","john","john",true,true);
@@ -338,6 +350,40 @@
 			"us from finding matches for this record: " + numHighlights +
 			 " found", numHighlights == 0);
 	}
+	public void testMaxSizeHighlightTruncates() throws IOException 
+	{
+	    String goodWord="goodtoken";
+	    String stopWords[]={"stoppedtoken"};
+	    
+	    TermQuery query= new TermQuery( new Term( "data", goodWord ));
+	    SimpleHTMLFormatter fm=new SimpleHTMLFormatter();
+	    Highlighter hg = new Highlighter(fm, new QueryScorer( query ));
+	    hg.setTextFragmenter( new NullFragmenter() );
+
+	    String match = null;
+	    StringBuffer sb=new StringBuffer();
+	    sb.append(goodWord);
+	    for(int i=0;i<10000;i++)
+	    {
+	    	sb.append(" ");
+	    	sb.append(stopWords[0]);
+	    }
+	    	    	
+	    hg.setMaxDocBytesToAnalyze(100);
+	    match = hg.getBestFragment( new StandardAnalyzer(stopWords), "data", sb.toString());
+	    assertTrue("Matched text should be no more than 100 chars in length ", 
+	    		match.length()<hg.getMaxDocBytesToAnalyze());
+	    
+	    //add another tokenized word to the overrall length - but set way beyond 
+	    //the length of text under consideration (after a large slug of stop words + whitespace)
+	    sb.append(" ");
+	    sb.append(goodWord);
+	    match = hg.getBestFragment( new StandardAnalyzer(stopWords), "data", sb.toString());
+	    assertTrue("Matched text should be no more than 100 chars in length ", 
+	    		match.length()<hg.getMaxDocBytesToAnalyze());
+	    
+	    
+	} 	
 
 
 



Mime
View raw message