lucene-commits mailing list archives

From er...@apache.org
Subject svn commit: r1386675 [6/10] - in /lucene/dev/branches/branch_4x: lucene/analysis/common/src/java/org/apache/lucene/analysis/br/ lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/ lucene/analysis/common/src/java/org/apache/lucene/ana...
Date Mon, 17 Sep 2012 15:55:18 GMT
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java Mon Sep 17 15:55:11 2012
@@ -28,7 +28,7 @@ import java.io.IOException;
 public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
   protected DocIdSetIterator _innerIter;
   private int doc;
-	
+
   /**
    * Constructor.
    * @param innerIter Underlying DocIdSetIterator.
@@ -40,7 +40,7 @@ public abstract class FilteredDocIdSetIt
     _innerIter = innerIter;
     doc = -1;
   }
-	
+
   /**
    * Validation method to determine whether a docid should be in the result set.
    * @param doc docid to be tested
@@ -48,7 +48,7 @@ public abstract class FilteredDocIdSetIt
    * @see #FilteredDocIdSetIterator(DocIdSetIterator)
    */
   protected abstract boolean match(int doc);
-	
+
   @Override
   public int docID() {
     return doc;

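For orientation on the class whose stray tabs are removed above: FilteredDocIdSetIterator decorates an inner DocIdSetIterator and keeps only the docids accepted by match(). A minimal anonymous subclass, assuming some inner iterator is already at hand (the even-docid rule is hypothetical, echoing the TestDocIdSet hunk later in this commit):

  DocIdSetIterator inner = innerSet.iterator();   // innerSet: any DocIdSet (assumed)
  DocIdSetIterator evens = new FilteredDocIdSetIterator(inner) {
    @Override
    protected boolean match(int doc) {
      return doc % 2 == 0;   // keep even docids only
    }
  };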
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java Mon Sep 17 15:55:11 2012
@@ -110,7 +110,7 @@ public class MultiPhraseQuery extends Qu
    * Do not modify the List or its contents.
    */
   public List<Term[]> getTermArrays() {
-	  return Collections.unmodifiableList(termArrays);
+    return Collections.unmodifiableList(termArrays);
   }
 
   /**

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java Mon Sep 17 15:55:11 2012
@@ -24,13 +24,13 @@ import org.apache.lucene.index.*;
  * Position of a term in a document that takes into account the term offset within the phrase. 
  */
 final class PhrasePositions {
-  int doc;					  // current doc
-  int position;					  // position in doc
-  int count;					  // remaining pos in this doc
-  int offset;					  // position in phrase
+  int doc;              // current doc
+  int position;         // position in doc
+  int count;            // remaining pos in this doc
+  int offset;           // position in phrase
   final int ord;                                  // unique across all PhrasePositions instances
-  final DocsAndPositionsEnum postings;  	  // stream of docs & positions
-  PhrasePositions next;	                          // used to make lists
+  final DocsAndPositionsEnum postings;            // stream of docs & positions
+  PhrasePositions next;                           // used to make lists
   int rptGroup = -1; // >=0 indicates that this is a repeating PP
   int rptInd; // index in the rptGroup
   final Term[] terms; // for repetitions initialization 
@@ -42,7 +42,7 @@ final class PhrasePositions {
     this.terms = terms;
   }
 
-  final boolean next() throws IOException {	  // increments to next doc
+  final boolean next() throws IOException {  // increments to next doc
     doc = postings.nextDoc();
     if (doc == DocIdSetIterator.NO_MORE_DOCS) {
       return false;
@@ -59,7 +59,7 @@ final class PhrasePositions {
   }
 
   final void firstPosition() throws IOException {
-    count = postings.freq();				  // read first pos
+    count = postings.freq();  // read first pos
     nextPosition();
   }
 
@@ -70,7 +70,7 @@ final class PhrasePositions {
    * have exactly the same <code>position</code>.
    */
   final boolean nextPosition() throws IOException {
-    if (count-- > 0) {				  // read subsequent pos's
+    if (count-- > 0) {  // read subsequent pos's
       position = postings.nextPosition() - offset;
       return true;
     } else

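A note on the arithmetic in the nextPosition() hunk above: subtracting offset puts every term of the phrase on a common coordinate, so the phrase machinery only has to test whether all PhrasePositions report the same adjusted position. Worked example for the phrase "new york": "new" has offset 0 and "york" offset 1; if they occur at document positions 5 and 6, both report adjusted positions 5 - 0 = 5 and 6 - 1 = 5, and the phrase matches at position 5.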
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java Mon Sep 17 15:55:11 2012
@@ -281,7 +281,7 @@ public class PhraseQuery extends Query {
         ArrayUtil.mergeSort(postingsFreqs);
       }
 
-      if (slop == 0) {				  // optimize exact case
+      if (slop == 0) {  // optimize exact case
         ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactSimScorer(stats, context));
         if (s.noDocs) {
           return null;

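The slop == 0 branch reformatted above is the exact-phrase fast path; any positive slop falls through to the sloppy scorer instead. A minimal sketch of the 4.x API that drives this branch (field and terms hypothetical):

  PhraseQuery pq = new PhraseQuery();
  pq.add(new Term("body", "quick"));
  pq.add(new Term("body", "fox"));
  pq.setSlop(0);    // exact adjacency: takes the ExactPhraseScorer path above
  // pq.setSlop(2); // up to 2 position moves: sloppy scoring instead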
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java Mon Sep 17 15:55:11 2012
@@ -24,12 +24,12 @@ package org.apache.lucene.search.payload
 public class MinPayloadFunction extends PayloadFunction {
 
   @Override
-	public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) {
+  public float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore) {
     if (numPayloadsSeen == 0) {
       return currentPayloadScore;
     } else {
-		return Math.min(currentPayloadScore, currentScore);
-	}
+      return Math.min(currentPayloadScore, currentScore);
+    }
   }
 
   @Override

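The reindented currentScore() above is a seeded running minimum: the first payload initializes the score, and every later payload is folded in with Math.min. The same fold written out over hypothetical payload values:

  float score = 0f;
  int numPayloadsSeen = 0;
  for (float payload : new float[] { 4f, 2f, 3f }) {
    score = (numPayloadsSeen == 0) ? payload : Math.min(score, payload);
    numPayloadsSeen++;
  }
  // score == 2f, as MinPayloadFunction would report for these payloads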
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadFunction.java Mon Sep 17 15:55:11 2012
@@ -56,10 +56,10 @@ public abstract class PayloadFunction {
   public abstract float docScore(int docId, String field, int numPayloadsSeen, float payloadScore);
   
   public Explanation explain(int docId, String field, int numPayloadsSeen, float payloadScore){
-	  Explanation result = new Explanation();
-	  result.setDescription(getClass().getSimpleName() + ".docScore()");
-	  result.setValue(docScore(docId, field, numPayloadsSeen, payloadScore));
-	  return result;
+    Explanation result = new Explanation();
+    result.setDescription(getClass().getSimpleName() + ".docScore()");
+    result.setValue(docScore(docId, field, numPayloadsSeen, payloadScore));
+    return result;
   };
   
   @Override

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java Mon Sep 17 15:55:11 2012
@@ -257,7 +257,7 @@ public class PayloadNearQuery extends Sp
             getPayloads(spansArr);            
             more = spans.next();
           } while (more && (doc == spans.doc()));
-          return true;    	
+          return true;
     }
 
     @Override

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java Mon Sep 17 15:55:11 2012
@@ -117,7 +117,7 @@ public class NearSpansOrdered extends Sp
   public int end() { return matchEnd; }
   
   public Spans[] getSubSpans() {
-	  return subSpans;
+    return subSpans;
   }  
 
   // TODO: Remove warning after API has been finalized

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java Mon Sep 17 15:55:11 2012
@@ -151,7 +151,7 @@ public class NearSpansUnordered extends 
     }
   }
   public Spans[] getSubSpans() {
-	  return subSpans;
+    return subSpans;
   }
   @Override
   public boolean next() throws IOException {
@@ -286,7 +286,7 @@ public class NearSpansUnordered extends 
   }
 
   private void addToList(SpansCell cell) {
-    if (last != null) {			  // add next to end of list
+    if (last != null) {  // add next to end of list
       last.next = cell;
     } else
       first = cell;
@@ -295,7 +295,7 @@ public class NearSpansUnordered extends 
   }
 
   private void firstToLast() {
-    last.next = first;			  // move first to end of list
+    last.next = first;  // move first to end of list
     last = first;
     first = first.next;
     last.next = null;

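The two hunks above maintain the singly linked queue of SpansCells that NearSpansUnordered keeps in order; firstToLast() rotates the head cell to the tail after that cell advances. The rotation, isolated on a bare node type (names hypothetical):

  static final class Node { Node next; }
  Node first, last;          // head and tail of a non-empty list

  void firstToLast() {
    last.next = first;       // old tail now points at the old head
    last = first;            // the old head becomes the new tail
    first = first.next;      // the second node becomes the head
    last.next = null;        // re-terminate the list
  }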
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java Mon Sep 17 15:55:11 2012
@@ -92,9 +92,9 @@ public class SpanNearQuery extends SpanQ
   
   @Override
   public void extractTerms(Set<Term> terms) {
-	    for (final SpanQuery clause : clauses) {
-	      clause.extractTerms(terms);
-	    }
+    for (final SpanQuery clause : clauses) {
+      clause.extractTerms(terms);
+    }
   }  
   
 

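The un-indented extractTerms() above simply recurses into each clause, so for a SpanNearQuery over SpanTermQuerys it flattens to the set of wrapped Terms. A small sketch (field and terms hypothetical):

  SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {
      new SpanTermQuery(new Term("f", "a")),
      new SpanTermQuery(new Term("f", "b")) }, 1, true);
  Set<Term> terms = new HashSet<Term>();
  near.extractTerms(terms);   // terms now holds f:a and f:b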
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java Mon Sep 17 15:55:11 2012
@@ -57,7 +57,7 @@ public abstract class SpanPositionCheckQ
 
   @Override
   public void extractTerms(Set<Term> terms) {
-	    match.extractTerms(terms);
+    match.extractTerms(terms);
   }
 
   /** 
@@ -186,4 +186,4 @@ public abstract class SpanPositionCheckQ
       }
 
   }
-}
\ No newline at end of file
+}

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/Spans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/Spans.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/Spans.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/search/spans/Spans.java Mon Sep 17 15:55:11 2012
@@ -34,7 +34,7 @@ public abstract class Spans {
    *   boolean skipTo(int target) {
    *     do {
    *       if (!next())
-   * 	     return false;
+   *         return false;
    *     } while (target > doc());
    *     return true;
    *   }

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/BufferedIndexInput.java Mon Sep 17 15:55:11 2012
@@ -41,9 +41,9 @@ public abstract class BufferedIndexInput
   
   protected byte[] buffer;
   
-  private long bufferStart = 0;			  // position in file of buffer
-  private int bufferLength = 0;			  // end of valid bytes
-  private int bufferPosition = 0;		  // next byte to read
+  private long bufferStart = 0;       // position in file of buffer
+  private int bufferLength = 0;       // end of valid bytes
+  private int bufferPosition = 0;     // next byte to read
 
   @Override
   public final byte readByte() throws IOException {
@@ -259,7 +259,7 @@ public abstract class BufferedIndexInput
   private void refill() throws IOException {
     long start = bufferStart + bufferPosition;
     long end = start + bufferSize;
-    if (end > length())				  // don't read past EOF
+    if (end > length())  // don't read past EOF
       end = length();
     int newLength = (int)(end - start);
     if (newLength <= 0)
@@ -294,7 +294,7 @@ public abstract class BufferedIndexInput
     else {
       bufferStart = pos;
       bufferPosition = 0;
-      bufferLength = 0;				  // trigger refill() on read()
+      bufferLength = 0;  // trigger refill() on read()
       seekInternal(pos);
     }
   }

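For orientation, the three fields realigned above describe a sliding window over the file: bufferStart is the window's offset in the file, bufferLength the count of valid bytes, and bufferPosition the read cursor. The seek hunk's trick of zeroing bufferLength works because the read path falls through to refill() whenever the cursor reaches the end of valid bytes; condensed (an assumed simplification, not the full class):

  public final byte readByte() throws IOException {
    if (bufferPosition >= bufferLength) {
      refill();              // bufferLength == 0 after seek() lands here
    }
    return buffer[bufferPosition++];
  }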
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/Lock.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/Lock.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/Lock.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/store/Lock.java Mon Sep 17 15:55:11 2012
@@ -135,7 +135,7 @@ public abstract class Lock {
          return doBody();
       } finally {
         if (locked)
-	      lock.release();
+          lock.release();
       }
     }
   }

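The release realigned above sits inside Lock.With, which runs doBody() while holding the lock and releases it in a finally block. The idiom, condensed from the surrounding method:

  boolean locked = false;
  try {
    locked = lock.obtain(lockWaitTimeout);   // may fail or time out
    return doBody();                         // caller's work under the lock
  } finally {
    if (locked)
      lock.release();                        // never leak a held lock
  }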
Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/Constants.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/Constants.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/Constants.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/Constants.java Mon Sep 17 15:55:11 2012
@@ -26,7 +26,7 @@ import org.apache.lucene.LucenePackage;
  **/
 
 public final class Constants {
-  private Constants() {}			  // can't construct
+  private Constants() {}  // can't construct
 
   /** JVM vendor info. */
   public static final String JVM_VENDOR = System.getProperty("java.vm.vendor");

Modified: lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/java/org/apache/lucene/util/PriorityQueue.java Mon Sep 17 15:55:11 2012
@@ -177,11 +177,11 @@ public abstract class PriorityQueue<T> {
     time. */
   public final T pop() {
     if (size > 0) {
-      T result = heap[1];			  // save first value
-      heap[1] = heap[size];			  // move last to first
-      heap[size] = null;			  // permit GC of objects
+      T result = heap[1];       // save first value
+      heap[1] = heap[size];     // move last to first
+      heap[size] = null;        // permit GC of objects
       size--;
-      downHeap();				  // adjust heap
+      downHeap();               // adjust heap
       return result;
     } else
       return null;
@@ -226,26 +226,26 @@ public abstract class PriorityQueue<T> {
 
   private final void upHeap() {
     int i = size;
-    T node = heap[i];			  // save bottom node
+    T node = heap[i];          // save bottom node
     int j = i >>> 1;
     while (j > 0 && lessThan(node, heap[j])) {
-      heap[i] = heap[j];			  // shift parents down
+      heap[i] = heap[j];       // shift parents down
       i = j;
       j = j >>> 1;
     }
-    heap[i] = node;				  // install saved node
+    heap[i] = node;            // install saved node
   }
 
   private final void downHeap() {
     int i = 1;
-    T node = heap[i];			  // save top node
-    int j = i << 1;				  // find smaller child
+    T node = heap[i];          // save top node
+    int j = i << 1;            // find smaller child
     int k = j + 1;
     if (k <= size && lessThan(heap[k], heap[j])) {
       j = k;
     }
     while (j <= size && lessThan(heap[j], node)) {
-      heap[i] = heap[j];			  // shift up child
+      heap[i] = heap[j];       // shift up child
       i = j;
       j = i << 1;
       k = j + 1;
@@ -253,7 +253,7 @@ public abstract class PriorityQueue<T> {
         j = k;
       }
     }
-    heap[i] = node;				  // install saved node
+    heap[i] = node;            // install saved node
   }
   
   /** This method returns the internal heap array as Object[].

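The comments realigned above describe the classic 1-indexed binary heap: heap[1] is the least element under lessThan(), pop() moves the last leaf to the root and lets downHeap() sift it back into place, and upHeap() sifts a newly added leaf up. Since lessThan() is the only abstract piece, a minimal min-heap over ints (element type hypothetical):

  PriorityQueue<Integer> pq = new PriorityQueue<Integer>(10) {
    @Override
    protected boolean lessThan(Integer a, Integer b) {
      return a < b;
    }
  };
  pq.add(3); pq.add(1); pq.add(2);
  int least = pq.pop();   // 1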
Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java Mon Sep 17 15:55:11 2012
@@ -111,26 +111,26 @@ public class TestLongPostings extends Lu
     }
 
     final IndexReader r;
-	  final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
-	    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
-	    .setMergePolicy(newLogMergePolicy());
-	  iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
-	  iwc.setMaxBufferedDocs(-1);
-	  final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-	
-	  for(int idx=0;idx<NUM_DOCS;idx++) {
-	    final Document doc = new Document();
-	    String s = isS1.get(idx) ? s1 : s2;
-	    final Field f = newTextField("field", s, Field.Store.NO);
-	    final int count = _TestUtil.nextInt(random(), 1, 4);
-	    for(int ct=0;ct<count;ct++) {
-	      doc.add(f);
-	    }
-	    riw.addDocument(doc);
-	  }
-	
-	  r = riw.getReader();
-	  riw.close();
+    final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
+      .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+      .setMergePolicy(newLogMergePolicy());
+    iwc.setRAMBufferSizeMB(16.0 + 16.0 * random().nextDouble());
+    iwc.setMaxBufferedDocs(-1);
+    final RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+
+    for(int idx=0;idx<NUM_DOCS;idx++) {
+      final Document doc = new Document();
+      String s = isS1.get(idx) ? s1 : s2;
+      final Field f = newTextField("field", s, Field.Store.NO);
+      final int count = _TestUtil.nextInt(random(), 1, 4);
+      for(int ct=0;ct<count;ct++) {
+        doc.add(f);
+      }
+      riw.addDocument(doc);
+    }
+
+    r = riw.getReader();
+    riw.close();
 
     /*
     if (VERBOSE) {

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java Mon Sep 17 15:55:11 2012
@@ -152,7 +152,7 @@ public class TestParallelReaderEmptyInde
 
     rd1.close();
     rd2.close();
-		
+
     iwOut.forceMerge(1);
     iwOut.close();
     

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java Mon Sep 17 15:55:11 2012
@@ -78,7 +78,7 @@ final class BugReproTokenStream extends 
       offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]);
       posIncAtt.setPositionIncrement(incs[nextTokenIndex]);
       nextTokenIndex++;
-      return true;			
+      return true;
     } else {
       return false;
     }

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java Mon Sep 17 15:55:11 2012
@@ -41,13 +41,13 @@ import org.apache.lucene.util.Bits;
  */
 
 public class TestTransactionRollback extends LuceneTestCase {
-	
+
   private static final String FIELD_RECORD_ID = "record_id";
   private Directory dir;
-	
+
   //Rolls back index to a chosen ID
   private void rollBackLast(int id) throws Exception {
-		
+
     // System.out.println("Attempting to rollback to "+id);
     String ids="-"+id;
     IndexCommit last=null;
@@ -62,7 +62,7 @@ public class TestTransactionRollback ext
 
     if (last==null)
       throw new RuntimeException("Couldn't find commit point "+id);
-		
+
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy(
         new RollbackDeletionPolicy(id)).setIndexCommit(last));
@@ -72,22 +72,22 @@ public class TestTransactionRollback ext
     w.close();
   }
 
-  public void testRepeatedRollBacks() throws Exception {		
+  public void testRepeatedRollBacks() throws Exception {
 
     int expectedLastRecordId=100;
     while (expectedLastRecordId>10) {
-      expectedLastRecordId -=10;			
+      expectedLastRecordId -=10;
       rollBackLast(expectedLastRecordId);
       
       BitSet expecteds = new BitSet(100);
       expecteds.set(1,(expectedLastRecordId+1),true);
-      checkExpecteds(expecteds);			
+      checkExpecteds(expecteds);
     }
   }
-	
+
   private void checkExpecteds(BitSet expecteds) throws Exception {
     IndexReader r = DirectoryReader.open(dir);
-		
+
     //Perhaps not the most efficient approach but meets our
     //needs here.
     final Bits liveDocs = MultiFields.getLiveDocs(r);
@@ -114,7 +114,7 @@ public class TestTransactionRollback ext
       Collection files = comm.getFileNames();
       for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) {
         String filename = (String) iterator2.next();
-        System.out.print(filename+", ");				
+        System.out.print(filename+", ");
       }
       System.out.println();
     }
@@ -133,7 +133,7 @@ public class TestTransactionRollback ext
       Document doc=new Document();
       doc.add(newTextField(FIELD_RECORD_ID, ""+currentRecordId, Field.Store.YES));
       w.addDocument(doc);
-			
+
       if (currentRecordId%10 == 0) {
         Map<String,String> data = new HashMap<String,String>();
         data.put("index", "records 1-"+currentRecordId);
@@ -177,16 +177,16 @@ public class TestTransactionRollback ext
                              " UserData="+commit.getUserData() +")  ("+(commits.size()-1)+" commit points left) files=");
             Collection files = commit.getFileNames();
             for (Iterator iterator2 = files.iterator(); iterator2.hasNext();) {
-              System.out.print(" "+iterator2.next());				
+              System.out.print(" "+iterator2.next());
             }
             System.out.println();
             */
-						
-            commit.delete();									
+
+            commit.delete();
           }
         }
       }
-    }		
+    }
   }
 
   class DeleteLastCommitPolicy implements IndexDeletionPolicy {
@@ -198,7 +198,7 @@ public class TestTransactionRollback ext
     }
   }
 
-  public void testRollbackDeletionPolicy() throws Exception {		
+  public void testRollbackDeletionPolicy() throws Exception {
     for(int i=0;i<2;i++) {
       // Unless you specify a prior commit point, rollback
       // should not work:
@@ -209,7 +209,7 @@ public class TestTransactionRollback ext
       r.close();
     }
   }
-	
+
   // Keeps all commit points (used to build index)
   class KeepAllDeletionPolicy implements IndexDeletionPolicy {
     public void onCommit(List<? extends IndexCommit> commits) throws IOException {}

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTransactions.java Mon Sep 17 15:55:11 2012
@@ -129,7 +129,7 @@ public class TestTransactions extends Lu
           }
           try {
             writer2.prepareCommit();
-          } catch (Throwable t) { 	
+          } catch (Throwable t) {
             writer1.rollback();
             writer2.rollback();
             return;

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java Mon Sep 17 15:55:11 2012
@@ -145,7 +145,7 @@ public class TestCachingCollector extend
     try {
       cc.replay(new NoOpCollector(false)); // this call should fail
       fail("should have failed if an in-order Collector was given to replay(), " +
-      		"while CachingCollector was initialized with out-of-order collection");
+           "while CachingCollector was initialized with out-of-order collection");
     } catch (IllegalArgumentException e) {
       // ok
     }

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java Mon Sep 17 15:55:11 2012
@@ -62,15 +62,15 @@ public class TestDocIdSet extends Lucene
           };
         } 
       };
-	  
-		
+
+
     DocIdSet filteredSet = new FilteredDocIdSet(innerSet){
         @Override
         protected boolean match(int docid) {
           return docid%2 == 0;  //validate only even docids
-        }	
+        }
       };
-	  
+
     DocIdSetIterator iter = filteredSet.iterator();
     ArrayList<Integer> list = new ArrayList<Integer>();
     int doc = iter.advance(3);
@@ -80,7 +80,7 @@ public class TestDocIdSet extends Lucene
         list.add(Integer.valueOf(doc));
       }
     }
-	  
+
     int[] docs = new int[list.size()];
     int c=0;
     Iterator<Integer> intIter = list.iterator();
@@ -151,7 +151,7 @@ public class TestDocIdSet extends Lucene
           @Override
           protected boolean match(int docid) {
             return true;
-          }	
+          }
         };
       }
     };

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java Mon Sep 17 15:55:11 2012
@@ -101,7 +101,7 @@ public class TestFuzzyQuery extends Luce
     }
 
     // not similar enough:
-    query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMaxEdits, 0);  	
+    query = new FuzzyQuery(new Term("field", "xxxxx"), FuzzyQuery.defaultMaxEdits, 0);
     hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals(0, hits.length);
     query = new FuzzyQuery(new Term("field", "aaccc"), FuzzyQuery.defaultMaxEdits, 0);   // edit distance to "aaaaa" = 3

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java Mon Sep 17 15:55:11 2012
@@ -140,7 +140,7 @@ public class TestPayloadNearQuery extend
 
     query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction());
     QueryUtils.check(query);
-		
+
     // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4
     // and all the similarity factors are set to 1
     hits = searcher.search(query, null, 100);
@@ -162,8 +162,8 @@ public class TestPayloadNearQuery extend
       assertEquals("should be 100 hits", 100, hits.totalHits);
       for (int j = 0; j < hits.scoreDocs.length; j++) {
         ScoreDoc doc = hits.scoreDocs[j];
-        //				System.out.println("Doc: " + doc.toString());
-        //				System.out.println("Explain: " + searcher.explain(query, doc.doc));
+        //        System.out.println("Doc: " + doc.toString());
+        //        System.out.println("Explain: " + searcher.explain(query, doc.doc));
         assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
       }
     }
@@ -192,71 +192,71 @@ public class TestPayloadNearQuery extend
   }
   
   public void testAverageFunction() throws IOException {
-	  PayloadNearQuery query;
-	  TopDocs hits;
+    PayloadNearQuery query;
+    TopDocs hits;
 
-	  query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction());
-	  QueryUtils.check(query);
-	  // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4
-	  // and all the similarity factors are set to 1
-	  hits = searcher.search(query, null, 100);
-	  assertTrue("hits is null and it shouldn't be", hits != null);
-	  assertTrue("should be 10 hits", hits.totalHits == 10);
-	  for (int j = 0; j < hits.scoreDocs.length; j++) {
-		  ScoreDoc doc = hits.scoreDocs[j];
-		  assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
-		  Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
-		  String exp = explain.toString();
-		  assertTrue(exp, exp.indexOf("AveragePayloadFunction") > -1);
-		  assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 3, explain.getValue() == 3f);
-	  }
+    query = newPhraseQuery("field", "twenty two", true, new AveragePayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 3 because adjacent terms have payloads of 2,4
+    // and all the similarity factors are set to 1
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("AveragePayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 3, explain.getValue() == 3f);
+    }
   }
   public void testMaxFunction() throws IOException {
-	  PayloadNearQuery query;
-	  TopDocs hits;
+    PayloadNearQuery query;
+    TopDocs hits;
 
-	  query = newPhraseQuery("field", "twenty two", true, new MaxPayloadFunction());
-	  QueryUtils.check(query);
-	  // all 10 hits should have score = 4 (max payload value)
-	  hits = searcher.search(query, null, 100);
-	  assertTrue("hits is null and it shouldn't be", hits != null);
-	  assertTrue("should be 10 hits", hits.totalHits == 10);
-	  for (int j = 0; j < hits.scoreDocs.length; j++) {
-		  ScoreDoc doc = hits.scoreDocs[j];
-		  assertTrue(doc.score + " does not equal: " + 4, doc.score == 4);
-		  Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
-		  String exp = explain.toString();
-		  assertTrue(exp, exp.indexOf("MaxPayloadFunction") > -1);
-		  assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 4, explain.getValue() == 4f);
-	  }
+    query = newPhraseQuery("field", "twenty two", true, new MaxPayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 4 (max payload value)
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 4, doc.score == 4);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("MaxPayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 4, explain.getValue() == 4f);
+    }
   }  
   public void testMinFunction() throws IOException {
-	  PayloadNearQuery query;
-	  TopDocs hits;
+    PayloadNearQuery query;
+    TopDocs hits;
 
-	  query = newPhraseQuery("field", "twenty two", true, new MinPayloadFunction());
-	  QueryUtils.check(query);
-	  // all 10 hits should have score = 2 (min payload value)
-	  hits = searcher.search(query, null, 100);
-	  assertTrue("hits is null and it shouldn't be", hits != null);
-	  assertTrue("should be 10 hits", hits.totalHits == 10);
-	  for (int j = 0; j < hits.scoreDocs.length; j++) {
-		  ScoreDoc doc = hits.scoreDocs[j];
-		  assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
-		  Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
-		  String exp = explain.toString();
-		  assertTrue(exp, exp.indexOf("MinPayloadFunction") > -1);
-		  assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 2, explain.getValue() == 2f);
-	  }
+    query = newPhraseQuery("field", "twenty two", true, new MinPayloadFunction());
+    QueryUtils.check(query);
+    // all 10 hits should have score = 2 (min payload value)
+    hits = searcher.search(query, null, 100);
+    assertTrue("hits is null and it shouldn't be", hits != null);
+    assertTrue("should be 10 hits", hits.totalHits == 10);
+    for (int j = 0; j < hits.scoreDocs.length; j++) {
+      ScoreDoc doc = hits.scoreDocs[j];
+      assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
+      Explanation explain = searcher.explain(query, hits.scoreDocs[j].doc);
+      String exp = explain.toString();
+      assertTrue(exp, exp.indexOf("MinPayloadFunction") > -1);
+      assertTrue(hits.scoreDocs[j].score + " explain value does not equal: " + 2, explain.getValue() == 2f);
+    }
   }  
   private SpanQuery[] getClauses() {
-	    SpanNearQuery q1, q2;
-	    q1 = spanNearQuery("field2", "twenty two");
-	    q2 = spanNearQuery("field2", "twenty three");
-	    SpanQuery[] clauses = new SpanQuery[2];
-	    clauses[0] = q1;
-	    clauses[1] = q2;
-	    return clauses;
+      SpanNearQuery q1, q2;
+      q1 = spanNearQuery("field2", "twenty two");
+      q2 = spanNearQuery("field2", "twenty three");
+      SpanQuery[] clauses = new SpanQuery[2];
+      clauses[0] = q1;
+      clauses[1] = q2;
+      return clauses;
   }
   private SpanNearQuery spanNearQuery(String fieldName, String words) {
     String[] wordList = words.split("[\\s]+");
@@ -274,8 +274,8 @@ public class TestPayloadNearQuery extend
     hits = searcher.search(query, null, 100);
     assertTrue("hits is null and it shouldn't be", hits != null);
     ScoreDoc doc = hits.scoreDocs[0];
-    //		System.out.println("Doc: " + doc.toString());
-    //		System.out.println("Explain: " + searcher.explain(query, doc.doc));
+    //    System.out.println("Doc: " + doc.toString());
+    //    System.out.println("Explain: " + searcher.explain(query, doc.doc));
     assertTrue("there should only be one hit", hits.totalHits == 1);
     // should have score = 3 because adjacent terms have payloads of 2,4
     assertTrue(doc.score + " does not equal: " + 3, doc.score == 3); 
@@ -299,8 +299,8 @@ public class TestPayloadNearQuery extend
     assertTrue("should only be one hit", hits.scoreDocs.length == 1);
     // the score should be 3 - the average of all the underlying payloads
     ScoreDoc doc = hits.scoreDocs[0];
-    //		System.out.println("Doc: " + doc.toString());
-    //		System.out.println("Explain: " + searcher.explain(query, doc.doc));
+    //    System.out.println("Doc: " + doc.toString());
+    //    System.out.println("Explain: " + searcher.explain(query, doc.doc));
     assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);  
   }
 

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java Mon Sep 17 15:55:11 2012
@@ -582,21 +582,21 @@ public class TestBasics extends LuceneTe
   
   @Test
   public void testSpansSkipTo() throws Exception {
-	  SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy"));
-	  SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy"));
-	  Spans s1 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t1);
-	  Spans s2 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t2);
-	  
-	  assertTrue(s1.next());
-	  assertTrue(s2.next());
-	  
-	  boolean hasMore = true;
-	  
-	  do {
-		  hasMore = skipToAccoringToJavaDocs(s1, s1.doc());
-		  assertEquals(hasMore, s2.skipTo(s2.doc()));
-		  assertEquals(s1.doc(), s2.doc());
-	  } while (hasMore);
+    SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy"));
+    SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy"));
+    Spans s1 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t1);
+    Spans s2 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t2);
+
+    assertTrue(s1.next());
+    assertTrue(s2.next());
+
+    boolean hasMore = true;
+
+    do {
+      hasMore = skipToAccoringToJavaDocs(s1, s1.doc());
+      assertEquals(hasMore, s2.skipTo(s2.doc()));
+      assertEquals(s1.doc(), s2.doc());
+    } while (hasMore);
   }
 
   /** Skips to the first match beyond the current, whose document number is

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/store/TestWindowsMMap.java Mon Sep 17 15:55:11 2012
@@ -84,7 +84,7 @@ public class TestWindowsMMap extends Luc
     for(int dx = 0; dx < num; dx ++) {
       String f = randomField();
       Document doc = new Document();
-      doc.add(newTextField("data", f, Field.Store.YES));	
+      doc.add(newTextField("data", f, Field.Store.YES));  
       writer.addDocument(doc);
     }
     

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestBitUtil.java Mon Sep 17 15:55:11 2012
@@ -71,12 +71,12 @@ public class TestBitUtil extends LuceneT
     long sumRes = 0;
     while (iters-- >= 0) {
       for (int i = 1; i <= 63; i++) {
-      	long a = testArg(i);
-	sumRes += BitUtil.nlz(a);
-	sumRes += BitUtil.nlz(a+1);
-	sumRes += BitUtil.nlz(a-1);
-	sumRes += BitUtil.nlz(a+10);
-	sumRes += BitUtil.nlz(a-10);
+        long a = testArg(i);
+        sumRes += BitUtil.nlz(a);
+        sumRes += BitUtil.nlz(a + 1);
+        sumRes += BitUtil.nlz(a - 1);
+        sumRes += BitUtil.nlz(a + 10);
+        sumRes += BitUtil.nlz(a - 10);
       }
     }
     return sumRes;
@@ -86,12 +86,12 @@ public class TestBitUtil extends LuceneT
     long sumRes = 0;
     while (iters-- >= 0) {
       for (int i = 1; i <= 63; i++) {
-      	long a = testArg(i);
-	sumRes += Long.numberOfLeadingZeros(a);
-	sumRes += Long.numberOfLeadingZeros(a+1);
-	sumRes += Long.numberOfLeadingZeros(a-1);
-	sumRes += Long.numberOfLeadingZeros(a+10);
-	sumRes += Long.numberOfLeadingZeros(a-10);
+        long a = testArg(i);
+        sumRes += Long.numberOfLeadingZeros(a);
+        sumRes += Long.numberOfLeadingZeros(a + 1);
+        sumRes += Long.numberOfLeadingZeros(a - 1);
+        sumRes += Long.numberOfLeadingZeros(a + 10);
+        sumRes += Long.numberOfLeadingZeros(a - 10);
       }
     }
     return sumRes;

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java Mon Sep 17 15:55:11 2012
@@ -49,7 +49,7 @@ public class TestFixedBitSet extends Luc
       // aa = a.prevSetBit(aa-1);
       aa--;
       while ((aa >= 0) && (! a.get(aa))) {
-      	aa--;
+        aa--;
       }
       if (b.length() == 0) {
         bb = -1;

Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/util/TestOpenBitSet.java Mon Sep 17 15:55:11 2012
@@ -71,7 +71,7 @@ public class TestOpenBitSet extends Luce
       // aa = a.prevSetBit(aa-1);
       aa--;
       while ((aa >= 0) && (! a.get(aa))) {
-      	aa--;
+        aa--;
       }
       bb = b.prevSetBit(bb-1);
       assertEquals(aa,bb);
@@ -85,7 +85,7 @@ public class TestOpenBitSet extends Luce
       // aa = a.prevSetBit(aa-1);
       aa--;
       while ((aa >= 0) && (! a.get(aa))) {
-      	aa--;
+        aa--;
       }
       bb = (int) b.prevSetBit((long) (bb-1));
       assertEquals(aa,bb);

Modified: lucene/dev/branches/branch_4x/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java (original)
+++ lucene/dev/branches/branch_4x/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java Mon Sep 17 15:55:11 2012
@@ -356,7 +356,7 @@ public class DirectoryTaxonomyReader imp
     // only possible writer, and it is "synchronized" to avoid this case).
     DirectoryReader r2 = DirectoryReader.openIfChanged(indexReader);
     if (r2 == null) {
-    	return false; // no changes, nothing to do
+      return false; // no changes, nothing to do
     } 
     
     // validate that a refresh is valid at this point, i.e. that the taxonomy 
@@ -364,13 +364,13 @@ public class DirectoryTaxonomyReader imp
     String t1 = indexReader.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_CREATE_TIME);
     String t2 = r2.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_CREATE_TIME);
     if (t1==null) {
-    	if (t2!=null) {
-    		r2.close();
-    		throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2);
-    	}
+      if (t2!=null) {
+        r2.close();
+        throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2);
+      }
     } else if (!t1.equals(t2)) {
-    	r2.close();
-    	throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2+"  !=  "+t1);
+      r2.close();
+      throw new InconsistentTaxonomyException("Taxonomy was recreated at: "+t2+"  !=  "+t1);
     }
     
       IndexReader oldreader = indexReader;

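The refresh logic reindented above follows the standard DirectoryReader.openIfChanged() contract: a null return means the reader is already current; a non-null return is a new reader the caller adopts, closing the old one. The bare pattern (directory variable hypothetical):

  DirectoryReader reader = DirectoryReader.open(dir);
  // later, to pick up index changes:
  DirectoryReader newer = DirectoryReader.openIfChanged(reader);
  if (newer != null) {   // the index changed since reader was opened
    reader.close();
    reader = newer;
  }                      // else: no changes, keep the current reader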
Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/DefaultEncoder.java Mon Sep 17 15:55:11 2012
@@ -21,12 +21,12 @@ package org.apache.lucene.search.highlig
  */
 public class DefaultEncoder implements Encoder
 {
-	public DefaultEncoder()
-	{
-	}
+  public DefaultEncoder()
+  {
+  }
 
-	public String encodeText(String originalText)
-	{
-		return originalText;
-	}
+  public String encodeText(String originalText)
+  {
+    return originalText;
+  }
 }
\ No newline at end of file

Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Encoder.java Mon Sep 17 15:55:11 2012
@@ -22,8 +22,8 @@ package org.apache.lucene.search.highlig
  */
 public interface Encoder
 {
-	/**
-	 * @param originalText The section of text being output
-	 */
-	String encodeText(String originalText);
+  /**
+   * @param originalText The section of text being output
+   */
+  String encodeText(String originalText);
 }
\ No newline at end of file

Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Formatter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Formatter.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Formatter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Formatter.java Mon Sep 17 15:55:11 2012
@@ -24,10 +24,10 @@ package org.apache.lucene.search.highlig
  */
 public interface Formatter
 {
-	/**
-	 * @param originalText The section of text being considered for markup
-	 * @param tokenGroup contains one or several overlapping Tokens along with
-	 * their scores and positions.
-	 */
-	String highlightTerm(String originalText, TokenGroup tokenGroup);
+  /**
+   * @param originalText The section of text being considered for markup
+   * @param tokenGroup contains one or several overlapping Tokens along with
+   * their scores and positions.
+   */
+  String highlightTerm(String originalText, TokenGroup tokenGroup);
 }

Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/GradientFormatter.java Mon Sep 17 15:55:11 2012
@@ -42,7 +42,7 @@ public class GradientFormatter implement
      * 
      * @param maxScore
      *            The score (and above) displayed as maxColor (See QueryScorer.getMaxWeight 
-     * 			  which can be used to calibrate scoring scale)
+     *         which can be used to calibrate scoring scale)
      * @param minForegroundColor
      *            The hex color used for representing IDF scores of zero eg
      *            #FFFFFF (white) or null if no foreground color required

Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java?rev=1386675&r1=1386674&r2=1386675&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java Mon Sep 17 15:55:11 2012
@@ -38,445 +38,445 @@ public class Highlighter
   public static final int DEFAULT_MAX_CHARS_TO_ANALYZE = 50*1024;
 
   private int maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE;
-	private Formatter formatter;
-	private Encoder encoder;
-	private Fragmenter textFragmenter=new SimpleFragmenter();
-	private Scorer fragmentScorer=null;
-
-	public Highlighter(Scorer fragmentScorer)
-	{
-		this(new SimpleHTMLFormatter(),fragmentScorer);
-	}
-
-
- 	public Highlighter(Formatter formatter, Scorer fragmentScorer)
- 	{
-		this(formatter,new DefaultEncoder(),fragmentScorer);
-	}
-
-
-	public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer)
-	{
- 		this.formatter = formatter;
-		this.encoder = encoder;
- 		this.fragmentScorer = fragmentScorer;
- 	}
-
-	/**
-	 * Highlights chosen terms in a text, extracting the most relevant section.
-	 * This is a convenience method that calls
-	 * {@link #getBestFragment(TokenStream, String)}
-	 *
-	 * @param analyzer   the analyzer that will be used to split <code>text</code>
-	 * into chunks
-	 * @param text text to highlight terms in
-	 * @param fieldName Name of field used to influence analyzer's tokenization policy
-	 *
-	 * @return highlighted text fragment or null if no terms found
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final String getBestFragment(Analyzer analyzer, String fieldName,String text)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
-		return getBestFragment(tokenStream, text);
-	}
-
-	/**
-	 * Highlights chosen terms in a text, extracting the most relevant section.
-	 * The document text is analysed in chunks to record hit statistics
-	 * across the document. After accumulating stats, the fragment with the highest score
-	 * is returned
-	 *
-	 * @param tokenStream   a stream of tokens identified in the text parameter, including offset information.
-	 * This is typically produced by an analyzer re-parsing a document's
-	 * text. Some work may be done on retrieving TokenStreams more efficiently
-	 * by adding support for storing original text position data in the Lucene
-	 * index but this support is not currently available (as of Lucene 1.4 rc2).
-	 * @param text text to highlight terms in
-	 *
-	 * @return highlighted text fragment or null if no terms found
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final String getBestFragment(TokenStream tokenStream, String text)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		String[] results = getBestFragments(tokenStream,text, 1);
-		if (results.length > 0)
-		{
-			return results[0];
-		}
-		return null;
-	}
-
-	/**
-	 * Highlights chosen terms in a text, extracting the most relevant sections.
-	 * This is a convenience method that calls
-	 * {@link #getBestFragments(TokenStream, String, int)}
-	 *
-	 * @param analyzer   the analyzer that will be used to split <code>text</code>
-	 * into chunks
-	 * @param fieldName     the name of the field being highlighted (used by analyzer)
-	 * @param text        	text to highlight terms in
-	 * @param maxNumFragments  the maximum number of fragments.
-	 *
-	 * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final String[] getBestFragments(
-		Analyzer analyzer,
-		String fieldName,
-		String text,
-		int maxNumFragments)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
-		return getBestFragments(tokenStream, text, maxNumFragments);
-	}
-
-	/**
-	 * Highlights chosen terms in a text, extracting the most relevant sections.
-	 * The document text is analysed in chunks to record hit statistics
-	 * across the document. After accumulating stats, the fragments with the highest scores
-	 * are returned as an array of strings in order of score (contiguous fragments are merged into
-	 * one in their original order to improve readability)
-	 *
-	 * @param text        	text to highlight terms in
-	 * @param maxNumFragments  the maximum number of fragments.
-	 *
-	 * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final String[] getBestFragments(
-		TokenStream tokenStream,
-		String text,
-		int maxNumFragments)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		maxNumFragments = Math.max(1, maxNumFragments); //sanity check
-
-		TextFragment[] frag =getBestTextFragments(tokenStream,text, true,maxNumFragments);
-
-		//Get text
-		ArrayList<String> fragTexts = new ArrayList<String>();
-		for (int i = 0; i < frag.length; i++)
-		{
-			if ((frag[i] != null) && (frag[i].getScore() > 0))
-			{
-				fragTexts.add(frag[i].toString());
-			}
-		}
-		return fragTexts.toArray(new String[0]);
-	}
-
-
-	/**
-	 * Low level api to get the most relevant (formatted) sections of the document.
-	 * This method has been made public to allow visibility of score information held in TextFragment objects.
-	 * Thanks to Jason Calabrese for help in redefining the interface.
-	 * @param tokenStream
-	 * @param text
-	 * @param maxNumFragments
-	 * @param mergeContiguousFragments
-	 * @throws IOException
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final TextFragment[] getBestTextFragments(
-		TokenStream tokenStream,
-		String text,
-		boolean mergeContiguousFragments,
-		int maxNumFragments)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		ArrayList<TextFragment> docFrags = new ArrayList<TextFragment>();
-		StringBuilder newText=new StringBuilder();
-		
-	    CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);
-	    OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
-	    tokenStream.addAttribute(PositionIncrementAttribute.class);
-	    tokenStream.reset();
-	    
-		TextFragment currentFrag =	new TextFragment(newText,newText.length(), docFrags.size());
-		
+  private Formatter formatter;
+  private Encoder encoder;
+  private Fragmenter textFragmenter=new SimpleFragmenter();
+  private Scorer fragmentScorer=null;
+
+  public Highlighter(Scorer fragmentScorer)
+  {
+    this(new SimpleHTMLFormatter(),fragmentScorer);
+  }
+
+
+  public Highlighter(Formatter formatter, Scorer fragmentScorer)
+  {
+    this(formatter,new DefaultEncoder(),fragmentScorer);
+  }
+
+
+  public Highlighter(Formatter formatter, Encoder encoder, Scorer fragmentScorer)
+  {
+    this.formatter = formatter;
+    this.encoder = encoder;
+    this.fragmentScorer = fragmentScorer;
+  }
+
+  /**
+   * Highlights chosen terms in a text, extracting the most relevant section.
+   * This is a convenience method that calls
+   * {@link #getBestFragment(TokenStream, String)}
+   *
+   * @param analyzer   the analyzer that will be used to split <code>text</code>
+   * into chunks
+   * @param text text to highlight terms in
+   * @param fieldName Name of field used to influence analyzer's tokenization policy
+   *
+   * @return highlighted text fragment or null if no terms found
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final String getBestFragment(Analyzer analyzer, String fieldName,String text)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
+    return getBestFragment(tokenStream, text);
+  }
+
+  /**
+   * Highlights chosen terms in a text, extracting the most relevant section.
+   * The document text is analysed in chunks to record hit statistics
+   * across the document. After accumulating stats, the fragment with the highest score
+   * is returned
+   *
+   * @param tokenStream   a stream of tokens identified in the text parameter, including offset information.
+   * This is typically produced by an analyzer re-parsing a document's
+   * text. Some work may be done on retrieving TokenStreams more efficiently
+   * by adding support for storing original text position data in the Lucene
+   * index but this support is not currently available (as of Lucene 1.4 rc2).
+   * @param text text to highlight terms in
+   *
+   * @return highlighted text fragment or null if no terms found
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final String getBestFragment(TokenStream tokenStream, String text)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    String[] results = getBestFragments(tokenStream,text, 1);
+    if (results.length > 0)
+    {
+      return results[0];
+    }
+    return null;
+  }
+
+  /**
+   * Highlights chosen terms in a text, extracting the most relevant sections.
+   * This is a convenience method that calls
+   * {@link #getBestFragments(TokenStream, String, int)}
+   *
+   * @param analyzer   the analyzer that will be used to split <code>text</code>
+   * into chunks
+   * @param fieldName     the name of the field being highlighted (used by analyzer)
+   * @param text          text to highlight terms in
+   * @param maxNumFragments  the maximum number of fragments.
+   *
+   * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final String[] getBestFragments(
+    Analyzer analyzer,
+    String fieldName,
+    String text,
+    int maxNumFragments)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
+    return getBestFragments(tokenStream, text, maxNumFragments);
+  }
+
+  /**
+   * Highlights chosen terms in a text, extracting the most relevant sections.
+   * The document text is analysed in chunks to record hit statistics
+   * across the document. After accumulating stats, the fragments with the highest scores
+   * are returned as an array of strings in order of score (contiguous fragments are merged into
+   * one in their original order to improve readability)
+   *
+   * @param text          text to highlight terms in
+   * @param maxNumFragments  the maximum number of fragments.
+   *
+   * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final String[] getBestFragments(
+    TokenStream tokenStream,
+    String text,
+    int maxNumFragments)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    maxNumFragments = Math.max(1, maxNumFragments); //sanity check
+
+    TextFragment[] frag =getBestTextFragments(tokenStream,text, true,maxNumFragments);
+
+    //Get text
+    ArrayList<String> fragTexts = new ArrayList<String>();
+    for (int i = 0; i < frag.length; i++)
+    {
+      if ((frag[i] != null) && (frag[i].getScore() > 0))
+      {
+        fragTexts.add(frag[i].toString());
+      }
+    }
+    return fragTexts.toArray(new String[0]);
+  }
+
+
+  /**
+   * Low level api to get the most relevant (formatted) sections of the document.
+   * This method has been made public to allow visibility of score information held in TextFragment objects.
+   * Thanks to Jason Calabrese for help in redefining the interface.
+   * @param tokenStream
+   * @param text
+   * @param maxNumFragments
+   * @param mergeContiguousFragments
+   * @throws IOException
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final TextFragment[] getBestTextFragments(
+    TokenStream tokenStream,
+    String text,
+    boolean mergeContiguousFragments,
+    int maxNumFragments)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    ArrayList<TextFragment> docFrags = new ArrayList<TextFragment>();
+    StringBuilder newText=new StringBuilder();
+
+    CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);
+    OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
+    tokenStream.addAttribute(PositionIncrementAttribute.class);
+    tokenStream.reset();
+
+    TextFragment currentFrag = new TextFragment(newText,newText.length(), docFrags.size());
+
     if (fragmentScorer instanceof QueryScorer) {
       ((QueryScorer) fragmentScorer).setMaxDocCharsToAnalyze(maxDocCharsToAnalyze);
     }
     
-		TokenStream newStream = fragmentScorer.init(tokenStream);
-		if(newStream != null) {
-		  tokenStream = newStream;
-		}
-		fragmentScorer.startFragment(currentFrag);
-		docFrags.add(currentFrag);
-
-		FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);
-
-		try
-		{
-
-			String tokenText;
-			int startOffset;
-			int endOffset;
-			int lastEndOffset = 0;
-			textFragmenter.start(text, tokenStream);
-
-			TokenGroup tokenGroup=new TokenGroup(tokenStream);
-
-			for (boolean next = tokenStream.incrementToken(); next && (offsetAtt.startOffset()< maxDocCharsToAnalyze);
-			      next = tokenStream.incrementToken())
-			{
-				if(	(offsetAtt.endOffset()>text.length())
-					||
-					(offsetAtt.startOffset()>text.length())
-					)						
-				{
-					throw new InvalidTokenOffsetsException("Token "+ termAtt.toString()
-							+" exceeds length of provided text sized "+text.length());
-				}
-				if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct()))
-				{
-					//the current token is distinct from previous tokens -
-					// markup the cached token group info
-					startOffset = tokenGroup.matchStartOffset;
-					endOffset = tokenGroup.matchEndOffset;
-					tokenText = text.substring(startOffset, endOffset);
-					String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
-					//store any whitespace etc from between this and last group
-					if (startOffset > lastEndOffset)
-						newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
-					newText.append(markedUpText);
-					lastEndOffset=Math.max(endOffset, lastEndOffset);
-					tokenGroup.clear();
-
-					//check if current token marks the start of a new fragment
-					if(textFragmenter.isNewFragment())
-					{
-						currentFrag.setScore(fragmentScorer.getFragmentScore());
-						//record stats for a new fragment
-						currentFrag.textEndPos = newText.length();
-						currentFrag =new TextFragment(newText, newText.length(), docFrags.size());
-						fragmentScorer.startFragment(currentFrag);
-						docFrags.add(currentFrag);
-					}
-				}
-
-				tokenGroup.addToken(fragmentScorer.getTokenScore());
-
-//				if(lastEndOffset>maxDocBytesToAnalyze)
-//				{
-//					break;
-//				}
-			}
-			currentFrag.setScore(fragmentScorer.getFragmentScore());
-
-			if(tokenGroup.numTokens>0)
-			{
-				//flush the accumulated text (same code as in above loop)
-				startOffset = tokenGroup.matchStartOffset;
-				endOffset = tokenGroup.matchEndOffset;
-				tokenText = text.substring(startOffset, endOffset);
-				String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
-				//store any whitespace etc from between this and last group
-				if (startOffset > lastEndOffset)
-					newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
-				newText.append(markedUpText);
-				lastEndOffset=Math.max(lastEndOffset,endOffset);
-			}
-
-			//Test what remains of the original text beyond the point where we stopped analyzing 
-			if (
-//					if there is text beyond the last token considered..
-					(lastEndOffset < text.length()) 
-					&&
-//					and that text is not too large...
-					(text.length()<= maxDocCharsToAnalyze)
-				)				
-			{
-				//append it to the last fragment
-				newText.append(encoder.encodeText(text.substring(lastEndOffset)));
-			}
-
-			currentFrag.textEndPos = newText.length();
-
-			//sort the most relevant sections of the text
-			for (Iterator<TextFragment> i = docFrags.iterator(); i.hasNext();)
-			{
-				currentFrag = i.next();
-
-				//If you are running with a version of Lucene before 11th Sept 03
-				// you do not have PriorityQueue.insert() - so uncomment the code below
-				/*
-									if (currentFrag.getScore() >= minScore)
-									{
-										fragQueue.put(currentFrag);
-										if (fragQueue.size() > maxNumFragments)
-										{ // if hit queue overfull
-											fragQueue.pop(); // remove lowest in hit queue
-											minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
-										}
-
-
-									}
-				*/
-				//The above code caused a problem as a result of Christoph Goller's 11th Sept 03
-				//fix to PriorityQueue. The correct method to use here is the new "insert" method
-				// USE ABOVE CODE IF THIS DOES NOT COMPILE!
-				fragQueue.insertWithOverflow(currentFrag);
-			}
-
-			//return the most relevant fragments
-			TextFragment frag[] = new TextFragment[fragQueue.size()];
-			for (int i = frag.length - 1; i >= 0; i--)
-			{
-				frag[i] = fragQueue.pop();
-			}
-
-			//merge any contiguous fragments to improve readability
-			if(mergeContiguousFragments)
-			{
-				mergeContiguousFragments(frag);
-				ArrayList<TextFragment> fragTexts = new ArrayList<TextFragment>();
-				for (int i = 0; i < frag.length; i++)
-				{
-					if ((frag[i] != null) && (frag[i].getScore() > 0))
-					{
-						fragTexts.add(frag[i]);
-					}
-				}
-				frag= fragTexts.toArray(new TextFragment[0]);
-			}
-
-			return frag;
-
-		}
-		finally
-		{
-			if (tokenStream != null)
-			{
-				try
-				{
-				  tokenStream.end();
-					tokenStream.close();
-				}
-				catch (Exception e)
-				{
-				}
-			}
-		}
-	}
-
-
-	/** Improves readability of a score-sorted list of TextFragments by merging any fragments
-	 * that were contiguous in the original text into one larger fragment with the correct order.
-	 * This will leave a "null" in the array entry for the lesser scored fragment. 
-	 * 
-	 * @param frag An array of document fragments in descending score
-	 */
-	private void mergeContiguousFragments(TextFragment[] frag)
-	{
-		boolean mergingStillBeingDone;
-		if (frag.length > 1)
-			do
-			{
-				mergingStillBeingDone = false; //initialise loop control flag
-				//for each fragment, scan other frags looking for contiguous blocks
-				for (int i = 0; i < frag.length; i++)
-				{
-					if (frag[i] == null)
-					{
-						continue;
-					}
-					//merge any contiguous blocks 
-					for (int x = 0; x < frag.length; x++)
-					{
-						if (frag[x] == null)
-						{
-							continue;
-						}
-						if (frag[i] == null)
-						{
-							break;
-						}
-						TextFragment frag1 = null;
-						TextFragment frag2 = null;
-						int frag1Num = 0;
-						int frag2Num = 0;
-						int bestScoringFragNum;
-						int worstScoringFragNum;
-						//if blocks are contiguous....
-						if (frag[i].follows(frag[x]))
-						{
-							frag1 = frag[x];
-							frag1Num = x;
-							frag2 = frag[i];
-							frag2Num = i;
-						}
-						else
-							if (frag[x].follows(frag[i]))
-							{
-								frag1 = frag[i];
-								frag1Num = i;
-								frag2 = frag[x];
-								frag2Num = x;
-							}
-						//merging required..
-						if (frag1 != null)
-						{
-							if (frag1.getScore() > frag2.getScore())
-							{
-								bestScoringFragNum = frag1Num;
-								worstScoringFragNum = frag2Num;
-							}
-							else
-							{
-								bestScoringFragNum = frag2Num;
-								worstScoringFragNum = frag1Num;
-							}
-							frag1.merge(frag2);
-							frag[worstScoringFragNum] = null;
-							mergingStillBeingDone = true;
-							frag[bestScoringFragNum] = frag1;
-						}
-					}
-				}
-			}
-			while (mergingStillBeingDone);
-	}
-	
-	
-	/**
-	 * Highlights terms in the  text , extracting the most relevant sections
-	 * and concatenating the chosen fragments with a separator (typically "...").
-	 * The document text is analysed in chunks to record hit statistics
-	 * across the document. After accumulating stats, the fragments with the highest scores
-	 * are returned in order as "separator" delimited strings.
-	 *
-	 * @param text        text to highlight terms in
-	 * @param maxNumFragments  the maximum number of fragments.
-	 * @param separator  the separator used to intersperse the document fragments (typically "...")
-	 *
-	 * @return highlighted text
-	 * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-	 */
-	public final String getBestFragments(
-		TokenStream tokenStream,	
-		String text,
-		int maxNumFragments,
-		String separator)
-		throws IOException, InvalidTokenOffsetsException
-	{
-		String sections[] =	getBestFragments(tokenStream,text, maxNumFragments);
-		StringBuilder result = new StringBuilder();
-		for (int i = 0; i < sections.length; i++)
-		{
-			if (i > 0)
-			{
-				result.append(separator);
-			}
-			result.append(sections[i]);
-		}
-		return result.toString();
-	}
+    TokenStream newStream = fragmentScorer.init(tokenStream);
+    if(newStream != null) {
+      tokenStream = newStream;
+    }
+    fragmentScorer.startFragment(currentFrag);
+    docFrags.add(currentFrag);
+
+    FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);
+
+    try
+    {
+
+      String tokenText;
+      int startOffset;
+      int endOffset;
+      int lastEndOffset = 0;
+      textFragmenter.start(text, tokenStream);
+
+      TokenGroup tokenGroup=new TokenGroup(tokenStream);
+
+      for (boolean next = tokenStream.incrementToken(); next && (offsetAtt.startOffset()< maxDocCharsToAnalyze);
+            next = tokenStream.incrementToken())
+      {
+        if(  (offsetAtt.endOffset()>text.length())
+          ||
+          (offsetAtt.startOffset()>text.length())
+          )
+        {
+          throw new InvalidTokenOffsetsException("Token "+ termAtt.toString()
+              +" exceeds length of provided text sized "+text.length());
+        }
+        if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct()))
+        {
+          //the current token is distinct from previous tokens -
+          // markup the cached token group info
+          startOffset = tokenGroup.matchStartOffset;
+          endOffset = tokenGroup.matchEndOffset;
+          tokenText = text.substring(startOffset, endOffset);
+          String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
+          //store any whitespace etc from between this and last group
+          if (startOffset > lastEndOffset)
+            newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
+          newText.append(markedUpText);
+          lastEndOffset=Math.max(endOffset, lastEndOffset);
+          tokenGroup.clear();
+
+          //check if current token marks the start of a new fragment
+          if(textFragmenter.isNewFragment())
+          {
+            currentFrag.setScore(fragmentScorer.getFragmentScore());
+            //record stats for a new fragment
+            currentFrag.textEndPos = newText.length();
+            currentFrag =new TextFragment(newText, newText.length(), docFrags.size());
+            fragmentScorer.startFragment(currentFrag);
+            docFrags.add(currentFrag);
+          }
+        }
+
+        tokenGroup.addToken(fragmentScorer.getTokenScore());
+
+//        if(lastEndOffset>maxDocBytesToAnalyze)
+//        {
+//          break;
+//        }
+      }
+      currentFrag.setScore(fragmentScorer.getFragmentScore());
+
+      if(tokenGroup.numTokens>0)
+      {
+        //flush the accumulated text (same code as in above loop)
+        startOffset = tokenGroup.matchStartOffset;
+        endOffset = tokenGroup.matchEndOffset;
+        tokenText = text.substring(startOffset, endOffset);
+        String markedUpText=formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
+        //store any whitespace etc from between this and last group
+        if (startOffset > lastEndOffset)
+          newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
+        newText.append(markedUpText);
+        lastEndOffset=Math.max(lastEndOffset,endOffset);
+      }
+
+      //Test what remains of the original text beyond the point where we stopped analyzing
+      if (
+//          if there is text beyond the last token considered..
+          (lastEndOffset < text.length())
+          &&
+//          and that text is not too large...
+          (text.length()<= maxDocCharsToAnalyze)
+        )
+      {
+        //append it to the last fragment
+        newText.append(encoder.encodeText(text.substring(lastEndOffset)));
+      }
+
+      currentFrag.textEndPos = newText.length();
+
+      //sort the most relevant sections of the text
+      for (Iterator<TextFragment> i = docFrags.iterator(); i.hasNext();)
+      {
+        currentFrag = i.next();
+
+        //If you are running with a version of Lucene before 11th Sept 03
+        // you do not have PriorityQueue.insert() - so uncomment the code below
+        /*
+                  if (currentFrag.getScore() >= minScore)
+                  {
+                    fragQueue.put(currentFrag);
+                    if (fragQueue.size() > maxNumFragments)
+                    { // if hit queue overfull
+                      fragQueue.pop(); // remove lowest in hit queue
+                      minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
+                    }
+
+
+                  }
+        */
+        //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
+        //fix to PriorityQueue. The correct method to use here is the new "insert" method
+        // USE ABOVE CODE IF THIS DOES NOT COMPILE!
+        fragQueue.insertWithOverflow(currentFrag);
+      }
+
+      //return the most relevant fragments
+      TextFragment frag[] = new TextFragment[fragQueue.size()];
+      for (int i = frag.length - 1; i >= 0; i--)
+      {
+        frag[i] = fragQueue.pop();
+      }
+
+      //merge any contiguous fragments to improve readability
+      if(mergeContiguousFragments)
+      {
+        mergeContiguousFragments(frag);
+        ArrayList<TextFragment> fragTexts = new ArrayList<TextFragment>();
+        for (int i = 0; i < frag.length; i++)
+        {
+          if ((frag[i] != null) && (frag[i].getScore() > 0))
+          {
+            fragTexts.add(frag[i]);
+          }
+        }
+        frag= fragTexts.toArray(new TextFragment[0]);
+      }
+
+      return frag;
+
+    }
+    finally
+    {
+      if (tokenStream != null)
+      {
+        try
+        {
+          tokenStream.end();
+          tokenStream.close();
+        }
+        catch (Exception e)
+        {
+        }
+      }
+    }
+  }
+
+
+  /** Improves readability of a score-sorted list of TextFragments by merging any fragments
+   * that were contiguous in the original text into one larger fragment with the correct order.
+   * This will leave a "null" in the array entry for the lesser scored fragment.
+   *
+   * @param frag An array of document fragments in descending score
+   */
+  private void mergeContiguousFragments(TextFragment[] frag)
+  {
+    boolean mergingStillBeingDone;
+    if (frag.length > 1)
+      do
+      {
+        mergingStillBeingDone = false; //initialise loop control flag
+        //for each fragment, scan other frags looking for contiguous blocks
+        for (int i = 0; i < frag.length; i++)
+        {
+          if (frag[i] == null)
+          {
+            continue;
+          }
+          //merge any contiguous blocks
+          for (int x = 0; x < frag.length; x++)
+          {
+            if (frag[x] == null)
+            {
+              continue;
+            }
+            if (frag[i] == null)
+            {
+              break;
+            }
+            TextFragment frag1 = null;
+            TextFragment frag2 = null;
+            int frag1Num = 0;
+            int frag2Num = 0;
+            int bestScoringFragNum;
+            int worstScoringFragNum;
+            //if blocks are contiguous....
+            if (frag[i].follows(frag[x]))
+            {
+              frag1 = frag[x];
+              frag1Num = x;
+              frag2 = frag[i];
+              frag2Num = i;
+            }
+            else
+              if (frag[x].follows(frag[i]))
+              {
+                frag1 = frag[i];
+                frag1Num = i;
+                frag2 = frag[x];
+                frag2Num = x;
+              }
+            //merging required..
+            if (frag1 != null)
+            {
+              if (frag1.getScore() > frag2.getScore())
+              {
+                bestScoringFragNum = frag1Num;
+                worstScoringFragNum = frag2Num;
+              }
+              else
+              {
+                bestScoringFragNum = frag2Num;
+                worstScoringFragNum = frag1Num;
+              }
+              frag1.merge(frag2);
+              frag[worstScoringFragNum] = null;
+              mergingStillBeingDone = true;
+              frag[bestScoringFragNum] = frag1;
+            }
+          }
+        }
+      }
+      while (mergingStillBeingDone);
+  }
+
+
+  /**
+   * Highlights terms in the text, extracting the most relevant sections
+   * and concatenating the chosen fragments with a separator (typically "...").
+   * The document text is analysed in chunks to record hit statistics
+   * across the document. After accumulating stats, the fragments with the highest scores
+   * are returned in order as "separator" delimited strings.
+   *
+   * @param text        text to highlight terms in
+   * @param maxNumFragments  the maximum number of fragments.
+   * @param separator  the separator used to intersperse the document fragments (typically "...")
+   *
+   * @return highlighted text
+   * @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
+   */
+  public final String getBestFragments(
+    TokenStream tokenStream,
+    String text,
+    int maxNumFragments,
+    String separator)
+    throws IOException, InvalidTokenOffsetsException
+  {
+    String sections[] =  getBestFragments(tokenStream,text, maxNumFragments);
+    StringBuilder result = new StringBuilder();
+    for (int i = 0; i < sections.length; i++)
+    {
+      if (i > 0)
+      {
+        result.append(separator);
+      }
+      result.append(sections[i]);
+    }
+    return result.toString();
+  }
 
   public int getMaxDocCharsToAnalyze() {
     return maxDocCharsToAnalyze;
@@ -487,35 +487,35 @@ public class Highlighter
   }
 
   
-	public Fragmenter getTextFragmenter()
-	{
-		return textFragmenter;
-	}
-
-	/**
-	 * @param fragmenter
-	 */
-	public void setTextFragmenter(Fragmenter fragmenter)
-	{
-		textFragmenter = fragmenter;
-	}
-
-	/**
-	 * @return Object used to score each text fragment 
-	 */
-	public Scorer getFragmentScorer()
-	{
-		return fragmentScorer;
-	}
-
-
-	/**
-	 * @param scorer
-	 */
-	public void setFragmentScorer(Scorer scorer)
-	{
-		fragmentScorer = scorer;
-	}
+  public Fragmenter getTextFragmenter()
+  {
+    return textFragmenter;
+  }
+
+  /**
+   * @param fragmenter
+   */
+  public void setTextFragmenter(Fragmenter fragmenter)
+  {
+    textFragmenter = fragmenter;
+  }
+
+  /**
+   * @return Object used to score each text fragment
+   */
+  public Scorer getFragmentScorer()
+  {
+    return fragmentScorer;
+  }
+
+
+  /**
+   * @param scorer
+   */
+  public void setFragmentScorer(Scorer scorer)
+  {
+    fragmentScorer = scorer;
+  }
 
     public Encoder getEncoder()
     {
@@ -528,17 +528,17 @@ public class Highlighter
 }
 class FragmentQueue extends PriorityQueue<TextFragment>
 {
-	public FragmentQueue(int size)
-	{
-		super(size);
-	}
-
-	@Override
-	public final boolean lessThan(TextFragment fragA, TextFragment fragB)
-	{
-		if (fragA.getScore() == fragB.getScore())
-			return fragA.fragNum > fragB.fragNum;
-		else
-			return fragA.getScore() < fragB.getScore();
-	}
+  public FragmentQueue(int size)
+  {
+    super(size);
+  }
+
+  @Override
+  public final boolean lessThan(TextFragment fragA, TextFragment fragB)
+  {
+    if (fragA.getScore() == fragB.getScore())
+      return fragA.fragNum > fragB.fragNum;
+    else
+      return fragA.getScore() < fragB.getScore();
+  }
 }
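
Tying the reindented API together, a hedged end-to-end sketch. The "content" field name, the SnippetUtil class, and the fragment settings are illustrative stand-ins, not anything this commit introduces.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.*;

// Hypothetical helper: best highlighted fragments for one stored field value.
public final class SnippetUtil {
  public static String snippet(Analyzer analyzer, Query query, String text)
      throws IOException, InvalidTokenOffsetsException {
    QueryScorer scorer = new QueryScorer(query);
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer);
    highlighter.setTextFragmenter(new SimpleFragmenter(40)); // ~40-char fragments
    TokenStream tokenStream =
        analyzer.tokenStream("content", new StringReader(text));
    // Up to 3 best-scoring fragments joined with "...", per the javadoc above.
    return highlighter.getBestFragments(tokenStream, text, 3, "...");
  }
}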


