lucene-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mikemcc...@apache.org
Subject svn commit: r1633314 [2/2] - in /lucene/dev/branches/lucene6005/lucene: analysis/common/src/java/org/apache/lucene/analysis/core/ classification/src/java/org/apache/lucene/classification/utils/ core/src/java/org/apache/lucene/codecs/blocktree/ core/src...
Date Tue, 21 Oct 2014 08:45:45 GMT
Added: lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java?rev=1633314&view=auto
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java (added)
+++ lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/document/StringTokenStream.java Tue Oct 21 08:45:44 2014
@@ -0,0 +1,73 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+
+final class StringTokenStream extends TokenStream {
+  private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+  private boolean used = false;
+  private String value = null;
+    
+  /** Creates a new TokenStream that returns a String as single token.
+   * <p>Warning: Does not initialize the value, you must call
+   * {@link #setValue(String)} afterwards!
+   */
+  StringTokenStream() {
+  }
+    
+  /** Sets the string value. */
+  void setValue(String value) {
+    this.value = value;
+  }
+
+  @Override
+  public boolean incrementToken() {
+    if (used) {
+      return false;
+    }
+    clearAttributes();
+    termAttribute.append(value);
+    offsetAttribute.setOffset(0, value.length());
+    used = true;
+    return true;
+  }
+
+  @Override
+  public void end() throws IOException {
+    super.end();
+    final int finalOffset = value.length();
+    offsetAttribute.setOffset(finalOffset, finalOffset);
+  }
+    
+  @Override
+  public void reset() {
+    used = false;
+  }
+
+  @Override
+  public void close() {
+    value = null;
+  }
+}
+

Modified: lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java Tue Oct 21 08:45:44 2014
@@ -402,35 +402,35 @@ final class DefaultIndexingChain extends
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new NumericDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((NumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericValue().longValue());
+        ((NumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericDocValue().longValue());
         break;
 
       case BINARY:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new BinaryDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((BinaryDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((BinaryDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
 
       case SORTED:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((SortedDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
         
       case SORTED_NUMERIC:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedNumericDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedNumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericValue().longValue());
+        ((SortedNumericDocValuesWriter) fp.docValuesWriter).addValue(docID, field.numericDocValue().longValue());
         break;
 
       case SORTED_SET:
         if (fp.docValuesWriter == null) {
           fp.docValuesWriter = new SortedSetDocValuesWriter(fp.fieldInfo, bytesUsed);
         }
-        ((SortedSetDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryValue());
+        ((SortedSetDocValuesWriter) fp.docValuesWriter).addValue(docID, field.binaryDocValue());
         break;
 
       default:
@@ -563,9 +563,8 @@ final class DefaultIndexingChain extends
 
       IndexableFieldType fieldType = field.fieldType();
 
-      final boolean analyzed = fieldType.tokenized() && docState.analyzer != null;
-        
       // only bother checking offsets if something will consume them.
+      // nocommit can't we do this todo now?
       // TODO: after we fix analyzers, also check if termVectorOffsets will be indexed.
       final boolean checkOffsets = fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
 
@@ -589,7 +588,8 @@ final class DefaultIndexingChain extends
           // chokes on a given document), then it's
           // non-aborting and (above) this one document
           // will be marked as deleted, but still
-          // consume a docID
+          // consume a docID since we will have already
+          // written some of its postings into our RAM buffer.
 
           int posIncr = invertState.posIncrAttribute.getPositionIncrement();
           invertState.position += posIncr;
@@ -662,7 +662,8 @@ final class DefaultIndexingChain extends
         }
       }
 
-      if (analyzed) {
+      // TODO: this "multi-field-ness" (and, Analyzer) should be outside of IW somehow
+      if (docState.analyzer != null) {
         invertState.position += docState.analyzer.getPositionIncrementGap(fieldInfo.name);
         invertState.offset += docState.analyzer.getOffsetGap(fieldInfo.name);
       }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/IndexableFieldType.java Tue Oct 21 08:45:44 2014
@@ -31,16 +31,6 @@ public interface IndexableFieldType {
   public boolean stored();
   
   /** 
-   * True if this field's value should be analyzed by the
-   * {@link Analyzer}.
-   * <p>
-   * This has no effect if {@link #indexOptions()} returns null.
-   */
-  // TODO: shouldn't we remove this?  Whether/how a field is
-  // tokenized is an impl detail under Field?
-  public boolean tokenized();
-
-  /** 
    * True if this field's indexed form should be also stored 
    * into term vectors.
    * <p>

Modified: lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/StorableField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/StorableField.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/StorableField.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/java/org/apache/lucene/index/StorableField.java Tue Oct 21 08:45:44 2014
@@ -28,15 +28,18 @@ import org.apache.lucene.util.BytesRef;
 
 public interface StorableField extends GeneralField {
 
-  /** Non-null if this field has a binary value */
+  /** Non-null if this field has a stored binary value */
   public BytesRef binaryValue();
 
+  /** Non-null if this field has a binary doc value */
+  public BytesRef binaryDocValue();
+
   /** Non-null if this field has a string value */
   public String stringValue();
 
-  /** Non-null if this field has a Reader value */
-  public Reader readerValue();
-
   /** Non-null if this field has a numeric value */
   public Number numericValue(); 
+
+  /** Non-null if this field has a numeric doc value */
+  public Number numericDocValue(); 
 }

Added: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java?rev=1633314&view=auto
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java (added)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java Tue Oct 21 08:45:44 2014
@@ -0,0 +1,830 @@
+package org.apache.lucene.document;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringReader;
+import java.util.Arrays;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.blocktree.Stats;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedNumericSortField;
+import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.junit.Ignore;
+
+public class TestDocument2 extends LuceneTestCase {
+
+  public void testBasic() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    doc.addLargeText("body", "some text");
+    doc.addShortText("title", "a title");
+    doc.addAtom("id", "29jafnn");
+    doc.addStored("bytes", new byte[7]);
+    doc.addNumber("number", 17);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testBinaryAtom() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    doc.addAtom("binary", new byte[5]);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    assertEquals(1, s.search(types.newTermQuery("binary", new byte[5]), 1).totalHits);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testBinaryAtomSort() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.enableStored("id");
+    types.enableSorted("binary");
+
+    Document2 doc = new Document2(types);
+    byte[] value = new byte[5];
+    value[0] = 1;
+    doc.addAtom("id", "0");
+    doc.addAtom("binary", value);
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addAtom("id", "1");
+    doc.addAtom("binary", new byte[5]);
+    w.addDocument(doc);
+    
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("binary"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("0", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testBinaryStored() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    doc.addStored("binary", new byte[5]);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    assertEquals(new BytesRef(new byte[5]), r.document(0).getBinaryValue("binary"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testSortedSetDocValues() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("sortedset", DocValuesType.SORTED_SET);
+
+    Document2 doc = new Document2(types);
+    doc.addAtom("sortedset", "one");
+    doc.addAtom("sortedset", "two");
+    doc.addAtom("sortedset", "three");
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    SortedSetDocValues ssdv = MultiDocValues.getSortedSetValues(r, "sortedset");
+    ssdv.setDocument(0);
+
+    long ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("one"), ssdv.lookupOrd(ord));
+
+    ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("three"), ssdv.lookupOrd(ord));
+
+    ord = ssdv.nextOrd();
+    assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
+    assertEquals(new BytesRef("two"), ssdv.lookupOrd(ord));
+
+    assertEquals(SortedSetDocValues.NO_MORE_ORDS, ssdv.nextOrd());
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testSortedNumericDocValues() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("sortednumeric", DocValuesType.SORTED_NUMERIC);
+
+    Document2 doc = new Document2(types);
+    doc.addNumber("sortednumeric", 3);
+    doc.addNumber("sortednumeric", 1);
+    doc.addNumber("sortednumeric", 2);
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    SortedNumericDocValues sndv = MultiDocValues.getSortedNumericValues(r, "sortednumeric");
+    sndv.setDocument(0);
+
+    assertEquals(3, sndv.count());
+    assertEquals(1, sndv.valueAt(0));
+    assertEquals(2, sndv.valueAt(1));
+    assertEquals(3, sndv.valueAt(2));
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testFloatRangeQuery() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.enableStored("id");
+    types.enableSorted("id");
+    //System.out.println("id type: " + types.getFieldType("id"));
+
+    Document2 doc = new Document2(types);
+    doc.addNumber("float", 3f);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addNumber("float", 2f);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addNumber("float", 7f);
+    doc.addAtom("id", "three");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    // Make sure range query hits the right number of hits
+    assertEquals(2, s.search(types.newRangeQuery("float", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(3, s.search(types.newRangeQuery("float", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(1, s.search(types.newRangeQuery("float", 1f, true,2.5f, true), 1).totalHits);
+
+    // Make sure doc values shows the correct float values:
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, types.newSort("id"));
+    assertEquals(3, hits.totalHits);
+    NumericDocValues ndv = MultiDocValues.getNumericValues(r, "float");
+    assertNotNull(ndv);
+    ScoreDoc hit = hits.scoreDocs[0];
+    StoredDocument storedDoc = r.document(hit.doc);
+    assertEquals("one", storedDoc.get("id"));
+    assertEquals(3f, Float.intBitsToFloat((int) ndv.get(hit.doc)), .001f);
+
+    hit = hits.scoreDocs[1];
+    storedDoc = r.document(hit.doc);
+    assertEquals("three", storedDoc.get("id"));
+    assertEquals(7f, Float.intBitsToFloat((int) ndv.get(hit.doc)), .001f);
+
+    hit = hits.scoreDocs[2];
+    storedDoc = r.document(hit.doc);
+    assertEquals("two", storedDoc.get("id"));
+    assertEquals(2f, Float.intBitsToFloat((int) ndv.get(hit.doc)), .001f);
+
+    // Make sure we can sort by the field:
+    hits = s.search(new MatchAllDocsQuery(), 3, types.newSort("float"));
+    assertEquals(3, hits.totalHits);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
+    assertEquals("three", r.document(hits.scoreDocs[2].doc).get("id"));
+
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  // Cannot change a field from INT to DOUBLE
+  public void testInvalidNumberTypeChange() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    doc.addNumber("int", 3);
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    try {
+      doc.addNumber("int", 2.0);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIntRangeQuery() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    doc.addNumber("int", 3);
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addNumber("int", 2);
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addNumber("int", 7);
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+
+    assertEquals(2, s.search(types.newRangeQuery("int", 0, true, 3, true), 1).totalHits);
+    assertEquals(3, s.search(types.newRangeQuery("int", 0, true, 10, true), 1).totalHits);
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testExcMissingSetIndexWriter() throws Exception {
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    Document2 doc = new Document2(types);
+    try {
+      doc.addLargeText("body", "some text");
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+  }
+
+  public void testExcAnalyzerForAtomField() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setAnalyzer("atom", a);
+    Document2 doc = new Document2(types);
+    try {
+      doc.addAtom("atom", "blahblah");
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    w.close();
+    dir.close();
+  }
+
+  // Can't ask for SORTED dv but then add the field as a number
+  public void testExcInvalidDocValuesTypeFirst() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("string", DocValuesType.SORTED);
+    Document2 doc = new Document2(types);
+    try {
+      doc.addNumber("string", 17);
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    doc.addAtom("string", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Can't ask for BINARY dv but then add the field as a number
+  public void testExcInvalidBinaryDocValuesTypeFirst() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("binary", DocValuesType.BINARY);
+    Document2 doc = new Document2(types);
+    try {
+      doc.addNumber("binary", 17);
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    doc.addAtom("binary", new byte[7]);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Cannot store Reader:
+  public void testExcStoreReaderFields() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.enableStored("body");
+    Document2 doc = new Document2(types);
+    try {
+      doc.addLargeText("body", new StringReader("a small string"));
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    doc.addLargeText("body", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // Cannot store TokenStream:
+  public void testExcStorePreTokenizedFields() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.enableStored("body");
+    Document2 doc = new Document2(types);
+    try {
+      doc.addLargeText("body", new CannedTokenStream());
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    doc.addLargeText("body", "a string");
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testSortable() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    // Normally sorting is not enabled for atom fields:
+    types.enableSorted("id");
+    types.enableStored("id");
+
+    Document2 doc = new Document2(types);
+
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+    doc = new Document2(types);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("id"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("one", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("two", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedNumeric() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    types.setMultiValued("numbers");
+    types.enableSorted("numbers");
+    types.enableStored("id");
+
+    Document2 doc = new Document2(types);
+    doc.addNumber("numbers", 1);
+    doc.addNumber("numbers", 2);
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addNumber("numbers", -10);
+    doc.addNumber("numbers", -20);
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("numbers"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testMultiValuedString() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+
+    types.setMultiValued("strings");
+    types.enableSorted("strings");
+    types.enableStored("id");
+
+    Document2 doc = new Document2(types);
+    doc.addAtom("strings", "abc");
+    doc.addAtom("strings", "baz");
+    doc.addAtom("id", "one");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addAtom("strings", "aaa");
+    doc.addAtom("strings", "bbb");
+    doc.addAtom("id", "two");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("strings"));
+    assertEquals(2, hits.scoreDocs.length);
+    assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
+    assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.BINARY
+  public void testExcMultiValuedDVBinary() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("binary", DocValuesType.BINARY);
+    try {
+      types.setMultiValued("binary");
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    assertFalse(types.getMultiValued("binary"));
+    Document2 doc = new Document2(types);
+    doc.addStored("binary", new byte[7]);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.SORTED
+  public void testExcMultiValuedDVSorted() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("sorted", DocValuesType.SORTED);
+    try {
+      types.setMultiValued("sorted");
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    assertFalse(types.getMultiValued("sorted"));
+    Document2 doc = new Document2(types);
+    doc.addStored("binary", new byte[7]);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  // You cannot have multi-valued DocValuesType.NUMERIC
+  public void testExcMultiValuedDVNumeric() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(w);
+    types.setDocValuesType("numeric", DocValuesType.NUMERIC);
+    try {
+      types.setMultiValued("numeric");
+      fail("did not hit expected exception");
+    } catch (IllegalStateException ise) {
+      // expected
+    }
+    assertFalse(types.getMultiValued("numeric"));
+    Document2 doc = new Document2(types);
+    doc.addNumber("numeric", 17);
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that a per-field postings format ("Memory") configured via
+   *  FieldTypes still indexes, stores and searches the field correctly:
+   *  each of two ids must be findable and its stored value retrievable. */
+  public void testPostingsFormat() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+
+    // Route this one field through the Memory postings format and also store it:
+    types.setPostingsFormat("id", "Memory");
+    types.enableStored("id");
+
+    Document2 doc = new Document2(types);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    // Near-real-time reader: sees the two docs without an explicit commit.
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(types.newTermQuery("id", "0"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
+    hits = s.search(types.newTermQuery("id", "1"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that a field indexed via addNumber(long) is matched by the
+   *  long-valued overload of FieldTypes.newTermQuery. */
+  public void testLongTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    // 1L pins the long overload (the int case is covered by testIntTermQuery):
+    doc.addNumber("id", 1L);
+    w.addDocument(doc);
+
+    // Near-real-time reader over the writer's uncommitted changes.
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(types.newTermQuery("id", 1L), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that a field indexed via addNumber(int) is matched by the
+   *  int-valued overload of FieldTypes.newTermQuery. */
+  public void testIntTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+
+    Document2 doc = new Document2(types);
+    // Plain int literal pins the int overload (long case: testLongTermQuery):
+    doc.addNumber("id", 1);
+    w.addDocument(doc);
+
+    // Near-real-time reader over the writer's uncommitted changes.
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(types.newTermQuery("id", 1), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that a binary (byte[]) value added with addStored on a field
+   *  whose index options were enabled is matched by the byte[] overload of
+   *  FieldTypes.newTermQuery. */
+  public void testBinaryTermQuery() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+    // Enabling index options makes the stored binary value also indexed;
+    // the term query below only matches if that happens -- presumably
+    // FieldTypes indexes it as a single binary term (verify in FieldTypes).
+    types.setIndexOptions("id", IndexOptions.DOCS_ONLY);
+
+    Document2 doc = new Document2(types);
+    // new byte[1] == a single zero byte; the query below uses the same value.
+    doc.addStored("id", new byte[1]);
+    w.addDocument(doc);
+
+    // Near-real-time reader over the writer's uncommitted changes.
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(types.newTermQuery("id", new byte[1]), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that a per-field doc-values format ("Memory") configured via
+   *  FieldTypes works end to end: the field is stored, gets SORTED doc values,
+   *  and can be both searched and sorted on. */
+  public void testDocValuesFormat() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+
+    // Route this field's doc values through the Memory format; enableSorted
+    // gives it SORTED doc values so newSort("id") below is legal.
+    types.setDocValuesFormat("id", "Memory");
+    types.enableStored("id");
+    types.enableSorted("id");
+
+    // Docs added in reverse id order, so the sorted search exercises the DV sort:
+    Document2 doc = new Document2(types);
+    doc.addAtom("id", "1");
+    w.addDocument(doc);
+
+    doc = new Document2(types);
+    doc.addAtom("id", "0");
+    w.addDocument(doc);
+
+    // Near-real-time reader: sees the two docs without an explicit commit.
+    IndexReader r = DirectoryReader.open(w, true);
+    IndexSearcher s = newSearcher(r);
+    TopDocs hits = s.search(types.newTermQuery("id", "0"), 1, types.newSort("id"));
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
+    hits = s.search(types.newTermQuery("id", "1"), 1);
+    assertEquals(1, hits.scoreDocs.length);
+    assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that setTermsDictBlockSize is honored: with block size 10 and
+   *  20 distinct terms, CheckIndex must report exactly two terms-only blocks
+   *  under one root block in the BlockTree terms dictionary for "id". */
+  public void testTermsDictTermsPerBlock() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+    types.setIndexOptions("id", IndexOptions.DOCS_ONLY);
+
+    types.setTermsDictBlockSize("id", 10);
+    // 10 terms sharing prefix "0" -> fills exactly one terms block:
+    for(int i=0;i<10;i++) {
+      Document2 doc = new Document2(types);
+      doc.addAtom("id", "0" + i);
+      w.addDocument(doc);
+    }
+    // ...and 10 more sharing prefix "1" -> a second terms block:
+    for(int i=0;i<10;i++) {
+      Document2 doc = new Document2(types);
+      doc.addAtom("id", "1" + i);
+      w.addDocument(doc);
+    }
+    w.close();
+
+    // Use CheckIndex to verify we got 2 terms blocks:
+    CheckIndex.Status checked = TestUtil.checkIndex(dir);
+    assertEquals(1, checked.segmentInfos.size());
+    CheckIndex.Status.SegmentInfoStatus segment = checked.segmentInfos.get(0);
+    assertNotNull(segment.termIndexStatus.blockTreeStats);
+    Stats btStats = segment.termIndexStatus.blockTreeStats.get("id");
+    assertNotNull(btStats);
+    assertEquals(2, btStats.termsOnlyBlockCount);
+    assertEquals(1, btStats.subBlocksOnlyBlockCount);
+    assertEquals(3, btStats.totalBlockCount);
+    // NOTE(review): w was already closed above, before CheckIndex ran; this
+    // second close is redundant and should be removed.
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that setDocValuesFormat rejects an unknown format name with
+   *  IllegalArgumentException, while a valid name ("Memory") is accepted
+   *  afterwards for the same field. */
+  public void testExcInvalidDocValuesFormat() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+    try {
+      // "foobar" is not a registered DocValuesFormat name:
+      types.setDocValuesFormat("id", "foobar");
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    // The failed call must not have poisoned the field; a real format works:
+    types.setDocValuesFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  /** Verifies that setPostingsFormat rejects an unknown format name with
+   *  IllegalArgumentException, while a valid name ("Memory") is accepted
+   *  afterwards for the same field. */
+  public void testExcInvalidPostingsFormat() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+    try {
+      // "foobar" is not a registered PostingsFormat name:
+      types.setPostingsFormat("id", "foobar");
+      fail("did not hit exception");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+    // The failed call must not have poisoned the field; a real format works:
+    types.setPostingsFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  /** Make sure that if we index an ATOM field, at search time we get KeywordAnalyzer for it:
+   *  tokenizing "foo bar" through the FieldTypes analyzer must yield the whole
+   *  string as one token (start offset 0, end offset 7), not two words. */
+  public void testAtomFieldUsesKeywordAnalyzer() throws Exception {
+    Directory dir = newDirectory();
+    Analyzer a = new MockAnalyzer(random());
+    FieldTypes types = new FieldTypes(a);
+    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    types.setIndexWriter(w);
+    Document2 doc = new Document2(types);
+    // addAtom marks "id" as an ATOM field, which should switch its analyzer:
+    doc.addAtom("id", "foo bar");
+    w.addDocument(doc);
+    // new int[1] == {0}: single token starting at offset 0, ending at 7.
+    BaseTokenStreamTestCase.assertTokenStreamContents(types.getAnalyzer().tokenStream("id", "foo bar"), new String[] {"foo bar"}, new int[1], new int[] {7});
+    w.close();
+    dir.close();
+  }
+
+  // nocommit test per-field analyzers
+
+  // nocommit test per-field sims
+
+  // nocommit test for pre-analyzed
+
+  // nocommit test multi-valued
+}

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java Tue Oct 21 08:45:44 2014
@@ -29,9 +29,11 @@ import org.apache.lucene.codecs.FilterCo
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
@@ -600,28 +602,28 @@ public class TestAddIndexes extends Luce
     LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
     lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(100);
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        new MockAnalyzer(random()))
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
+    IndexWriter writer = new IndexWriter(dir, types.getDefaultIndexWriterConfig()
         .setMaxBufferedDocs(5).setMergePolicy(lmp));
+    types.setIndexWriter(writer);
 
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
-    for(int i=0;i<60;i++)
+    Document2 doc = new Document2(types);
+    types.enableTermVectors("content");
+    types.enableTermVectorPositions("content");
+    types.enableTermVectorOffsets("content");
+    doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    for(int i=0;i<60;i++) {
       writer.addDocument(doc);
+    }
 
-    Document doc2 = new Document();
-    FieldType customType2 = new FieldType();
-    customType2.setStored(true);
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2));
-    for(int i=0;i<10;i++)
-      writer.addDocument(doc2);
+    doc = new Document2(types);
+    doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
+    for(int i=0;i<10;i++) {
+      writer.addDocument(doc);
+    }
     writer.close();
 
     Directory dir2 = newDirectory();

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java Tue Oct 21 08:45:44 2014
@@ -96,9 +96,6 @@ public class TestCodecs extends LuceneTe
         public boolean stored() { return false; }
 
         @Override
-        public boolean tokenized() { return false; }
-
-        @Override
         public boolean storeTermVectors() { return false; }
 
         @Override

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java Tue Oct 21 08:45:44 2014
@@ -163,174 +163,174 @@ public class TestDirectoryReader extends
    * @throws Exception on error
    */
   public void testGetFieldNames() throws Exception {
-      Directory d = newDirectory();
-      // set up writer
-      IndexWriter writer = new IndexWriter(
-          d,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-      );
+    Directory d = newDirectory();
+    // set up writer
+    IndexWriter writer = new IndexWriter(
+                                         d,
+                                         newIndexWriterConfig(new MockAnalyzer(random()))
+                                         );
 
-      Document doc = new Document();
+    Document doc = new Document();
 
-      FieldType customType3 = new FieldType();
-      customType3.setStored(true);
+    FieldType customType3 = new FieldType();
+    customType3.setStored(true);
       
+    doc.add(new StringField("keyword", "test1", Field.Store.YES));
+    doc.add(new TextField("text", "test1", Field.Store.YES));
+    doc.add(new Field("unindexed", "test1", customType3));
+    doc.add(new TextField("unstored","test1", Field.Store.NO));
+    writer.addDocument(doc);
+
+    writer.close();
+    // set up reader
+    DirectoryReader reader = DirectoryReader.open(d);
+    FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
+    assertNotNull(fieldInfos.fieldInfo("keyword"));
+    assertNotNull(fieldInfos.fieldInfo("text"));
+    assertNotNull(fieldInfos.fieldInfo("unindexed"));
+    assertNotNull(fieldInfos.fieldInfo("unstored"));
+    reader.close();
+    // add more documents
+    writer = new IndexWriter(
+                             d,
+                             newIndexWriterConfig(new MockAnalyzer(random()))
+                             .setOpenMode(OpenMode.APPEND)
+                             .setMergePolicy(newLogMergePolicy())
+                             );
+    // want to get some more segments here
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = new Document();
       doc.add(new StringField("keyword", "test1", Field.Store.YES));
       doc.add(new TextField("text", "test1", Field.Store.YES));
       doc.add(new Field("unindexed", "test1", customType3));
       doc.add(new TextField("unstored","test1", Field.Store.NO));
       writer.addDocument(doc);
+    }
+    // new fields are in some different segments (we hope)
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = new Document();
+      doc.add(new StringField("keyword2", "test1", Field.Store.YES));
+      doc.add(new TextField("text2", "test1", Field.Store.YES));
+      doc.add(new Field("unindexed2", "test1", customType3));
+      doc.add(new TextField("unstored2","test1", Field.Store.NO));
+      writer.addDocument(doc);
+    }
+    // new termvector fields
 
-      writer.close();
-      // set up reader
-      DirectoryReader reader = DirectoryReader.open(d);
-      FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
-      assertNotNull(fieldInfos.fieldInfo("keyword"));
-      assertNotNull(fieldInfos.fieldInfo("text"));
-      assertNotNull(fieldInfos.fieldInfo("unindexed"));
-      assertNotNull(fieldInfos.fieldInfo("unstored"));
-      reader.close();
-      // add more documents
-      writer = new IndexWriter(
-          d,
-          newIndexWriterConfig(new MockAnalyzer(random()))
-             .setOpenMode(OpenMode.APPEND)
-             .setMergePolicy(newLogMergePolicy())
-      );
-      // want to get some more segments here
-      int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new StringField("keyword", "test1", Field.Store.YES));
-        doc.add(new TextField("text", "test1", Field.Store.YES));
-        doc.add(new Field("unindexed", "test1", customType3));
-        doc.add(new TextField("unstored","test1", Field.Store.NO));
-        writer.addDocument(doc);
-      }
-      // new fields are in some different segments (we hope)
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new StringField("keyword2", "test1", Field.Store.YES));
-        doc.add(new TextField("text2", "test1", Field.Store.YES));
-        doc.add(new Field("unindexed2", "test1", customType3));
-        doc.add(new TextField("unstored2","test1", Field.Store.NO));
-        writer.addDocument(doc);
-      }
-      // new termvector fields
-
-      FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-      customType5.setStoreTermVectors(true);
-      FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-      customType6.setStoreTermVectors(true);
-      customType6.setStoreTermVectorOffsets(true);
-      FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-      customType7.setStoreTermVectors(true);
-      customType7.setStoreTermVectorPositions(true);
-      FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-      customType8.setStoreTermVectors(true);
-      customType8.setStoreTermVectorOffsets(true);
-      customType8.setStoreTermVectorPositions(true);
+    FieldType customType5 = new FieldType(TextField.TYPE_STORED);
+    customType5.setStoreTermVectors(true);
+    FieldType customType6 = new FieldType(TextField.TYPE_STORED);
+    customType6.setStoreTermVectors(true);
+    customType6.setStoreTermVectorOffsets(true);
+    FieldType customType7 = new FieldType(TextField.TYPE_STORED);
+    customType7.setStoreTermVectors(true);
+    customType7.setStoreTermVectorPositions(true);
+    FieldType customType8 = new FieldType(TextField.TYPE_STORED);
+    customType8.setStoreTermVectors(true);
+    customType8.setStoreTermVectorOffsets(true);
+    customType8.setStoreTermVectorPositions(true);
       
-      for (int i = 0; i < 5*mergeFactor; i++) {
-        doc = new Document();
-        doc.add(new TextField("tvnot", "tvnot", Field.Store.YES));
-        doc.add(new Field("termvector", "termvector", customType5));
-        doc.add(new Field("tvoffset", "tvoffset", customType6));
-        doc.add(new Field("tvposition", "tvposition", customType7));
-        doc.add(new Field("tvpositionoffset", "tvpositionoffset", customType8));
-        writer.addDocument(doc);
-      }
+    for (int i = 0; i < 5*mergeFactor; i++) {
+      doc = new Document();
+      doc.add(new TextField("tvnot", "tvnot", Field.Store.YES));
+      doc.add(new Field("termvector", "termvector", customType5));
+      doc.add(new Field("tvoffset", "tvoffset", customType6));
+      doc.add(new Field("tvposition", "tvposition", customType7));
+      doc.add(new Field("tvpositionoffset", "tvpositionoffset", customType8));
+      writer.addDocument(doc);
+    }
       
-      writer.close();
+    writer.close();
 
-      // verify fields again
-      reader = DirectoryReader.open(d);
-      fieldInfos = MultiFields.getMergedFieldInfos(reader);
-
-      Collection<String> allFieldNames = new HashSet<>();
-      Collection<String> indexedFieldNames = new HashSet<>();
-      Collection<String> notIndexedFieldNames = new HashSet<>();
-      Collection<String> tvFieldNames = new HashSet<>();
-
-      for(FieldInfo fieldInfo : fieldInfos) {
-        final String name = fieldInfo.name;
-        allFieldNames.add(name);
-        if (fieldInfo.isIndexed()) {
-          indexedFieldNames.add(name);
-        } else {
-          notIndexedFieldNames.add(name);
-        }
-        if (fieldInfo.hasVectors()) {
-          tvFieldNames.add(name);
-        }
+    // verify fields again
+    reader = DirectoryReader.open(d);
+    fieldInfos = MultiFields.getMergedFieldInfos(reader);
+
+    Collection<String> allFieldNames = new HashSet<>();
+    Collection<String> indexedFieldNames = new HashSet<>();
+    Collection<String> notIndexedFieldNames = new HashSet<>();
+    Collection<String> tvFieldNames = new HashSet<>();
+
+    for(FieldInfo fieldInfo : fieldInfos) {
+      final String name = fieldInfo.name;
+      allFieldNames.add(name);
+      if (fieldInfo.isIndexed()) {
+        indexedFieldNames.add(name);
+      } else {
+        notIndexedFieldNames.add(name);
       }
+      if (fieldInfo.hasVectors()) {
+        tvFieldNames.add(name);
+      }
+    }
 
-      assertTrue(allFieldNames.contains("keyword"));
-      assertTrue(allFieldNames.contains("text"));
-      assertTrue(allFieldNames.contains("unindexed"));
-      assertTrue(allFieldNames.contains("unstored"));
-      assertTrue(allFieldNames.contains("keyword2"));
-      assertTrue(allFieldNames.contains("text2"));
-      assertTrue(allFieldNames.contains("unindexed2"));
-      assertTrue(allFieldNames.contains("unstored2"));
-      assertTrue(allFieldNames.contains("tvnot"));
-      assertTrue(allFieldNames.contains("termvector"));
-      assertTrue(allFieldNames.contains("tvposition"));
-      assertTrue(allFieldNames.contains("tvoffset"));
-      assertTrue(allFieldNames.contains("tvpositionoffset"));
+    assertTrue(allFieldNames.contains("keyword"));
+    assertTrue(allFieldNames.contains("text"));
+    assertTrue(allFieldNames.contains("unindexed"));
+    assertTrue(allFieldNames.contains("unstored"));
+    assertTrue(allFieldNames.contains("keyword2"));
+    assertTrue(allFieldNames.contains("text2"));
+    assertTrue(allFieldNames.contains("unindexed2"));
+    assertTrue(allFieldNames.contains("unstored2"));
+    assertTrue(allFieldNames.contains("tvnot"));
+    assertTrue(allFieldNames.contains("termvector"));
+    assertTrue(allFieldNames.contains("tvposition"));
+    assertTrue(allFieldNames.contains("tvoffset"));
+    assertTrue(allFieldNames.contains("tvpositionoffset"));
       
-      // verify that only indexed fields were returned
-      assertEquals(11, indexedFieldNames.size());    // 6 original + the 5 termvector fields 
-      assertTrue(indexedFieldNames.contains("keyword"));
-      assertTrue(indexedFieldNames.contains("text"));
-      assertTrue(indexedFieldNames.contains("unstored"));
-      assertTrue(indexedFieldNames.contains("keyword2"));
-      assertTrue(indexedFieldNames.contains("text2"));
-      assertTrue(indexedFieldNames.contains("unstored2"));
-      assertTrue(indexedFieldNames.contains("tvnot"));
-      assertTrue(indexedFieldNames.contains("termvector"));
-      assertTrue(indexedFieldNames.contains("tvposition"));
-      assertTrue(indexedFieldNames.contains("tvoffset"));
-      assertTrue(indexedFieldNames.contains("tvpositionoffset"));
+    // verify that only indexed fields were returned
+    assertEquals(11, indexedFieldNames.size());    // 6 original + the 5 termvector fields 
+    assertTrue(indexedFieldNames.contains("keyword"));
+    assertTrue(indexedFieldNames.contains("text"));
+    assertTrue(indexedFieldNames.contains("unstored"));
+    assertTrue(indexedFieldNames.contains("keyword2"));
+    assertTrue(indexedFieldNames.contains("text2"));
+    assertTrue(indexedFieldNames.contains("unstored2"));
+    assertTrue(indexedFieldNames.contains("tvnot"));
+    assertTrue(indexedFieldNames.contains("termvector"));
+    assertTrue(indexedFieldNames.contains("tvposition"));
+    assertTrue(indexedFieldNames.contains("tvoffset"));
+    assertTrue(indexedFieldNames.contains("tvpositionoffset"));
       
-      // verify that only unindexed fields were returned
-      assertEquals(2, notIndexedFieldNames.size());    // the following fields
-      assertTrue(notIndexedFieldNames.contains("unindexed"));
-      assertTrue(notIndexedFieldNames.contains("unindexed2"));
+    // verify that only unindexed fields were returned
+    assertEquals(2, notIndexedFieldNames.size());    // the following fields
+    assertTrue(notIndexedFieldNames.contains("unindexed"));
+    assertTrue(notIndexedFieldNames.contains("unindexed2"));
               
-      // verify index term vector fields  
-      assertEquals(tvFieldNames.toString(), 4, tvFieldNames.size());    // 4 field has term vector only
-      assertTrue(tvFieldNames.contains("termvector"));
-
-      reader.close();
-      d.close();
-  }
-
-public void testTermVectors() throws Exception {
-  Directory d = newDirectory();
-  // set up writer
-  IndexWriter writer = new IndexWriter(
-      d,
-      newIndexWriterConfig(new MockAnalyzer(random()))
-          .setMergePolicy(newLogMergePolicy())
-  );
-  // want to get some more segments here
-  // new termvector fields
-  int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
-  FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-  customType5.setStoreTermVectors(true);
-  FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-  customType6.setStoreTermVectors(true);
-  customType6.setStoreTermVectorOffsets(true);
-  FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-  customType7.setStoreTermVectors(true);
-  customType7.setStoreTermVectorPositions(true);
-  FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-  customType8.setStoreTermVectors(true);
-  customType8.setStoreTermVectorOffsets(true);
-  customType8.setStoreTermVectorPositions(true);
-  for (int i = 0; i < 5 * mergeFactor; i++) {
-    Document doc = new Document();
+    // verify index term vector fields  
+    assertEquals(tvFieldNames.toString(), 4, tvFieldNames.size());    // 4 field has term vector only
+    assertTrue(tvFieldNames.contains("termvector"));
+
+    reader.close();
+    d.close();
+  }
+
+  public void testTermVectors() throws Exception {
+    Directory d = newDirectory();
+    // set up writer
+    IndexWriter writer = new IndexWriter(
+                                         d,
+                                         newIndexWriterConfig(new MockAnalyzer(random()))
+                                         .setMergePolicy(newLogMergePolicy())
+                                         );
+    // want to get some more segments here
+    // new termvector fields
+    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
+    FieldType customType5 = new FieldType(TextField.TYPE_STORED);
+    customType5.setStoreTermVectors(true);
+    FieldType customType6 = new FieldType(TextField.TYPE_STORED);
+    customType6.setStoreTermVectors(true);
+    customType6.setStoreTermVectorOffsets(true);
+    FieldType customType7 = new FieldType(TextField.TYPE_STORED);
+    customType7.setStoreTermVectors(true);
+    customType7.setStoreTermVectorPositions(true);
+    FieldType customType8 = new FieldType(TextField.TYPE_STORED);
+    customType8.setStoreTermVectors(true);
+    customType8.setStoreTermVectorOffsets(true);
+    customType8.setStoreTermVectorPositions(true);
+    for (int i = 0; i < 5 * mergeFactor; i++) {
+      Document doc = new Document();
       doc.add(new TextField("tvnot", "one two two three three three", Field.Store.YES));
       doc.add(new Field("termvector", "one two two three three three", customType5));
       doc.add(new Field("tvoffset", "one two two three three three", customType6));
@@ -338,32 +338,31 @@ public void testTermVectors() throws Exc
       doc.add(new Field("tvpositionoffset", "one two two three three three", customType8));
       
       writer.addDocument(doc);
+    }
+    writer.close();
+    d.close();
   }
-  writer.close();
-  d.close();
-}
 
-void assertTermDocsCount(String msg,
-                                   IndexReader reader,
-                                   Term term,
-                                   int expected)
-  throws IOException {
-  DocsEnum tdocs = TestUtil.docs(random(), reader,
-      term.field(),
-      new BytesRef(term.text()),
-      MultiFields.getLiveDocs(reader),
-      null,
-      0);
-  int count = 0;
-  if (tdocs != null) {
-    while(tdocs.nextDoc()!= DocIdSetIterator.NO_MORE_DOCS) {
-      count++;
+  void assertTermDocsCount(String msg,
+                           IndexReader reader,
+                           Term term,
+                           int expected)
+    throws IOException {
+    DocsEnum tdocs = TestUtil.docs(random(), reader,
+                                   term.field(),
+                                   new BytesRef(term.text()),
+                                   MultiFields.getLiveDocs(reader),
+                                   null,
+                                   0);
+    int count = 0;
+    if (tdocs != null) {
+      while(tdocs.nextDoc()!= DocIdSetIterator.NO_MORE_DOCS) {
+        count++;
+      }
     }
+    assertEquals(msg + ", count mismatch", expected, count);
   }
-  assertEquals(msg + ", count mismatch", expected, count);
-}
 
-  
   public void testBinaryFields() throws IOException {
     Directory dir = newDirectory();
     byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -438,35 +437,35 @@ void assertTermDocsCount(String msg,
     rmDir(fileDirName);
   }*/
   
-public void testFilesOpenClose() throws IOException {
-      // Create initial data set
-      Path dirFile = createTempDir("TestIndexReader.testFilesOpenClose");
-      Directory dir = newFSDirectory(dirFile);
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-      addDoc(writer, "test");
-      writer.close();
-      dir.close();
+  public void testFilesOpenClose() throws IOException {
+    // Create initial data set
+    Path dirFile = createTempDir("TestIndexReader.testFilesOpenClose");
+    Directory dir = newFSDirectory(dirFile);
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    addDoc(writer, "test");
+    writer.close();
+    dir.close();
 
-      // Try to erase the data - this ensures that the writer closed all files
-      IOUtils.rm(dirFile);
-      dir = newFSDirectory(dirFile);
-
-      // Now create the data set again, just as before
-      writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
-                                       .setOpenMode(OpenMode.CREATE));
-      addDoc(writer, "test");
-      writer.close();
-      dir.close();
+    // Try to erase the data - this ensures that the writer closed all files
+    IOUtils.rm(dirFile);
+    dir = newFSDirectory(dirFile);
 
-      // Now open existing directory and test that reader closes all files
-      dir = newFSDirectory(dirFile);
-      DirectoryReader reader1 = DirectoryReader.open(dir);
-      reader1.close();
-      dir.close();
+    // Now create the data set again, just as before
+    writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+                              .setOpenMode(OpenMode.CREATE));
+    addDoc(writer, "test");
+    writer.close();
+    dir.close();
 
-      // The following will fail if reader did not close
-      // all files
-      IOUtils.rm(dirFile);
+    // Now open existing directory and test that reader closes all files
+    dir = newFSDirectory(dirFile);
+    DirectoryReader reader1 = DirectoryReader.open(dir);
+    reader1.close();
+    dir.close();
+
+    // The following will fail if reader did not close
+    // all files
+    IOUtils.rm(dirFile);
   }
 
   public void testOpenReaderAfterDelete() throws IOException {
@@ -495,21 +494,19 @@ public void testFilesOpenClose() throws 
     dir.close();
   }
 
-  static void addDocumentWithFields(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
+  static void addDocumentWithFields(IndexWriter writer) throws IOException {
+    Document doc = new Document();
       
-      FieldType customType3 = new FieldType();
-      customType3.setStored(true);
-      doc.add(newStringField("keyword", "test1", Field.Store.YES));
-      doc.add(newTextField("text", "test1", Field.Store.YES));
-      doc.add(newField("unindexed", "test1", customType3));
-      doc.add(new TextField("unstored","test1", Field.Store.NO));
-      writer.addDocument(doc);
+    FieldType customType3 = new FieldType();
+    customType3.setStored(true);
+    doc.add(newStringField("keyword", "test1", Field.Store.YES));
+    doc.add(newTextField("text", "test1", Field.Store.YES));
+    doc.add(newField("unindexed", "test1", customType3));
+    doc.add(new TextField("unstored","test1", Field.Store.NO));
+    writer.addDocument(doc);
   }
 
-  static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
-  {
+  static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException {
     Document doc = new Document();
     
     FieldType customType3 = new FieldType();
@@ -521,34 +518,33 @@ public void testFilesOpenClose() throws 
     writer.addDocument(doc);
   }
 
-  static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      FieldType customType5 = new FieldType(TextField.TYPE_STORED);
-      customType5.setStoreTermVectors(true);
-      FieldType customType6 = new FieldType(TextField.TYPE_STORED);
-      customType6.setStoreTermVectors(true);
-      customType6.setStoreTermVectorOffsets(true);
-      FieldType customType7 = new FieldType(TextField.TYPE_STORED);
-      customType7.setStoreTermVectors(true);
-      customType7.setStoreTermVectorPositions(true);
-      FieldType customType8 = new FieldType(TextField.TYPE_STORED);
-      customType8.setStoreTermVectors(true);
-      customType8.setStoreTermVectorOffsets(true);
-      customType8.setStoreTermVectorPositions(true);
-      doc.add(newTextField("tvnot", "tvnot", Field.Store.YES));
-      doc.add(newField("termvector","termvector",customType5));
-      doc.add(newField("tvoffset","tvoffset", customType6));
-      doc.add(newField("tvposition","tvposition", customType7));
-      doc.add(newField("tvpositionoffset","tvpositionoffset", customType8));
+  static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException {
+    Document doc = new Document();
+    FieldType customType5 = new FieldType(TextField.TYPE_STORED);
+    customType5.setStoreTermVectors(true);
+    FieldType customType6 = new FieldType(TextField.TYPE_STORED);
+    customType6.setStoreTermVectors(true);
+    customType6.setStoreTermVectorOffsets(true);
+    FieldType customType7 = new FieldType(TextField.TYPE_STORED);
+    customType7.setStoreTermVectors(true);
+    customType7.setStoreTermVectorPositions(true);
+    FieldType customType8 = new FieldType(TextField.TYPE_STORED);
+    customType8.setStoreTermVectors(true);
+    customType8.setStoreTermVectorOffsets(true);
+    customType8.setStoreTermVectorPositions(true);
+    doc.add(newTextField("tvnot", "tvnot", Field.Store.YES));
+    doc.add(newField("termvector","termvector",customType5));
+    doc.add(newField("tvoffset","tvoffset", customType6));
+    doc.add(newField("tvposition","tvposition", customType7));
+    doc.add(newField("tvpositionoffset","tvpositionoffset", customType8));
       
-      writer.addDocument(doc);
+    writer.addDocument(doc);
   }
   
   static void addDoc(IndexWriter writer, String value) throws IOException {
-      Document doc = new Document();
-      doc.add(newTextField("content", value, Field.Store.NO));
-      writer.addDocument(doc);
+    Document doc = new Document();
+    doc.add(newTextField("content", value, Field.Store.NO));
+    writer.addDocument(doc);
   }
 
   // TODO: maybe this can reuse the logic of test dueling codecs?

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java Tue Oct 21 08:45:44 2014
@@ -21,10 +21,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.Document2;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
@@ -46,15 +44,18 @@ public class TestDocsAndPositions extend
    */
   public void testPositionsSimple() throws IOException {
     Directory directory = newDirectory();
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
-        newIndexWriterConfig(new MockAnalyzer(random())));
+                                                     types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(writer.w);
+    types.disableNorms(fieldName);
+    types.disableStored(fieldName);
+
     for (int i = 0; i < 39; i++) {
-      Document doc = new Document();
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setOmitNorms(true);
-      doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 "
+      Document2 doc = new Document2(types);
+      doc.addLargeText(fieldName, "1 2 3 4 5 6 7 8 9 10 "
           + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 "
-          + "1 2 3 4 5 6 7 8 9 10", customType));
+          + "1 2 3 4 5 6 7 8 9 10");
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
@@ -110,17 +111,18 @@ public class TestDocsAndPositions extend
    */
   public void testRandomPositions() throws IOException {
     Directory dir = newDirectory();
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-        newIndexWriterConfig(new MockAnalyzer(random()))
-          .setMergePolicy(newLogMergePolicy()));
+                                                     types.getDefaultIndexWriterConfig().setMergePolicy(newLogMergePolicy()));
     int numDocs = atLeast(47);
     int max = 1051;
     int term = random().nextInt(max);
     Integer[][] positionsInDoc = new Integer[numDocs][];
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
+    types.setIndexWriter(writer.w);
+    types.disableNorms(fieldName);
+    types.disableStored(fieldName);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document2 doc = new Document2(types);
       ArrayList<Integer> positions = new ArrayList<>();
       StringBuilder builder = new StringBuilder();
       int num = atLeast(131);
@@ -135,7 +137,7 @@ public class TestDocsAndPositions extend
         builder.append(term);
         positions.add(num);
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       positionsInDoc[i] = positions.toArray(new Integer[0]);
       writer.addDocument(doc);
     }
@@ -194,17 +196,19 @@ public class TestDocsAndPositions extend
 
   public void testRandomDocs() throws IOException {
     Directory dir = newDirectory();
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-                                                     newIndexWriterConfig(new MockAnalyzer(random()))
-                                                       .setMergePolicy(newLogMergePolicy()));
+                                                     types.getDefaultIndexWriterConfig().setMergePolicy(newLogMergePolicy()));
+    types.setIndexWriter(writer.w);
+    types.disableNorms(fieldName);
+    types.disableStored(fieldName);
+
     int numDocs = atLeast(49);
     int max = 15678;
     int term = random().nextInt(max);
     int[] freqInDoc = new int[numDocs];
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document2 doc = new Document2(types);
       StringBuilder builder = new StringBuilder();
       for (int j = 0; j < 199; j++) {
         int nextInt = random().nextInt(max);
@@ -213,7 +217,7 @@ public class TestDocsAndPositions extend
           freqInDoc[i]++;
         }
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       writer.addDocument(doc);
     }
 
@@ -274,13 +278,15 @@ public class TestDocsAndPositions extend
    */
   public void testLargeNumberOfPositions() throws IOException {
     Directory dir = newDirectory();
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
-        newIndexWriterConfig(new MockAnalyzer(random())));
+                                                     types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(writer.w);
+    types.disableNorms(fieldName);
+    types.disableStored(fieldName);
     int howMany = 1000;
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setOmitNorms(true);
     for (int i = 0; i < 39; i++) {
-      Document doc = new Document();
+      Document2 doc = new Document2(types);
       StringBuilder builder = new StringBuilder();
       for (int j = 0; j < howMany; j++) {
         if (j % 2 == 0) {
@@ -289,7 +295,7 @@ public class TestDocsAndPositions extend
           builder.append("odd ");
         }
       }
-      doc.add(newField(fieldName, builder.toString(), customType));
+      doc.addLargeText(fieldName, builder.toString());
       writer.addDocument(doc);
     }
 
@@ -330,9 +336,12 @@ public class TestDocsAndPositions extend
   
   public void testDocsEnumStart() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newStringField("foo", "bar", Field.Store.NO));
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(writer.w);
+    types.disableStored("foo");
+    Document2 doc = new Document2(types);
+    doc.addAtom("foo", "bar");
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     LeafReader r = getOnlySegmentReader(reader);
@@ -355,9 +364,12 @@ public class TestDocsAndPositions extend
   
   public void testDocsAndPositionsEnumStart() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("foo", "bar", Field.Store.NO));
+    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, types.getDefaultIndexWriterConfig());
+    types.setIndexWriter(writer.w);
+    types.disableStored("foo");
+    Document2 doc = new Document2(types);
+    doc.addLargeText("foo", "bar");
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     LeafReader r = getOnlySegmentReader(reader);

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java Tue Oct 21 08:45:44 2014
@@ -1889,41 +1889,6 @@ public class TestIndexWriter extends Luc
     dir.close();
   }
   
-  public void testDontInvokeAnalyzerForUnAnalyzedFields() throws Exception {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-
-      @Override
-      public int getPositionIncrementGap(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-
-      @Override
-      public int getOffsetGap(String fieldName) {
-        throw new IllegalStateException("don't invoke me!");
-      }
-    };
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
-    Field f2 = newField("field", "", customType);
-    doc.add(f2);
-    doc.add(f);
-    w.addDocument(doc);
-    w.close();
-    dir.close();
-  }
-  
   //LUCENE-1468 -- make sure opening an IndexWriter with
   // create=true does not remove non-index files
   

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java Tue Oct 21 08:45:44 2014
@@ -29,6 +29,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -52,11 +53,6 @@ public class TestIndexableField extends 
       }
 
       @Override
-      public boolean tokenized() {
-        return true;
-      }
-
-      @Override
       public boolean storeTermVectors() {
         return indexOptions() != null && counter % 2 == 1 && counter % 10 != 9;
       }
@@ -83,7 +79,11 @@ public class TestIndexableField extends 
 
       @Override
       public FieldInfo.IndexOptions indexOptions() {
-        return counter%10 == 3 ? null : FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        if ((counter % 10) != 3) {
+          return FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
+        } else {
+          return null;
+        }
       }
 
       @Override
@@ -120,6 +120,11 @@ public class TestIndexableField extends 
     }
 
     @Override
+    public BytesRef binaryDocValue() {
+      return binaryValue();
+    }
+
+    @Override
     public String stringValue() {
       final int fieldID = counter%10;
       if (fieldID != 3 && fieldID != 7) {
@@ -129,8 +134,7 @@ public class TestIndexableField extends 
       }
     }
 
-    @Override
-    public Reader readerValue() {
+    private Reader readerValue() {
       if (counter%10 == 7) {
         return new StringReader("text " + counter);
       } else {
@@ -144,6 +148,11 @@ public class TestIndexableField extends 
     }
 
     @Override
+    public Number numericDocValue() {
+      return null;
+    }
+
+    @Override
     public IndexableFieldType fieldType() {
       return fieldType;
     }
@@ -384,17 +393,22 @@ public class TestIndexableField extends 
     }
 
     @Override
+    public BytesRef binaryDocValue() {
+      return null;
+    }
+
+    @Override
     public String stringValue() {
       return "foobar";
     }
 
     @Override
-    public Reader readerValue() {
+    public Number numericValue() {
       return null;
     }
 
     @Override
-    public Number numericValue() {
+    public Number numericDocValue() {
       return null;
     }
 

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java Tue Oct 21 08:45:44 2014
@@ -227,8 +227,7 @@ public class TestOmitNorms extends Lucen
    */
   public void testOmitNormsCombos() throws IOException {
     // indexed with norms
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    Field norms = new Field("foo", "a", customType);
+    Field norms = new Field("foo", "a", TextField.TYPE_STORED);
     // indexed without norms
     FieldType customType1 = new FieldType(TextField.TYPE_STORED);
     customType1.setOmitNorms(true);
@@ -243,7 +242,7 @@ public class TestOmitNorms extends Lucen
     customType3.setOmitNorms(true);
     Field noNormsNoIndex = new Field("foo", "a", customType3);
     // not indexed nor stored (doesnt exist at all, we index a different field instead)
-    Field emptyNorms = new Field("bar", "a", customType);
+    Field emptyNorms = new Field("bar", "a", TextField.TYPE_STORED);
     
     assertNotNull(getNorms("foo", norms, norms));
     assertNull(getNorms("foo", norms, noNorms));

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Tue Oct 21 08:45:44 2014
@@ -43,7 +43,11 @@ public class TestTermVectorsWriter exten
   // LUCENE-1442
   public void testDoubleOffsetCounting() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    // nocommit sneaky behavior change ...
+    MockAnalyzer a = new MockAnalyzer(random());
+    a.setOffsetGap(0);
+    a.setPositionIncrementGap(0);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
     Document doc = new Document();
     FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
     customType.setStoreTermVectors(true);
@@ -52,8 +56,7 @@ public class TestTermVectorsWriter exten
     Field f = newField("field", "abcd", customType);
     doc.add(f);
     doc.add(f);
-    Field f2 = newField("field", "", customType);
-    doc.add(f2);
+    doc.add(newField("field", "", customType));
     doc.add(f);
     w.addDocument(doc);
     w.close();

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java?rev=1633314&r1=1633313&r2=1633314&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java Tue Oct 21 08:45:44 2014
@@ -167,17 +167,22 @@ public class LazyDocument {
     }
 
     @Override
+    public BytesRef binaryDocValue() {
+      return getRealValue().binaryDocValue();
+    }
+
+    @Override
     public String stringValue() {
       return getRealValue().stringValue();
     }
 
     @Override
-    public Reader readerValue() {
-      return getRealValue().readerValue();
+    public Number numericValue() {
+      return getRealValue().numericValue();
     }
 
     @Override
-    public Number numericValue() {
+    public Number numericDocValue() {
       return getRealValue().numericValue();
     }
 



Mime
View raw message