lucene-commits mailing list archives

From: mikemcc...@apache.org
Subject: svn commit: r1640053 [8/10] - in /lucene/dev/branches/lucene6005/lucene: core/src/java/org/apache/lucene/document/ core/src/java/org/apache/lucene/index/ core/src/test/org/apache/lucene/ core/src/test/org/apache/lucene/codecs/compressing/ core/src/test...
Date: Mon, 17 Nov 2014 00:43:47 GMT
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java Mon Nov 17 00:43:44 2014
@@ -30,9 +30,11 @@ import org.apache.lucene.analysis.MockPa
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
@@ -61,22 +63,27 @@ public class TestPostingsOffsets extends
     Directory dir = newDirectory();
     
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    Document2 doc = w.newDocument();
 
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(random().nextBoolean());
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
+      fieldTypes.enableTermVectors("content");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("content");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("content");
+      }
     }
+
     Token[] tokens = new Token[] {
       makeToken("a", 1, 0, 6),
       makeToken("b", 1, 8, 9),
       makeToken("a", 1, 9, 17),
       makeToken("c", 1, 19, 50),
     };
-    doc.add(new Field("content", new CannedTokenStream(tokens), ft));
+    doc.addLargeText("content", new CannedTokenStream(tokens));
 
     w.addDocument(doc);
     IndexReader r = w.getReader();
@@ -130,21 +137,27 @@ public class TestPostingsOffsets extends
     iwc = newIndexWriterConfig(analyzer);
     iwc.setMergePolicy(newLogMergePolicy()); // will rely on docids a bit for skipping
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
     
-    FieldType ft = new FieldType(TextField.TYPE_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
-      ft.setStoreTermVectorPositions(random().nextBoolean());
+      fieldTypes.enableTermVectors("numbers");
+      fieldTypes.enableTermVectors("oddeven");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("numbers");
+        fieldTypes.enableTermVectorOffsets("oddeven");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("numbers");
+        fieldTypes.enableTermVectorPositions("oddeven");
+      }
     }
     
     int numDocs = atLeast(500);
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(new Field("numbers", English.intToEnglish(i), ft));
-      doc.add(new Field("oddeven", (i % 2) == 0 ? "even" : "odd", ft));
-      doc.add(new StringField("id", "" + i, Field.Store.NO));
+      Document2 doc = w.newDocument();
+      doc.addLargeText("numbers", English.intToEnglish(i));
+      doc.addLargeText("oddeven", (i % 2) == 0 ? "even" : "odd");
+      doc.addAtom("id", "" + i);
       w.addDocument(doc);
     }
     
@@ -222,6 +235,7 @@ public class TestPostingsOffsets extends
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
     final int numDocs = atLeast(20);
     //final int numDocs = atLeast(5);
@@ -230,17 +244,19 @@ public class TestPostingsOffsets extends
 
     // TODO: randomize what IndexOptions we use; also test
     // changing this up in one IW buffered segment...:
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
     if (random().nextBoolean()) {
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random().nextBoolean());
-      ft.setStoreTermVectorPositions(random().nextBoolean());
+      fieldTypes.enableTermVectors("content");
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorOffsets("content");
+      }
+      if (random().nextBoolean()) {
+        fieldTypes.enableTermVectorPositions("content");
+      }
     }
 
     for(int docCount=0;docCount<numDocs;docCount++) {
-      Document doc = new Document();
-      doc.add(new IntField("id", docCount, Field.Store.YES));
-      doc.add(new NumericDocValuesField("id", docCount));
+      Document2 doc = w.newDocument();
+      doc.addInt("id", docCount);
       List<Token> tokens = new ArrayList<>();
       final int numTokens = atLeast(100);
       //final int numTokens = atLeast(20);
@@ -282,7 +298,7 @@ public class TestPostingsOffsets extends
         offset += offIncr + tokenOffset;
         //System.out.println("  " + token + " posIncr=" + token.getPositionIncrement() + " pos=" + pos + " off=" + token.startOffset() + "/" + token.endOffset() + " (freq=" + postingsByDoc.get(docCount).size() + ")");
       }
-      doc.add(new Field("content", new CannedTokenStream(tokens.toArray(new Token[tokens.size()])), ft));
+      doc.addLargeText("content", new CannedTokenStream(tokens.toArray(new Token[tokens.size()])));
       w.addDocument(doc);
     }
     final DirectoryReader r = w.getReader();
@@ -352,51 +368,17 @@ public class TestPostingsOffsets extends
     dir.close();
   }
   
-  public void testWithUnindexedFields() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
-    for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
-      // ensure at least one doc is indexed with offsets
-      if (i < 99 && random().nextInt(2) == 0) {
-        // stored only
-        FieldType ft = new FieldType();
-        ft.setStored(true);
-        doc.add(new Field("foo", "boo!", ft));
-      } else {
-        FieldType ft = new FieldType(TextField.TYPE_STORED);
-        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-        if (random().nextBoolean()) {
-          // store some term vectors for the checkindex cross-check
-          ft.setStoreTermVectors(true);
-          ft.setStoreTermVectorPositions(true);
-          ft.setStoreTermVectorOffsets(true);
-        }
-        doc.add(new Field("foo", "bar", ft));
-      }
-      riw.addDocument(doc);
-    }
-    CompositeReader ir = riw.getReader();
-    LeafReader slow = SlowCompositeReaderWrapper.wrap(ir);
-    FieldInfos fis = slow.getFieldInfos();
-    assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, fis.fieldInfo("foo").getIndexOptions());
-    slow.close();
-    ir.close();
-    riw.close();
-    dir.close();
-  }
-  
   public void testAddFieldTwice() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    FieldType customType3 = new FieldType(TextField.TYPE_STORED);
-    customType3.setStoreTermVectors(true);
-    customType3.setStoreTermVectorPositions(true);
-    customType3.setStoreTermVectorOffsets(true);    
-    customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    doc.add(new Field("content3", "here is more content with aaa aaa aaa", customType3));
-    doc.add(new Field("content3", "here is more content with aaa aaa aaa", customType3));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("content3");
+    fieldTypes.enableTermVectorPositions("content3");
+    fieldTypes.enableTermVectorOffsets("content3");
+
+    Document2 doc = iw.newDocument();
+    doc.addLargeText("content3", "here is more content with aaa aaa aaa");
+    doc.addLargeText("content3", "here is more content with aaa aaa aaa");
     iw.addDocument(doc);
     iw.close();
     dir.close(); // checkindex
@@ -471,13 +453,11 @@ public class TestPostingsOffsets extends
     };
     IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(analyzer));
     // add good document
-    Document doc = new Document();
+    Document2 doc = iw.newDocument();
     iw.addDocument(doc);
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-      doc.add(new Field("foo", "bar", ft));
-      doc.add(new Field("foo", "bar", ft));
+      doc.addLargeText("foo", "bar");
+      doc.addLargeText("foo", "bar");
       iw.addDocument(doc);
       fail("didn't get expected exception");
     } catch (IllegalArgumentException expected) {}
@@ -493,7 +473,7 @@ public class TestPostingsOffsets extends
   public void testLegalbutVeryLargeOffsets() throws Exception {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
-    Document doc = new Document();
+    Document2 doc = iw.newDocument();
     Token t1 = new Token("foo", 0, Integer.MAX_VALUE-500);
     if (random().nextBoolean()) {
       t1.setPayload(new BytesRef("test"));
@@ -502,14 +482,12 @@ public class TestPostingsOffsets extends
     TokenStream tokenStream = new CannedTokenStream(
         new Token[] { t1, t2 }
     );
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    FieldTypes fieldTypes = iw.getFieldTypes();
     // store some term vectors for the checkindex cross-check
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(true);
-    ft.setStoreTermVectorOffsets(true);
-    Field field = new Field("foo", tokenStream, ft);
-    doc.add(field);
+    fieldTypes.enableTermVectors("foo");
+    fieldTypes.enableTermVectorPositions("foo");
+    fieldTypes.enableTermVectorOffsets("foo");
+    doc.addLargeText("foo", tokenStream);
     iw.addDocument(doc);
     iw.close();
     dir.close();
@@ -519,18 +497,18 @@ public class TestPostingsOffsets extends
   private void checkTokens(Token[] field1, Token[] field2) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = riw.getFieldTypes();
+
     boolean success = false;
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
       // store some term vectors for the checkindex cross-check
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(true);
-      ft.setStoreTermVectorOffsets(true);
+      fieldTypes.enableTermVectors("body");
+      fieldTypes.enableTermVectorPositions("body");
+      fieldTypes.enableTermVectorOffsets("body");
      
-      Document doc = new Document();
-      doc.add(new Field("body", new CannedTokenStream(field1), ft));
-      doc.add(new Field("body", new CannedTokenStream(field2), ft));
+      Document2 doc = riw.newDocument();
+      doc.addLargeText("body", new CannedTokenStream(field1));
+      doc.addLargeText("body", new CannedTokenStream(field2));
       riw.addDocument(doc);
       riw.close();
       success = true;
@@ -546,17 +524,16 @@ public class TestPostingsOffsets extends
   private void checkTokens(Token[] tokens) throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
+    FieldTypes fieldTypes = riw.getFieldTypes();
     boolean success = false;
     try {
-      FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
       // store some term vectors for the checkindex cross-check
-      ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorPositions(true);
-      ft.setStoreTermVectorOffsets(true);
+      fieldTypes.enableTermVectors("body");
+      fieldTypes.enableTermVectorPositions("body");
+      fieldTypes.enableTermVectorOffsets("body");
      
-      Document doc = new Document();
-      doc.add(new Field("body", new CannedTokenStream(tokens), ft));
+      Document2 doc = riw.newDocument();
+      doc.addLargeText("body", new CannedTokenStream(tokens));
       riw.addDocument(doc);
       riw.close();
       success = true;
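
The change repeated throughout this file, and across the rest of the commit, is the same: per-Field FieldType configuration moves onto the writer's shared FieldTypes schema, and documents are created through the writer instead of via new Document(). A minimal sketch of the new idiom, using only branch APIs that appear in this diff (the deleted setIndexOptions calls suggest addLargeText fields default to offsets-capable index options on the lucene6005 branch):

    // lucene6005 branch: field options live on the writer's FieldTypes,
    // not on individual Field instances.
    RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableTermVectors("content");
    fieldTypes.enableTermVectorPositions("content");
    fieldTypes.enableTermVectorOffsets("content");

    // Documents are created by the writer, so they share that schema.
    Document2 doc = w.newDocument();
    doc.addLargeText("content", "some analyzed text");
    w.addDocument(doc);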

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestReaderClosed.java Mon Nov 17 00:43:44 2014
@@ -22,6 +22,7 @@ import java.util.concurrent.RejectedExec
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
@@ -43,15 +44,12 @@ public class TestReaderClosed extends Lu
         newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
           .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000)));
     
-    Document doc = new Document();
-    Field field = newStringField("field", "", Field.Store.NO);
-    doc.add(field);
-
     // we generate awful prefixes: good for testing.
     // but for preflex codec, the test can be very slow, so use fewer iterations.
     int num = atLeast(10);
     for (int i = 0; i < num; i++) {
-      field.setStringValue(TestUtil.randomUnicodeString(random(), 10));
+      Document2 doc = writer.newDocument();
+      doc.addAtom("field", TestUtil.randomUnicodeString(random(), 10));
       writer.addDocument(doc);
     }
     reader = writer.getReader();

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestRollback.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestRollback.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestRollback.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestRollback.java Mon Nov 17 00:43:44 2014
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
@@ -30,8 +31,8 @@ public class TestRollback extends Lucene
     Directory dir = newDirectory();
     RandomIndexWriter rw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 5; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("pk", Integer.toString(i), Field.Store.YES));
+      Document2 doc = rw.newDocument();
+      doc.addAtom("pk", Integer.toString(i));
       rw.addDocument(doc);
     }
     rw.close();
@@ -42,10 +43,10 @@ public class TestRollback extends Lucene
                                            .setOpenMode(IndexWriterConfig.OpenMode.APPEND));
 
     for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
       String value = Integer.toString(i);
-      doc.add(newStringField("pk", value, Field.Store.YES));
-      doc.add(newStringField("text", "foo", Field.Store.YES));
+      doc.addAtom("pk", value);
+      doc.addAtom("text", "foo");
       w.updateDocument(new Term("pk", value), doc);
     }
     w.rollback();

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java Mon Nov 17 00:43:44 2014
@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
@@ -37,8 +38,8 @@ public class TestSameTokenSamePosition e
   public void test() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(new TextField("eng", new BugReproTokenStream()));
+    Document2 doc = riw.newDocument();
+    doc.addLargeText("eng", new BugReproTokenStream());
     riw.addDocument(doc);
     riw.close();
     dir.close();
@@ -51,8 +52,8 @@ public class TestSameTokenSamePosition e
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir);
     for (int i = 0; i < 100; i++) {
-      Document doc = new Document();
-      doc.add(new TextField("eng", new BugReproTokenStream()));
+      Document2 doc = riw.newDocument();
+      doc.addLargeText("eng", new BugReproTokenStream());
       riw.addDocument(doc);
     }
     riw.close();

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java Mon Nov 17 00:43:44 2014
@@ -43,11 +43,9 @@ public class TestSegmentMerger extends L
   private String mergedSegment = "test";
   //First segment to be merged
   private Directory merge1Dir;
-  private Document doc1 = new Document();
   private SegmentReader reader1 = null;
   //Second Segment to be merged
   private Directory merge2Dir;
-  private Document doc2 = new Document();
   private SegmentReader reader2 = null;
 
   @Override

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java Mon Nov 17 00:43:44 2014
@@ -19,14 +19,15 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
 
 
 public class TestSegmentTermEnum extends LuceneTestCase {
@@ -133,10 +134,9 @@ public class TestSegmentTermEnum extends
     reader.close();
   }
 
-  private void addDoc(IndexWriter writer, String value) throws IOException
-  {
-    Document doc = new Document();
-    doc.add(newTextField("content", value, Field.Store.NO));
+  private void addDoc(IndexWriter writer, String value) throws IOException {
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("content", value);
     writer.addDocument(doc);
   }
 }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java Mon Nov 17 00:43:44 2014
@@ -19,8 +19,10 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -34,9 +36,9 @@ public class TestSizeBoundedForceMerge e
 
   private void addDocs(IndexWriter writer, int numDocs, boolean withID) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
+      Document2 doc = writer.newDocument();
       if (withID) {
-        doc.add(new StringField("id", "" + i, Field.Store.NO));
+        doc.addUniqueInt("id", i);
       }
       writer.addDocument(doc);
     }
@@ -281,13 +283,12 @@ public class TestSizeBoundedForceMerge e
     
     IndexWriterConfig conf = newWriterConfig();
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     addDocs(writer, 3);
     addDocs(writer, 5);
     addDocs(writer, 3);
     
-    // delete the last document, so that the last segment is merged.
-    writer.deleteDocuments(new Term("id", "10"));
     writer.close();
     
     conf = newWriterConfig();
@@ -334,12 +335,13 @@ public class TestSizeBoundedForceMerge e
     
     IndexWriterConfig conf = newWriterConfig();
     IndexWriter writer = new IndexWriter(dir, conf);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
     addDocs(writer, 5, true);
     
     // delete the last document
     
-    writer.deleteDocuments(new Term("id", "4"));
+    writer.deleteDocuments(fieldTypes.newIntTerm("id", 4));
     writer.close();
     
     conf = newWriterConfig();
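
Note the id change in addDocs above: addUniqueInt replaces the string-valued StringField, so the delete in the second test goes through fieldTypes.newIntTerm instead of a hand-built Term. A sketch of that pairing, assuming the same branch APIs:

    // Index an int-typed unique id (lucene6005 branch API).
    Document2 doc = writer.newDocument();
    doc.addUniqueInt("id", 4);
    writer.addDocument(doc);

    // Delete by the same int value; FieldTypes builds the matching term.
    FieldTypes fieldTypes = writer.getFieldTypes();
    writer.deleteDocuments(fieldTypes.newIntTerm("id", 4));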

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java Mon Nov 17 00:43:44 2014
@@ -24,8 +24,10 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
@@ -71,7 +73,7 @@ public class TestSnapshotDeletionPolicy 
       throws RuntimeException, IOException {
     for (int i = 0; i < numSnapshots; i++) {
       // create dummy document to trigger commit.
-      writer.addDocument(new Document());
+      writer.addDocument(writer.newDocument());
       writer.commit();
       snapshots.add(sdp.snapshot());
     }
@@ -123,16 +125,16 @@ public class TestSnapshotDeletionPolicy 
     }
     dp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
     writer.commit();
-    
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
+
     final Thread t = new Thread() {
         @Override
         public void run() {
-          Document doc = new Document();
-          FieldType customType = new FieldType(TextField.TYPE_STORED);
-          customType.setStoreTermVectors(true);
-          customType.setStoreTermVectorPositions(true);
-          customType.setStoreTermVectorOffsets(true);
-          doc.add(newField("content", "aaa", customType));
+          Document2 doc = writer.newDocument();
+          doc.addLargeText("content", "aaa");
           do {
             for(int i=0;i<27;i++) {
               try {
@@ -172,12 +174,8 @@ public class TestSnapshotDeletionPolicy 
     // Add one more document to force writer to commit a
     // final segment, so deletion policy has a chance to
     // delete again:
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    doc.add(newField("content", "aaa", customType));
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
 
     // Make sure we don't have any leftover files in the
@@ -299,7 +297,7 @@ public class TestSnapshotDeletionPolicy 
         @Override
         public void run() {
           try {
-            writer.addDocument(new Document());
+            writer.addDocument(writer.newDocument());
             writer.commit();
             snapshots[finalI] = sdp.snapshot();
           } catch (Exception e) {
@@ -319,7 +317,7 @@ public class TestSnapshotDeletionPolicy 
     }
 
     // Do one last commit, so that after we release all snapshots, we stay w/ one commit
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     for (int i=0;i<threads.length;i++) {
@@ -369,7 +367,7 @@ public class TestSnapshotDeletionPolicy 
     
     // Create another commit - we must do that, because otherwise the "snapshot"
     // files will still remain in the index, since it's the last commit.
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     // Release
@@ -387,7 +385,7 @@ public class TestSnapshotDeletionPolicy 
 
     IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy()));
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     IndexCommit s1 = sdp.snapshot();
@@ -395,7 +393,7 @@ public class TestSnapshotDeletionPolicy 
     assertSame(s1, s2); // should be the same instance
     
     // create another commit
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     
     // release "s1" should not delete "s2"
@@ -418,12 +416,12 @@ public class TestSnapshotDeletionPolicy 
     }
     IndexWriter writer = new IndexWriter(dir, getConfig(random(), getDeletionPolicy()));
     SnapshotDeletionPolicy sdp = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit();
     IndexCommit s1 = sdp.snapshot();
 
     // create another commit, not snapshotted.
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.close();
 
     // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1"

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java Mon Nov 17 00:43:44 2014
@@ -36,23 +36,20 @@ public class TestStressAdvance extends L
       Directory dir = newDirectory();
       RandomIndexWriter w = new RandomIndexWriter(random(), dir);
       final Set<Integer> aDocs = new HashSet<>();
-      final Document doc = new Document();
-      final Field f = newStringField("field", "", Field.Store.NO);
-      doc.add(f);
-      final Field idField = newStringField("id", "", Field.Store.YES);
-      doc.add(idField);
       int num = atLeast(4097);
       if (VERBOSE) {
         System.out.println("\nTEST: numDocs=" + num);
       }
       for(int id=0;id<num;id++) {
+        Document2 doc = w.newDocument();
         if (random().nextInt(4) == 3) {
-          f.setStringValue("a");
+          doc.addAtom("field", "a");
           aDocs.add(id);
         } else {
-          f.setStringValue("b");
+          doc.addAtom("field", "b");
         }
-        idField.setStringValue(""+id);
+        
+        doc.addUniqueInt("id", id);
         w.addDocument(doc);
         if (VERBOSE) {
           System.out.println("\nTEST: doc upto " + id);
@@ -67,7 +64,7 @@ public class TestStressAdvance extends L
       final DirectoryReader r = w.getReader();
       final int[] idToDocID = new int[r.maxDoc()];
       for(int docID=0;docID<idToDocID.length;docID++) {
-        int id = Integer.parseInt(r.document(docID).getString("id"));
+        int id = r.document(docID).getInt("id");
         if (aDocs.contains(id)) {
           aDocIDs.add(docID);
         } else {
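
Retrieval is typed as well: rather than parsing a stored string, the test reads the int back directly from the retrieved document. A sketch, assuming the Document2 accessors shown in this hunk:

    // Stored values come back typed on this branch: no Integer.parseInt.
    DirectoryReader r = w.getReader();
    for (int docID = 0; docID < r.maxDoc(); docID++) {
      int id = r.document(docID).getInt("id"); // was: Integer.parseInt(...getString("id"))
      // ... map id to docID ...
    }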

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressDeletes.java Mon Nov 17 00:43:44 2014
@@ -21,8 +21,10 @@ import java.util.concurrent.ConcurrentHa
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -44,6 +46,8 @@ public class TestStressDeletes extends L
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     final IndexWriter w = new IndexWriter(dir, iwc);
+    final FieldTypes fieldTypes = w.getFieldTypes();
+
     final int iters = atLeast(2000);
     final Map<Integer,Boolean> exists = new ConcurrentHashMap<>();
     Thread[] threads = new Thread[TestUtil.nextInt(random(), 2, 6)];
@@ -59,12 +63,12 @@ public class TestStressDeletes extends L
                 synchronized (locks[id]) {
                   Boolean v = exists.get(id);
                   if (v == null || v.booleanValue() == false) {
-                    Document doc = new Document();
-                    doc.add(newStringField("id", ""+id, Field.Store.NO));
+                    Document2 doc = w.newDocument();
+                    doc.addUniqueInt("id", id);
                     w.addDocument(doc);
                     exists.put(id, true);
                   } else {
-                    w.deleteDocuments(new Term("id", ""+id));
+                    w.deleteDocuments(fieldTypes.newIntTerm("id", id));
                     exists.put(id, false);
                   }
                 }
@@ -92,7 +96,7 @@ public class TestStressDeletes extends L
     IndexSearcher s = newSearcher(r);
     for(Map.Entry<Integer,Boolean> ent : exists.entrySet()) {
       int id = ent.getKey();
-      TopDocs hits = s.search(new TermQuery(new Term("id", ""+id)), 1);
+      TopDocs hits = s.search(fieldTypes.newIntTermQuery("id", id), 1);
       if (ent.getValue()) {
         assertEquals(1, hits.totalHits);
       } else {
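
The same FieldTypes object that builds delete terms also builds queries, which is how the verification pass above searches on the int id without string conversion. A minimal sketch under the same branch-API assumption (the id value here is a hypothetical stand-in):

    IndexSearcher s = newSearcher(r);
    FieldTypes fieldTypes = w.getFieldTypes();
    int id = 42; // hypothetical id to look up
    // Replaces: s.search(new TermQuery(new Term("id", "" + id)), 1)
    TopDocs hits = s.search(fieldTypes.newIntTermQuery("id", id), 1);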

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java Mon Nov 17 00:43:44 2014
@@ -74,19 +74,21 @@ public class TestStressIndexing extends 
 
     @Override
     public void doWork() throws Exception {
+      FieldTypes fieldTypes = writer.getFieldTypes();
+
       // Add 10 docs:
       for(int j=0; j<10; j++) {
-        Document d = new Document();
+        Document2 d = writer.newDocument();
         int n = random().nextInt();
-        d.add(newStringField("id", Integer.toString(nextID++), Field.Store.YES));
-        d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
+        d.addInt("id", nextID++);
+        d.addLargeText("contents", English.intToEnglish(n));
         writer.addDocument(d);
       }
 
       // Delete 5 docs:
       int deleteID = nextID-1;
       for(int j=0; j<5; j++) {
-        writer.deleteDocuments(new Term("id", ""+deleteID));
+        writer.deleteDocuments(fieldTypes.newIntTerm("id", deleteID));
         deleteID -= 2;
       }
     }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java Mon Nov 17 00:43:44 2014
@@ -232,9 +232,9 @@ public class TestStressNRT extends Lucen
 
                     // add tombstone first
                     if (tombstones) {
-                      Document d = new Document();
-                      d.add(newStringField("id", "-"+Integer.toString(id), Field.Store.YES));
-                      d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                      Document2 d = writer.newDocument();
+                      d.addAtom("id", "-"+Integer.toString(id));
+                      d.addStored(field, Long.toString(nextVal));
                       writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                     }
 
@@ -248,9 +248,9 @@ public class TestStressNRT extends Lucen
 
                     // add tombstone first
                     if (tombstones) {
-                      Document d = new Document();
-                      d.add(newStringField("id", "-"+Integer.toString(id), Field.Store.YES));
-                      d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                      Document2 d = writer.newDocument();
+                      d.addAtom("id", "-"+Integer.toString(id));
+                      d.addStored(field, Long.toString(nextVal));
                       writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                     }
 
@@ -261,9 +261,9 @@ public class TestStressNRT extends Lucen
                     model.put(id, -nextVal);
                   } else {
                     // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
-                    Document d = new Document();
-                    d.add(newStringField("id", Integer.toString(id), Field.Store.YES));
-                    d.add(newField(field, Long.toString(nextVal), storedOnlyType));
+                    Document2 d = writer.newDocument();
+                    d.addAtom("id", Integer.toString(id));
+                    d.addStored(field, Long.toString(nextVal));
                     if (VERBOSE) {
                       System.out.println("TEST: " + Thread.currentThread().getName() + ": u id:" + id + " val=" + nextVal);
                     }
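
The tombstone hunks above show the branch's split between indexed and stored-only values: the id is an indexed atom, while the field's value is stored without being indexed via addStored. A sketch of that shape, with the semantics inferred from the fields they replace (newStringField and the storedOnlyType field):

    Document2 d = writer.newDocument();
    d.addAtom("id", "-" + Integer.toString(id));   // indexed as a single un-analyzed token
    d.addStored(field, Long.toString(nextVal));    // stored only, not indexed
    writer.updateDocument(new Term("id", "-" + Integer.toString(id)), d);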

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java Mon Nov 17 00:43:44 2014
@@ -17,8 +17,10 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -34,22 +36,17 @@ public class TestSumDocFreq extends Luce
     
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = writer.getFieldTypes();
     
-    Document doc = new Document();
-    Field id = newStringField("id", "", Field.Store.NO);
-    Field field1 = newTextField("foo", "", Field.Store.NO);
-    Field field2 = newTextField("bar", "", Field.Store.NO);
-    doc.add(id);
-    doc.add(field1);
-    doc.add(field2);
     for (int i = 0; i < numDocs; i++) {
-      id.setStringValue("" + i);
+      Document2 doc = writer.newDocument();
+      doc.addUniqueInt("id", i);
       char ch1 = (char) TestUtil.nextInt(random(), 'a', 'z');
       char ch2 = (char) TestUtil.nextInt(random(), 'a', 'z');
-      field1.setStringValue("" + ch1 + " " + ch2);
+      doc.addLargeText("foo", "" + ch1 + " " + ch2);
       ch1 = (char) TestUtil.nextInt(random(), 'a', 'z');
       ch2 = (char) TestUtil.nextInt(random(), 'a', 'z');
-      field2.setStringValue("" + ch1 + " " + ch2);
+      doc.addLargeText("bar", "" + ch1 + " " + ch2);
       writer.addDocument(doc);
     }
     
@@ -60,7 +57,7 @@ public class TestSumDocFreq extends Luce
     
     int numDeletions = atLeast(20);
     for (int i = 0; i < numDeletions; i++) {
-      writer.deleteDocuments(new Term("id", "" + random().nextInt(numDocs)));
+      writer.deleteDocuments(fieldTypes.newIntTerm("id", random().nextInt(numDocs)));
     }
     writer.forceMerge(1);
     writer.close();

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java Mon Nov 17 00:43:44 2014
@@ -26,9 +26,11 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -96,28 +98,21 @@ public class TestTermVectorsReader exten
             setMergePolicy(newLogMergePolicy(false, 10))
             .setUseCompoundFile(false)
     );
-
-    Document doc = new Document();
+    FieldTypes fieldTypes = writer.getFieldTypes();
     for(int i=0;i<testFields.length;i++) {
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      if (testFieldsStorePos[i] && testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
-        customType.setStoreTermVectorOffsets(true);
-      }
-      else if (testFieldsStorePos[i] && !testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
+      fieldTypes.enableTermVectors(testFields[i]);
+      if (testFieldsStorePos[i]) {
+        fieldTypes.enableTermVectorPositions(testFields[i]);
       }
-      else if (!testFieldsStorePos[i] && testFieldsStoreOff[i]) {
-        customType.setStoreTermVectors(true);
-        customType.setStoreTermVectorPositions(true);
-        customType.setStoreTermVectorOffsets(true);
+      if (testFieldsStoreOff[i]) {
+        fieldTypes.enableTermVectorOffsets(testFields[i]);
       }
-      else {
-        customType.setStoreTermVectors(true);
-      }
-      doc.add(new Field(testFields[i], "", customType));
+    }
+
+    Document2 doc = writer.newDocument();
+    for(int i=0;i<testFields.length;i++) {
+      // nocommit shouldn't it be testTerms[i] not ""?
+      doc.addLargeText(testFields[i], "");
     }
 
     //Create 5 documents for testing, they all have the same
@@ -339,117 +334,59 @@ public class TestTermVectorsReader exten
     MockAnalyzer a = new MockAnalyzer(random());
     a.setEnableChecks(false);
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, a);
-    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPayloads(true);
-    Document doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector payloads without term vector positions (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field\": cannot enable termVectorPayloads when termVectorPositions haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorOffsets(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorOffsets("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector offsets when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorOffsets when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorPositions(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPositions("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector positions when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorPositions when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(false);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
     try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field2");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot index term vector payloads when term vectors are not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field2\": cannot enable termVectorPayloads when termVectors haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(TextField.TYPE_NOT_STORED);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    fieldTypes.enableTermVectors("field3");
     try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot index term vector payloads without term vector positions (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectors(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot store term vectors for a field that is not indexed (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorPositions(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
-      // Expected
-      assertEquals("cannot store term vector positions for a field that is not indexed (field=\"field\")", iae.getMessage());
-    }
-
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorOffsets(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
-    try {
-      w.addDocument(doc);
+      fieldTypes.enableTermVectorPayloads("field3");
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot store term vector offsets for a field that is not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field3\": cannot enable termVectorPayloads when termVectorPositions haven't been enabled", ise.getMessage());
     }
 
-    ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectorPayloads(true);
-    doc = new Document();
-    doc.add(new Field("field", "value", ft));
+    fieldTypes.enableTermVectors("field4");
+    Document2 doc = w.newDocument();
     try {
+      doc.addStored("field4", "foo");
       w.addDocument(doc);
       fail("did not hit exception");
-    } catch (IllegalArgumentException iae) {
+    } catch (IllegalStateException ise) {
       // Expected
-      assertEquals("cannot store term vector payloads for a field that is not indexed (field=\"field\")", iae.getMessage());
+      assertEquals("field \"field4\": cannot enable term vectors when indexOptions is NONE", ise.getMessage());
     }
 
     w.close();
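
The large rewrite above also moves validation earlier: illegal term-vector combinations now fail inside the FieldTypes.enableXXX call with an IllegalStateException, instead of failing at addDocument time with an IllegalArgumentException. A condensed sketch of the new failure mode, reusing a message asserted in this diff:

    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.enableTermVectors("field");
    fieldTypes.enableTermVectorOffsets("field");
    try {
      // Payloads require positions; on this branch the check fires here,
      // not later when a document is added.
      fieldTypes.enableTermVectorPayloads("field");
      fail("did not hit exception");
    } catch (IllegalStateException ise) {
      assertEquals("field \"field\": cannot enable termVectorPayloads when termVectorPositions haven't been enabled",
                   ise.getMessage());
    }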

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Mon Nov 17 00:43:44 2014
@@ -25,12 +25,15 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenFilter;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -48,16 +51,16 @@ public class TestTermVectorsWriter exten
     a.setOffsetGap(0);
     a.setPositionIncrementGap(0);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
-    Document doc = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
-    doc.add(newField("field", "", customType));
-    doc.add(f);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("field", DocValuesType.NONE);
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
+    doc.addAtom("field", "abcd");
+    doc.addAtom("field", "abcd");
+    doc.addAtom("field", "");
+    doc.addAtom("field", "abcd");
     w.addDocument(doc);
     w.close();
 
@@ -105,15 +108,14 @@ public class TestTermVectorsWriter exten
   // LUCENE-1442
   public void testDoubleOffsetCounting2() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(f);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
+    doc.addLargeText("field", "abcd");
+    doc.addLargeText("field", "abcd");
     w.addDocument(doc);
     w.close();
 
@@ -140,15 +142,14 @@ public class TestTermVectorsWriter exten
   // LUCENE-1448
   public void testEndOffsetPositionCharAnalyzer() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd   ", customType);
-    doc.add(f);
-    doc.add(f);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
+    doc.addLargeText("field", "abcd   ");
+    doc.addLargeText("field", "abcd   ");
     w.addDocument(doc);
     w.close();
 
@@ -177,17 +178,16 @@ public class TestTermVectorsWriter exten
     Directory dir = newDirectory();
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer));
-    Document doc = new Document();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
     try (TokenStream stream = analyzer.tokenStream("field", "abcd   ")) {
       stream.reset(); // TODO: weird to reset before wrapping with CachingTokenFilter... correct?
       TokenStream cachedStream = new CachingTokenFilter(stream);
-      FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-      customType.setStoreTermVectors(true);
-      customType.setStoreTermVectorPositions(true);
-      customType.setStoreTermVectorOffsets(true);
-      Field f = new Field("field", cachedStream, customType);
-      doc.add(f);
-      doc.add(f);
+      doc.addLargeText("field", cachedStream);
+      doc.addLargeText("field", cachedStream);
       w.addDocument(doc);
     }
     w.close();
@@ -216,14 +216,13 @@ public class TestTermVectorsWriter exten
   public void testEndOffsetPositionStopFilter() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd the", customType);
-    doc.add(f);
-    doc.add(f);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
+    doc.addLargeText("field", "abcd the");
+    doc.addLargeText("field", "abcd the");
     w.addDocument(doc);
     w.close();
 
@@ -251,15 +250,13 @@ public class TestTermVectorsWriter exten
   public void testEndOffsetPositionStandard() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "abcd the  ", customType);
-    Field f2 = newField("field", "crunch man", customType);
-    doc.add(f);
-    doc.add(f2);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    Document2 doc = w.newDocument();
+    doc.addLargeText("field", "abcd the  ");
+    doc.addLargeText("field", "crunch man");
     w.addDocument(doc);
     w.close();
 
@@ -294,16 +291,14 @@ public class TestTermVectorsWriter exten
   // LUCENE-1448
   public void testEndOffsetPositionStandardEmptyField() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-    Field f = newField("field", "", customType);
-    Field f2 = newField("field", "crunch man", customType);
-    doc.add(f);
-    doc.add(f2);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document2 doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    doc.addLargeText("field", "");
+    doc.addLargeText("field", "crunch man");
     w.addDocument(doc);
     w.close();
 
@@ -333,19 +328,14 @@ public class TestTermVectorsWriter exten
   public void testEndOffsetPositionStandardEmptyField2() throws Exception {
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document doc = new Document();
-    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    customType.setStoreTermVectorPositions(true);
-    customType.setStoreTermVectorOffsets(true);
-
-    Field f = newField("field", "abcd", customType);
-    doc.add(f);
-    doc.add(newField("field", "", customType));
-
-    Field f2 = newField("field", "crunch", customType);
-    doc.add(f2);
-
+    Document2 doc = w.newDocument();
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
+    fieldTypes.enableTermVectorOffsets("field");
+    fieldTypes.enableTermVectorPositions("field");
+    doc.addLargeText("field", "abcd");
+    doc.addLargeText("field", "");
+    doc.addLargeText("field", "crunch");
     w.addDocument(doc);
     w.close();
 
@@ -383,24 +373,19 @@ public class TestTermVectorsWriter exten
           .setMergeScheduler(new SerialMergeScheduler())
           .setMergePolicy(new LogDocMergePolicy()));
 
-      Document document = new Document();
-      FieldType customType = new FieldType();
-      customType.setStored(true);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("termVector");
+      fieldTypes.enableTermVectorOffsets("termVector");
+      fieldTypes.enableTermVectorPositions("termVector");
 
-      Field storedField = newField("stored", "stored", customType);
-      document.add(storedField);
+      Document2 document = writer.newDocument();
+      document.addStored("stored", "stored");
       writer.addDocument(document);
       writer.addDocument(document);
 
-      document = new Document();
-      document.add(storedField);
-      FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-      customType2.setStoreTermVectors(true);
-      customType2.setStoreTermVectorPositions(true);
-      customType2.setStoreTermVectorOffsets(true);
-      Field termVectorField = newField("termVector", "termVector", customType2);
-
-      document.add(termVectorField);
+      document = writer.newDocument();
+      document.addStored("stored", "stored");
+      document.addLargeText("termVector", "termVector");
       writer.addDocument(document);
       writer.forceMerge(1);
       writer.close();
@@ -436,24 +421,19 @@ public class TestTermVectorsWriter exten
           .setMergeScheduler(new SerialMergeScheduler())
           .setMergePolicy(new LogDocMergePolicy()));
 
-      Document document = new Document();
-
-      FieldType customType = new FieldType();
-      customType.setStored(true);
+      FieldTypes fieldTypes = writer.getFieldTypes();
+      fieldTypes.enableTermVectors("termVector");
+      fieldTypes.enableTermVectorOffsets("termVector");
+      fieldTypes.enableTermVectorPositions("termVector");
 
-      Field storedField = newField("stored", "stored", customType);
-      document.add(storedField);
+      Document2 document = writer.newDocument();
+      document.addStored("stored", "stored");
       writer.addDocument(document);
       writer.addDocument(document);
 
-      document = new Document();
-      document.add(storedField);
-      FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-      customType2.setStoreTermVectors(true);
-      customType2.setStoreTermVectorPositions(true);
-      customType2.setStoreTermVectorOffsets(true);
-      Field termVectorField = newField("termVector", "termVector", customType2);
-      document.add(termVectorField);
+      document = writer.newDocument();
+      document.addStored("stored", "stored");
+      document.addLargeText("termVector", "termVector");
       writer.addDocument(document);
       writer.forceMerge(1);
       writer.close();
@@ -475,21 +455,17 @@ public class TestTermVectorsWriter exten
         .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergeScheduler(new SerialMergeScheduler())
         .setMergePolicy(new LogDocMergePolicy()));
-
-    Document document = new Document();
-    FieldType customType = new FieldType();
-    customType.setStored(true);
-
-    Field storedField = newField("stored", "stored", customType);
-    document.add(storedField);
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    Field termVectorField = newField("termVector", "termVector", customType2);
-    document.add(termVectorField);
-    for(int i=0;i<10;i++)
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    fieldTypes.enableTermVectors("termVector");
+    fieldTypes.enableTermVectorOffsets("termVector");
+    fieldTypes.enableTermVectorPositions("termVector");
+
+    Document2 document = writer.newDocument();
+    document.addStored("stored", "stored");
+    document.addLargeText("termVector", "termVector");
+    for(int i=0;i<10;i++) {
       writer.addDocument(document);
+    }
     writer.close();
 
     writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
@@ -497,9 +473,12 @@ public class TestTermVectorsWriter exten
         .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergeScheduler(new SerialMergeScheduler())
         .setMergePolicy(new LogDocMergePolicy()));
-    for(int i=0;i<6;i++)
+    document = writer.newDocument();
+    document.addStored("stored", "stored");
+    document.addLargeText("termVector", "termVector");
+    for(int i=0;i<6;i++) {
       writer.addDocument(document);
-
+    }
     writer.forceMerge(1);
     writer.close();
 
@@ -516,23 +495,24 @@ public class TestTermVectorsWriter exten
   public void testNoTermVectorAfterTermVector() throws IOException {
     Directory dir = newDirectory();
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document document = new Document();
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    customType2.setStoreTermVectorPositions(true);
-    customType2.setStoreTermVectorOffsets(true);
-    document.add(newField("tvtest", "a b c", customType2));
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("tvtest");
+    fieldTypes.enableTermVectorOffsets("tvtest");
+    fieldTypes.enableTermVectorPositions("tvtest");
+
+    Document2 document = iw.newDocument();
+    document.addLargeText("tvtest", "a b c");
     iw.addDocument(document);
-    document = new Document();
-    document.add(newTextField("tvtest", "x y z", Field.Store.NO));
+    document = iw.newDocument();
+    document.addLargeText("tvtest", "x y z");
     iw.addDocument(document);
     // Make first segment
     iw.commit();
 
     FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
     customType.setStoreTermVectors(true);
-    document = new Document();
-    document.add(newField("tvtest", "a b c", customType));
+    document = iw.newDocument();
+    document.addStored("tvtest", "a b c");
     iw.addDocument(document);
     // Make 2nd segment
     iw.commit();
@@ -542,139 +522,22 @@ public class TestTermVectorsWriter exten
     dir.close();
   }
 
-  // LUCENE-1010
-  public void testNoTermVectorAfterTermVectorMerge() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-    Document document = new Document();
-    FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
-    customType.setStoreTermVectors(true);
-    document.add(newField("tvtest", "a b c", customType));
-    iw.addDocument(document);
-    iw.commit();
-
-    document = new Document();
-    document.add(newTextField("tvtest", "x y z", Field.Store.NO));
-    iw.addDocument(document);
-    // Make first segment
-    iw.commit();
-
-    iw.forceMerge(1);
-
-    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
-    customType2.setStoreTermVectors(true);
-    document.add(newField("tvtest", "a b c", customType2));
-    document = new Document();
-    iw.addDocument(document);
-    // Make 2nd segment
-    iw.commit();
-    iw.forceMerge(1);
-
-    iw.close();
-    dir.close();
-  }
-  
-  /** 
-   * In a single doc, for the same field, mix the term vectors up 
-   */
-  public void testInconsistentTermVectorOptions() throws IOException {
-    FieldType a, b;
-    
-    // no vectors + vectors
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    doTestMixup(a, b);
-    
-    // vectors + vectors with pos
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    doTestMixup(a, b);
-    
-    // vectors + vectors with off
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorOffsets(true);
-    doTestMixup(a, b);
-    
-    // vectors with pos + vectors with pos + off
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    a.setStoreTermVectorPositions(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    b.setStoreTermVectorOffsets(true);
-    doTestMixup(a, b);
-    
-    // vectors with pos + vectors with pos + pay
-    a = new FieldType(TextField.TYPE_NOT_STORED);   
-    a.setStoreTermVectors(true);
-    a.setStoreTermVectorPositions(true);
-    b = new FieldType(TextField.TYPE_NOT_STORED);
-    b.setStoreTermVectors(true);
-    b.setStoreTermVectorPositions(true);
-    b.setStoreTermVectorPayloads(true);
-    doTestMixup(a, b);
-  }
-  
-  private void doTestMixup(FieldType ft1, FieldType ft2) throws IOException {
-    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
-    
-    // add 3 good docs
-    for (int i = 0; i < 3; i++) {
-      Document doc = new Document();
-      doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
-      iw.addDocument(doc);
-    }
-    
-    // add broken doc
-    Document doc = new Document();
-    doc.add(new Field("field", "value1", ft1));
-    doc.add(new Field("field", "value2", ft2));
-    
-    // ensure broken doc hits exception
-    try {
-      iw.addDocument(doc);
-      fail("didn't hit expected exception");
-    } catch (IllegalArgumentException iae) {
-      assertNotNull(iae.getMessage());
-      assertTrue(iae.getMessage().startsWith("all instances of a given field name must have the same term vectors settings"));
-    }
-    
-    // ensure good docs are still ok
-    IndexReader ir = iw.getReader();
-    assertEquals(3, ir.numDocs());
-    
-    ir.close();
-    iw.close();
-    dir.close();
-  }
-
   // LUCENE-5611: don't abort segment when term vector settings are wrong
   public void testNoAbortOnBadTVSettings() throws Exception {
     Directory dir = newDirectory();
     // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter iw = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = iw.getFieldTypes();
+    fieldTypes.enableTermVectors("field");
 
-    Document doc = new Document();
+    Document2 doc = iw.newDocument();
     iw.addDocument(doc);
-    FieldType ft = new FieldType(StoredField.TYPE);
-    ft.setStoreTermVectors(true);
-    ft.freeze();
-    doc.add(new Field("field", "value", ft));
     try {
-      iw.addDocument(doc);
+      doc.addStored("field", "value");
       fail("should have hit exc");
-    } catch (IllegalArgumentException iae) {
-      // expected
+    } catch (IllegalStateException ise) {
+      assertEquals("field \"field\": cannot enable term vectors when indexOptions is NONE", ise.getMessage());
     }
     IndexReader r = DirectoryReader.open(iw, true);
 
@@ -684,4 +547,5 @@ public class TestTermVectorsWriter exten
     r.close();
     dir.close();
   }
+
 }

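Taken together, the TestTermVectorsWriter hunks above apply one mechanical migration: term-vector options move off the per-Field FieldType and onto the FieldTypes schema held by the IndexWriter, and documents are obtained from the writer rather than constructed directly. A minimal standalone sketch of that pattern, using only the branch APIs exercised in these tests (MockAnalyzer and RAMDirectory stand in for the usual test scaffolding; the class name is illustrative, not part of the commit):

  import java.util.Random;

  import org.apache.lucene.analysis.MockAnalyzer;
  import org.apache.lucene.document.Document2;
  import org.apache.lucene.document.FieldTypes;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.RAMDirectory;

  public class FieldTypesMigrationSketch {
    public static void main(String[] args) throws Exception {
      Directory dir = new RAMDirectory();
      IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

      // Term-vector options now live on the writer's FieldTypes schema,
      // replacing the per-field FieldType setters removed above:
      FieldTypes fieldTypes = w.getFieldTypes();
      fieldTypes.enableTermVectors("field");
      fieldTypes.enableTermVectorOffsets("field");
      fieldTypes.enableTermVectorPositions("field");

      // Documents come from the writer, so they see that schema:
      Document2 doc = w.newDocument();
      doc.addLargeText("field", "abcd");  // replaces doc.add(new Field("field", "abcd", customType))
      w.addDocument(doc);

      w.close();
      dir.close();
    }
  }

Note also the behavioral change recorded in testNoAbortOnBadTVSettings: with term vectors enabled on "field", a conflicting add now fails fast at doc.addStored(...) with an IllegalStateException, where the old FieldType-based code failed later, at addDocument(...), with an IllegalArgumentException.
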
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java Mon Nov 17 00:43:44 2014
@@ -23,6 +23,7 @@ import java.util.Random;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -84,9 +85,6 @@ public class TestTermdocPerf extends Luc
       }
     };
 
-    Document doc = new Document();
-    
-    doc.add(newStringField(field, val, Field.Store.NO));
     IndexWriter writer = new IndexWriter(
         dir,
         newIndexWriterConfig(analyzer)
@@ -94,7 +92,8 @@ public class TestTermdocPerf extends Luc
           .setMaxBufferedDocs(100)
           .setMergePolicy(newLogMergePolicy(100))
     );
-
+    Document2 doc = writer.newDocument();
+    doc.addAtom(field, val);
     for (int i=0; i<ndocs; i++) {
       writer.addDocument(doc);
     }

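One consequence of the new API is visible in the TestTermdocPerf hunk above: since Document2 instances are handed out by the writer, the document setup has to move below the writer's construction, while the old idiom of adding the same document ndocs times is unchanged. A sketch of that idiom under the same assumptions as before (the class, helper name and field/value literals are placeholders):

  import java.io.IOException;

  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.document.Document2;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.store.Directory;

  public class ReuseDocSketch {
    static void indexSameDocManyTimes(Directory dir, Analyzer analyzer, int ndocs) throws IOException {
      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
      // The writer must exist first: writer.newDocument() is what creates the document.
      Document2 doc = writer.newDocument();
      doc.addAtom("field", "val");  // addAtom replaces newStringField(..., Field.Store.NO)
      for (int i = 0; i < ndocs; i++) {
        writer.addDocument(doc);    // same Document2 instance, added ndocs times
      }
      writer.close();
    }
  }
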
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTerms.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTerms.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTerms.java Mon Nov 17 00:43:44 2014
@@ -20,6 +20,7 @@ package org.apache.lucene.index;
 import java.util.*;
 
 import org.apache.lucene.analysis.CannedBinaryTokenStream;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DoubleField;
 import org.apache.lucene.document.Field;
@@ -38,8 +39,8 @@ public class TestTerms extends LuceneTes
   public void testTermMinMaxBasic() throws Exception {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
-    doc.add(newTextField("field", "a b c cc ddd", Field.Store.NO));
+    Document2 doc = w.newDocument();
+    doc.addShortText("field", "a b c cc ddd");
     w.addDocument(doc);
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
@@ -57,9 +58,6 @@ public class TestTerms extends LuceneTes
     BytesRef minTerm = null;
     BytesRef maxTerm = null;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
-      Field field = new TextField("field", "", Field.Store.NO);
-      doc.add(field);
       //System.out.println("  doc " + i);
       CannedBinaryTokenStream.BinaryToken[] tokens = new CannedBinaryTokenStream.BinaryToken[atLeast(10)];
       for(int j=0;j<tokens.length;j++) {
@@ -77,7 +75,9 @@ public class TestTerms extends LuceneTes
         }
         tokens[j] = new CannedBinaryTokenStream.BinaryToken(tokenBytes);
       }
-      field.setTokenStream(new CannedBinaryTokenStream(tokens));
+
+      Document2 doc = w.newDocument();
+      doc.addLargeText("field", new CannedBinaryTokenStream(tokens));
       w.addDocument(doc);
     }
 
@@ -98,18 +98,18 @@ public class TestTerms extends LuceneTes
     int minValue = Integer.MAX_VALUE;
     int maxValue = Integer.MIN_VALUE;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
       int num = random().nextInt();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new IntField("field", num, Field.Store.NO));
+      doc.addInt("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.getMinInt(terms));
-    assertEquals(maxValue, NumericUtils.getMaxInt(terms));
+    assertEquals(minValue, Document2.bytesToInt(terms.getMin()));
+    assertEquals(maxValue, Document2.bytesToInt(terms.getMax()));
 
     r.close();
     w.close();
@@ -123,19 +123,19 @@ public class TestTerms extends LuceneTes
     long minValue = Long.MAX_VALUE;
     long maxValue = Long.MIN_VALUE;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
       long num = random().nextLong();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new LongField("field", num, Field.Store.NO));
+      doc.addLong("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
 
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.getMinLong(terms));
-    assertEquals(maxValue, NumericUtils.getMaxLong(terms));
+    assertEquals(minValue, Document2.bytesToLong(terms.getMin()));
+    assertEquals(maxValue, Document2.bytesToLong(terms.getMax()));
 
     r.close();
     w.close();
@@ -149,18 +149,18 @@ public class TestTerms extends LuceneTes
     float minValue = Float.POSITIVE_INFINITY;
     float maxValue = Float.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
       float num = random().nextFloat();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new FloatField("field", num, Field.Store.NO));
+      doc.addFloat("field", num);
       w.addDocument(doc);
     }
     
     IndexReader r = w.getReader();
     Terms terms = MultiFields.getTerms(r, "field");
-    assertEquals(minValue, NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms)), 0.0f);
-    assertEquals(maxValue, NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms)), 0.0f);
+    assertEquals(minValue, Document2.bytesToFloat(terms.getMin()), 0.0f);
+    assertEquals(maxValue, Document2.bytesToFloat(terms.getMax()), 0.0f);
 
     r.close();
     w.close();
@@ -174,11 +174,11 @@ public class TestTerms extends LuceneTes
     double minValue = Double.POSITIVE_INFINITY;
     double maxValue = Double.NEGATIVE_INFINITY;
     for(int i=0;i<numDocs;i++ ){
-      Document doc = new Document();
+      Document2 doc = w.newDocument();
       double num = random().nextDouble();
       minValue = Math.min(num, minValue);
       maxValue = Math.max(num, maxValue);
-      doc.add(new DoubleField("field", num, Field.Store.NO));
+      doc.addDouble("field", num);
       w.addDocument(doc);
     }
     
@@ -186,8 +186,8 @@ public class TestTerms extends LuceneTes
 
     Terms terms = MultiFields.getTerms(r, "field");
 
-    assertEquals(minValue, NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)), 0.0);
-    assertEquals(maxValue, NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)), 0.0);
+    assertEquals(minValue, Document2.bytesToDouble(terms.getMin()), 0.0);
+    assertEquals(maxValue, Document2.bytesToDouble(terms.getMax()), 0.0);
 
     r.close();
     w.close();

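The TestTerms hunks swap the NumericUtils-based min/max decoding for Document2's bytesTo* helpers, and the typed addInt/addLong/addFloat/addDouble calls replace the corresponding *Field classes. A sketch of the int case, assuming the branch APIs exactly as asserted above (the long, float and double cases are symmetric via bytesToLong, bytesToFloat and bytesToDouble):

  import java.util.Random;

  import org.apache.lucene.analysis.MockAnalyzer;
  import org.apache.lucene.document.Document2;
  import org.apache.lucene.index.DirectoryReader;
  import org.apache.lucene.index.IndexReader;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.index.IndexWriterConfig;
  import org.apache.lucene.index.MultiFields;
  import org.apache.lucene.index.Terms;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.RAMDirectory;

  public class TermMinMaxSketch {
    public static void main(String[] args) throws Exception {
      Directory dir = new RAMDirectory();
      IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));
      for (int num : new int[] {42, -17, 7}) {
        Document2 doc = w.newDocument();
        doc.addInt("field", num);  // typed add replaces new IntField("field", num, Field.Store.NO)
        w.addDocument(doc);
      }
      IndexReader r = DirectoryReader.open(w, true);
      Terms terms = MultiFields.getTerms(r, "field");
      // The min/max terms decode straight back to ints, as TestTerms asserts:
      System.out.println(Document2.bytesToInt(terms.getMin()));  // -17
      System.out.println(Document2.bytesToInt(terms.getMax()));  // 42
      r.close();
      w.close();
      dir.close();
    }
  }
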
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java?rev=1640053&r1=1640052&r2=1640053&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java Mon Nov 17 00:43:44 2014
@@ -24,14 +24,15 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
@@ -158,14 +159,13 @@ public class TestTermsEnum extends Lucen
   }
 
   private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
-    Document doc = new Document();
-    doc.add(new IntField("id", id, Field.Store.YES));
-    doc.add(new NumericDocValuesField("id", id));
+    Document2 doc = w.newDocument();
+    doc.addUniqueInt("id", id);
     if (VERBOSE) {
       System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
     }
     for (String s2 : terms) {
-      doc.add(newStringField("f", s2, Field.Store.NO));
+      doc.addAtom("f", s2);
       termToID.put(new BytesRef(s2), id);
     }
     w.addDocument(doc);
@@ -185,6 +185,8 @@ public class TestTermsEnum extends Lucen
   public void testIntersectRandom() throws IOException {
     final Directory dir = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setMultiValued("f");
 
     final int numTerms = atLeast(300);
     //final int numTerms = 50;
@@ -358,9 +360,8 @@ public class TestTermsEnum extends Lucen
 
     final RandomIndexWriter w = new RandomIndexWriter(random(), d, iwc);
     for(String term : terms) {
-      Document doc = new Document();
-      Field f = newStringField(FIELD, term, Field.Store.NO);
-      doc.add(f);
+      Document2 doc = w.newDocument();
+      doc.addAtom(FIELD, term);
       w.addDocument(doc);
     }
     if (r != null) {
@@ -497,10 +498,10 @@ public class TestTermsEnum extends Lucen
   public void testZeroTerms() throws Exception {
     d = newDirectory();
     final RandomIndexWriter w = new RandomIndexWriter(random(), d);
-    Document doc = new Document();
-    doc.add(newTextField("field", "one two three", Field.Store.NO));
-    doc = new Document();
-    doc.add(newTextField("field2", "one two three", Field.Store.NO));
+    Document2 doc = w.newDocument();
+    doc.addLargeText("field", "one two three");
+    doc = w.newDocument();
+    doc.addLargeText("field2", "one two three");
     w.addDocument(doc);
     w.commit();
     w.deleteDocuments(new Term("field", "one"));
@@ -720,16 +721,16 @@ public class TestTermsEnum extends Lucen
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newTextField("field", "aaa", Field.Store.NO));
+    Document2 doc = w.newDocument();
+    doc.addAtom("field", "aaa");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "bbb", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "bbb");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newTextField("field", "ccc", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "ccc");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -770,20 +771,20 @@ public class TestTermsEnum extends Lucen
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("field", "abc", Field.Store.NO));
+    Document2 doc = w.newDocument();
+    doc.addAtom("field", "abc");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "abd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "abd");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "acd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "acd");
     w.addDocument(doc);
 
-    doc = new Document();
-    doc.add(newStringField("field", "bcd", Field.Store.NO));
+    doc = w.newDocument();
+    doc.addAtom("field", "bcd");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -827,17 +828,20 @@ public class TestTermsEnum extends Lucen
     IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
     iwc.setMergePolicy(new LogDocMergePolicy());
     RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-    Document doc = new Document();
-    doc.add(newStringField("field", "", Field.Store.NO));
-    doc.add(newStringField("field", "abc", Field.Store.NO));
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting("field");
+
+    Document2 doc = w.newDocument();
+    doc.addAtom("field", "");
+    doc.addAtom("field", "abc");
     w.addDocument(doc);
 
-    doc = new Document();
+    doc = w.newDocument();
     // add empty string to both documents, so that singletonDocID == -1.
      // For an FST-based term dict, we expect the first arc to be
      // flagged with HAS_FINAL_OUTPUT
-    doc.add(newStringField("field", "abc", Field.Store.NO));
-    doc.add(newStringField("field", "", Field.Store.NO));
+    doc.addAtom("field", "abc");
+    doc.addAtom("field", "");
     w.addDocument(doc);
 
     w.forceMerge(1);
@@ -894,8 +898,8 @@ public class TestTermsEnum extends Lucen
       terms.add(prefix + TestUtil.randomRealisticUnicodeString(random(), 1, 20));
     }
     for(String term : terms) {
-      Document doc = new Document();
-      doc.add(newStringField("id", term, Field.Store.YES));
+      Document2 doc = w.newDocument();
+      doc.addUniqueAtom("id", term);
       w.addDocument(doc);
     }
     IndexReader r = w.getReader();
@@ -979,8 +983,8 @@ public class TestTermsEnum extends Lucen
       IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
       iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
       RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
-      Document doc = new Document();
-      doc.add(newTextField("field", sb.toString(), Field.Store.NO));
+      Document2 doc = w.newDocument();
+      doc.addLargeText("field", sb.toString());
       w.addDocument(doc);
       IndexReader r = w.getReader();
       assertEquals(1, r.leaves().size());

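Two further schema calls appear in the TestTermsEnum hunks: setMultiValued, which testIntersectRandom enables before adding several "f" values to one document, and addUniqueInt/addUniqueAtom, which replace the old id-field pattern of an IntField plus a parallel NumericDocValuesField. A test-style sketch of both, assuming the LuceneTestCase scaffolding (newDirectory(), random(), RandomIndexWriter) used throughout this file; the method name is illustrative:

  public void testMultiValuedSketch() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    FieldTypes fieldTypes = w.getFieldTypes();
    fieldTypes.setMultiValued("f");  // permit several "f" values in one document

    Document2 doc = w.newDocument();
    doc.addUniqueInt("id", 42);      // replaces IntField + NumericDocValuesField
    doc.addAtom("f", "alpha");
    doc.addAtom("f", "beta");        // the test enables multi-valued before doing this
    w.addDocument(doc);
    w.close();
    dir.close();
  }
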

