lucene-commits mailing list archives

From mikemcc...@apache.org
Subject svn commit: r1145297 [4/8] - in /lucene/dev/branches/fieldtype: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/ lucene/contrib/instantiated/src/test/o...
Date Mon, 11 Jul 2011 19:13:06 GMT
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Mon Jul 11 19:12:59 2011
@@ -42,12 +42,12 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document2.BinaryField;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
@@ -77,6 +77,7 @@ import org.apache.lucene.index.codecs.pr
 
 public class TestIndexWriter extends LuceneTestCase {
 
+    private static final FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED);
     public void testDocCount() throws IOException {
         Directory dir = newDirectory();
 
@@ -137,15 +138,15 @@ public class TestIndexWriter extends Luc
     static void addDoc(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+        doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
         writer.addDocument(doc);
     }
 
     static void addDocWithIndex(IndexWriter writer, int index) throws IOException
     {
         Document doc = new Document();
-        doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("content", "aaa " + index, storedTextType));
+        doc.add(newField("id", "" + index, storedTextType));
         writer.addDocument(doc);
     }
 
@@ -255,12 +256,12 @@ public class TestIndexWriter extends Luc
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
       for(int j=0;j<100;j++) {
         Document doc = new Document();
-        doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("a"+j, "aaa" + j, storedTextType));
+        doc.add(newField("b"+j, "aaa" + j, storedTextType));
+        doc.add(newField("c"+j, "aaa" + j, storedTextType));
+        doc.add(newField("d"+j, "aaa", storedTextType));
+        doc.add(newField("e"+j, "aaa", storedTextType));
+        doc.add(newField("f"+j, "aaa", storedTextType));
         writer.addDocument(doc);
       }
       writer.close();
@@ -291,7 +292,7 @@ public class TestIndexWriter extends Luc
       int lastNumFile = dir.listAll().length;
       for(int j=0;j<9;j++) {
         Document doc = new Document();
-        doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("field", "aaa" + j, storedTextType));
         writer.addDocument(doc);
         int numFile = dir.listAll().length;
         // Verify that with a tiny RAM buffer we see new
@@ -314,7 +315,7 @@ public class TestIndexWriter extends Luc
       int lastFlushCount = -1;
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("field", storedTextType, "aaa" + j));
         writer.addDocument(doc);
         _TestUtil.syncConcurrentMerges(writer);
         int flushCount = writer.getFlushCount();
@@ -368,7 +369,7 @@ public class TestIndexWriter extends Luc
 
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("field", storedTextType, "aaa" + j));
         writer.addDocument(doc);
       }
       
@@ -429,7 +430,7 @@ public class TestIndexWriter extends Luc
         for(int j=0;j<100;j++) {
           Document doc = new Document();
           for(int k=0;k<100;k++) {
-            doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField("field", Integer.toString(random.nextInt()), storedTextType));
           }
           writer.addDocument(doc);
         }
@@ -438,7 +439,7 @@ public class TestIndexWriter extends Luc
         // occurs (heavy on byte blocks)
         for(int j=0;j<100;j++) {
           Document doc = new Document();
-          doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", storedTextType));
           writer.addDocument(doc);
         }
 
@@ -453,7 +454,7 @@ public class TestIndexWriter extends Luc
           String longTerm = b.toString();
 
           Document doc = new Document();
-          doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(newField("field", longTerm, storedTextType));
           writer.addDocument(doc);
         }
       }
@@ -471,11 +472,17 @@ public class TestIndexWriter extends Luc
       MockDirectoryWrapper dir = newDirectory();
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
       // Enable norms for only 1 doc, pre flush
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setOmitNorms(true);
       for(int j=0;j<10;j++) {
         Document doc = new Document();
-        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+        Field f = null;
         if (j != 8) {
-          f.setOmitNorms(true);
+          f = newField("field", "aaa", customType);
+        }
+        else {
+          f = newField("field", "aaa", storedTextType);
         }
         doc.add(f);
         writer.addDocument(doc);
@@ -494,9 +501,12 @@ public class TestIndexWriter extends Luc
       // Enable norms for only 1 doc, post flush
       for(int j=0;j<27;j++) {
         Document doc = new Document();
-        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+        Field f = null;
         if (j != 26) {
-          f.setOmitNorms(true);
+          f = newField("field", "aaa", customType);
+        }
+        else {
+          f = newField("field", "aaa", storedTextType);
         }
         doc.add(f);
         writer.addDocument(doc);
@@ -526,7 +536,12 @@ public class TestIndexWriter extends Luc
         b.append(" a a a a a a a a");
       }
       Document doc = new Document();
-      doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", b.toString(), customType));
       writer.addDocument(doc);
       writer.close();
 
@@ -594,7 +609,12 @@ public class TestIndexWriter extends Luc
               setMergePolicy(newLogMergePolicy(10))
       );
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", "aaa", customType));
       for(int i=0;i<19;i++)
         writer.addDocument(doc);
       writer.flush(false, true);
@@ -614,7 +634,12 @@ public class TestIndexWriter extends Luc
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
       writer.setInfoStream(VERBOSE ? System.out : null);
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", "aaa", customType));
       writer.addDocument(doc);
       writer.commit();
       if (VERBOSE) {
@@ -643,7 +668,9 @@ public class TestIndexWriter extends Luc
         TEST_VERSION_CURRENT, new MockAnalyzer(random)));
 
     Document document = new Document();
-    document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    document.add(newField("tvtest", "", customType));
     iw.addDocument(document);
     iw.close();
     dir.close();
@@ -660,8 +687,9 @@ public class TestIndexWriter extends Luc
       ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
       IndexWriter iw = new IndexWriter(dir, conf);
       Document document = new Document();
-      document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
-                             Field.TermVector.YES));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStoreTermVectors(true);
+      document.add(newField("tvtest", "a b c", customType));
       Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
       for(int i=0;i<4;i++)
         iw.addDocument(document);
@@ -687,24 +715,21 @@ public class TestIndexWriter extends Luc
       Document doc = new Document();
       String contents = "aa bb cc dd ee ff gg hh ii jj kk";
 
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      FieldType type = null;
       if (i == 7) {
         // Add empty docs here
-        doc.add(newField("content3", "", Field.Store.NO,
-                          Field.Index.ANALYZED));
+        doc.add(newField("content3", "", TextField.TYPE_UNSTORED));
       } else {
-        Field.Store storeVal;
         if (i%2 == 0) {
-          doc.add(newField("content4", contents, Field.Store.YES,
-                            Field.Index.ANALYZED));
-          storeVal = Field.Store.YES;
+          doc.add(newField("content4", contents, customType));
+          type = customType;
         } else
-          storeVal = Field.Store.NO;
-        doc.add(newField("content1", contents, storeVal,
-                          Field.Index.ANALYZED));
-        doc.add(newField("content3", "", Field.Store.YES,
-                          Field.Index.ANALYZED));
-        doc.add(newField("content5", "", storeVal,
-                          Field.Index.ANALYZED));
+          type = TextField.TYPE_UNSTORED; 
+        doc.add(newField("content1", contents, TextField.TYPE_UNSTORED));
+        doc.add(newField("content3", "", customType));
+        doc.add(newField("content5", "", type));
       }
 
       for(int j=0;j<4;j++)
@@ -730,7 +755,11 @@ public class TestIndexWriter extends Luc
     Directory directory = newDirectory();
 
     final Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setTokenized(false);
+
+    Field idField = newField("id", "", customType);
     doc.add(idField);
 
     for(int pass=0;pass<2;pass++) {
@@ -834,7 +863,7 @@ public class TestIndexWriter extends Luc
     for(int i=0;i<10000;i++)
       b.append(" a");
     b.append(" x");
-    doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", b.toString(), TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
 
@@ -852,7 +881,7 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -886,8 +915,9 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    doc.add(newField("field", "a field", customType));
     w.addDocument(doc);
     w.commit();
     assertTrue(w.beforeWasCalled);
@@ -930,7 +960,7 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(new Field("field", tokens));
+    doc.add(new TextField("field", tokens));
     w.addDocument(doc);
     w.commit();
 
@@ -971,7 +1001,7 @@ public class TestIndexWriter extends Luc
       b[i] = (byte) (i+77);
 
     Document doc = new Document();
-    Field f = new Field("binary", b, 10, 17);
+    Field f = new BinaryField("binary", b, 10, 17);
     byte[] bx = f.binaryValue(null).bytes;
     assertTrue(bx != null);
     assertEquals(50, bx.length);
@@ -982,9 +1012,9 @@ public class TestIndexWriter extends Luc
     w.close();
 
     IndexReader ir = IndexReader.open(dir, true);
-    doc = ir.document(0);
-    f = doc.getField("binary");
-    b = f.binaryValue(null).bytes;
+    org.apache.lucene.document.Document doc2 = ir.document(0);
+    org.apache.lucene.document.Field f2 = doc2.getField("binary");
+    b = f2.binaryValue(null).bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
     assertEquals(87, b[0]);
@@ -1000,10 +1030,11 @@ public class TestIndexWriter extends Luc
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    Field f = newField("field", "", Field.Store.NO,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
-    Field f2 = newField("field", "crunch man", Field.Store.NO,
-        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    Field f = newField("field", "", customType);
+    Field f2 = newField("field", "crunch man", customType);
     doc.add(f);
     doc.add(f2);
     w.addDocument(doc);
@@ -1045,8 +1076,14 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     Document doc = new Document();
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    
+    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.addDocument(doc);
@@ -1098,7 +1135,7 @@ public class TestIndexWriter extends Luc
             w = new IndexWriter(dir, conf);
 
             Document doc = new Document();
-            doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField("field", "some text contents", storedTextType));
             for(int i=0;i<100;i++) {
               w.addDocument(doc);
               if (i%10 == 0) {
@@ -1212,9 +1249,18 @@ public class TestIndexWriter extends Luc
       b[i] = (byte) (i+77);
 
     Document doc = new Document();
-    Field f = new Field("binary", b, 10, 17);
+
+    FieldType customType = new FieldType(BinaryField.TYPE_STORED);
+    customType.setTokenized(true);
+    customType.setIndexed(true);
+    
+    Field f = new Field("binary", customType, b, 10, 17);
     f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
-    Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
+
+    FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+    customType2.setStored(true);
+    
+    Field f2 = newField("string", "value", customType2);
     f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
     doc.add(f);
     doc.add(f2);
@@ -1237,9 +1283,9 @@ public class TestIndexWriter extends Luc
     w.close();
 
     IndexReader ir = IndexReader.open(dir, true);
-    doc = ir.document(0);
-    f = doc.getField("binary");
-    b = f.binaryValue(null).bytes;
+    org.apache.lucene.document.Document doc2 = ir.document(0);
+    org.apache.lucene.document.Field f3 = doc2.getField("binary");
+    b = f3.binaryValue(null).bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
     assertEquals(87, b[0]);
@@ -1271,25 +1317,28 @@ public class TestIndexWriter extends Luc
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
-    doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
-    doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+    doc.add(newField("zzz", "a b c", customType));
+    doc.add(newField("aaa", "a b c", customType));
+    doc.add(newField("zzz", "1 2 3", customType));
     w.addDocument(doc);
     IndexReader r = w.getReader();
-    doc = r.document(0);
-    Iterator<Fieldable> it = doc.getFields().iterator();
+    org.apache.lucene.document.Document doc2 = r.document(0);
+    Iterator<Fieldable> it = doc2.getFields().iterator();
     assertTrue(it.hasNext());
-    Field f = (Field) it.next();
+    org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next();
     assertEquals(f.name(), "zzz");
     assertEquals(f.stringValue(), "a b c");
 
     assertTrue(it.hasNext());
-    f = (Field) it.next();
+    f = (org.apache.lucene.document.Field) it.next();
     assertEquals(f.name(), "aaa");
     assertEquals(f.stringValue(), "a b c");
 
     assertTrue(it.hasNext());
-    f = (Field) it.next();
+    f = (org.apache.lucene.document.Field) it.next();
     assertEquals(f.name(), "zzz");
     assertEquals(f.stringValue(), "1 2 3");
     assertFalse(it.hasNext());
@@ -1321,7 +1370,7 @@ public class TestIndexWriter extends Luc
       s.append(' ').append(i);
     }
     Document d = new Document();
-    Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
+    Field f = newField("field", s.toString(), TextField.TYPE_UNSTORED);
     d.add(f);
     w.addDocument(d);
 
@@ -1353,7 +1402,7 @@ public class TestIndexWriter extends Luc
               setMergePolicy(mergePolicy)
       );
       Document doc = new Document();
-      doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("field", "go", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
       IndexReader r;
       if (iter == 0) {
@@ -1416,7 +1465,14 @@ public class TestIndexWriter extends Luc
 
     // First commit
     Document doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(1, IndexReader.listCommits(dir).size());
@@ -1426,7 +1482,7 @@ public class TestIndexWriter extends Luc
 
     // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
     doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(2, IndexReader.listCommits(dir).size());
@@ -1473,14 +1529,19 @@ public class TestIndexWriter extends Luc
     }
 
     Document doc = new Document();
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
     // create as many files as possible
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     // Adding just one document does not call flush yet.
     assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
 
     doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
 
     // The second document should cause a flush.
@@ -1503,7 +1564,12 @@ public class TestIndexWriter extends Luc
         TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
 
     Document doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("c", "val", customType));
     w.addDocument(doc);
     w.addDocument(doc);
     IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
@@ -1530,7 +1596,10 @@ public class TestIndexWriter extends Luc
 
     final List<Integer> fieldIDs = new ArrayList<Integer>();
 
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setTokenized(false);
+    Field idField = newField("id", "", customType);
 
     for(int i=0;i<fieldCount;i++) {
       fieldIDs.add(i);
@@ -1542,6 +1611,8 @@ public class TestIndexWriter extends Luc
       System.out.println("TEST: build index docCount=" + docCount);
     }
 
+    FieldType customType2 = new FieldType();
+    customType2.setStored(true);
     for(int i=0;i<docCount;i++) {
       Document doc = new Document();
       doc.add(idField);
@@ -1556,7 +1627,7 @@ public class TestIndexWriter extends Luc
         final String s;
         if (rand.nextInt(4) != 3) {
           s = _TestUtil.randomUnicodeString(rand, 1000);
-          doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
+          doc.add(newField("f"+field, s, customType2));
         } else {
           s = null;
         }
@@ -1598,7 +1669,7 @@ public class TestIndexWriter extends Luc
           }
           TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
           assertEquals(1, hits.totalHits);
-          Document doc = r.document(hits.scoreDocs[0].doc);
+          org.apache.lucene.document.Document doc = r.document(hits.scoreDocs[0].doc);
           Document docExp = docs.get(testID);
           for(int i=0;i<fieldCount;i++) {
             assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i),  doc.get("f"+i));
@@ -1622,12 +1693,23 @@ public class TestIndexWriter extends Luc
     String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
     BIG=BIG+BIG+BIG+BIG;
 
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setOmitNorms(true);
+    FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+    customType2.setStored(true);
+    customType2.setTokenized(false);
+    FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED);
+    customType3.setStored(true);
+    customType3.setTokenized(false);
+    customType3.setOmitNorms(true);
+    
     for (int i=0; i<2; i++) {
       Document doc = new Document();
-      doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
+      doc.add(new Field("id", customType3, Integer.toString(i)+BIG));
+      doc.add(new Field("str", customType2, Integer.toString(i)+BIG));
+      doc.add(new Field("str2", storedTextType, Integer.toString(i)+BIG));
+      doc.add(new Field("str3", customType, Integer.toString(i)+BIG));
       indexWriter.addDocument(doc);
     }
 
@@ -1701,12 +1783,12 @@ public class TestIndexWriter extends Luc
 
     // This contents produces a too-long term:
     String contents = "abc xyz x" + bigTerm + " another term";
-    doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new TextField("content", contents));
     w.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new TextField("content", "abc bbb ccc"));
     w.addDocument(doc);
 
     IndexReader reader = w.getReader();
@@ -1736,7 +1818,9 @@ public class TestIndexWriter extends Luc
     // Make sure we can add a document with exactly the
     // maximum length term, and search on that term:
     doc = new Document();
-    Field contentField = new Field("content", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setTokenized(false);
+    Field contentField = new Field("content", customType, "");
     doc.add(contentField);
 
     w = new RandomIndexWriter(random, dir);
@@ -1773,7 +1857,7 @@ public class TestIndexWriter extends Luc
     iwc.setReaderTermsIndexDivisor(1);
     IndexWriter writer = new IndexWriter(dir, iwc);
     Document doc = new Document();
-    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     dir.close();

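The TestIndexWriter.java changes above follow one pattern: the legacy Field.Store / Field.Index / Field.TermVector constructor arguments are replaced by an org.apache.lucene.document2.FieldType, usually derived from TextField.TYPE_UNSTORED and then adjusted with setters. A minimal before/after sketch of that pattern, assuming the document2 API exactly as it appears in this diff (field name and value are illustrative):

    // before: org.apache.lucene.document.Field with enum arguments
    doc.add(new Field("content", "aaa bbb", Field.Store.YES,
                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

    // after: org.apache.lucene.document2 -- build a FieldType, then pass it to Field
    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
    customType.setStored(true);                   // was Field.Store.YES
    customType.setStoreTermVectors(true);         // was Field.TermVector.WITH_POSITIONS_OFFSETS
    customType.setStoreTermVectorPositions(true);
    customType.setStoreTermVectorOffsets(true);
    doc.add(new Field("content", customType, "aaa bbb"));
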
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Mon Jul 11 19:12:59 2011
@@ -28,11 +28,11 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -56,17 +56,18 @@ public class TestIndexWriterDelete exten
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1));
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc
-        .add(newField("city", text[i], Field.Store.YES,
-                       Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], custom));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       modifier.addDocument(doc);
     }
     modifier.optimize();
@@ -384,11 +385,11 @@ public class TestIndexWriterDelete exten
   private void updateDoc(IndexWriter modifier, int id, int value)
       throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
-        Field.Index.NOT_ANALYZED));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
+    doc.add(newField("id", String.valueOf(id), custom));
+    doc.add(newField("value", String.valueOf(value), StringField.TYPE_UNSTORED));
     modifier.updateDocument(new Term("id", String.valueOf(id)), doc);
   }
 
@@ -396,11 +397,11 @@ public class TestIndexWriterDelete exten
   private void addDoc(IndexWriter modifier, int id, int value)
       throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
-        Field.Index.NOT_ANALYZED));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
+    doc.add(newField("id", String.valueOf(id), custom));
+    doc.add(newField("value", String.valueOf(value), StringField.TYPE_UNSTORED));
     modifier.addDocument(doc);
   }
 
@@ -434,12 +435,12 @@ public class TestIndexWriterDelete exten
     // TODO: find the resource leak that only occurs sometimes here.
     startDir.setNoDeleteOpenFile(false);
     IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
     for (int i = 0; i < 157; i++) {
       Document d = new Document();
-      d.add(newField("id", Integer.toString(i), Field.Store.YES,
-                      Field.Index.NOT_ANALYZED));
-      d.add(newField("content", "aaa " + i, Field.Store.NO,
-                      Field.Index.ANALYZED));
+      d.add(newField("id", Integer.toString(i), custom));
+      d.add(newField("content", "aaa " + i, TextField.TYPE_UNSTORED));
       writer.addDocument(d);
     }
     writer.close();
@@ -517,10 +518,8 @@ public class TestIndexWriterDelete exten
             for (int i = 0; i < 13; i++) {
               if (updates) {
                 Document d = new Document();
-                d.add(newField("id", Integer.toString(i), Field.Store.YES,
-                                Field.Index.NOT_ANALYZED));
-                d.add(newField("content", "bbb " + i, Field.Store.NO,
-                                Field.Index.ANALYZED));
+                d.add(newField("id", Integer.toString(i), custom));
+                d.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED));
                 modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
               } else { // deletes
                 modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
@@ -707,16 +706,18 @@ public class TestIndexWriterDelete exten
 
     dir.failOn(failure.reset());
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc.add(newField("city", text[i], Field.Store.YES,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], StringField.TYPE_UNSTORED));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       modifier.addDocument(doc);
     }
     // flush (and commit if ac)
@@ -830,16 +831,18 @@ public class TestIndexWriterDelete exten
     modifier.commit();
     dir.failOn(failure.reset());
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc.add(newField("city", text[i], Field.Store.YES,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], custom));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       try {
         modifier.addDocument(doc);
       } catch (IOException io) {
@@ -882,7 +885,7 @@ public class TestIndexWriterDelete exten
     Collections.shuffle(ids, random);
     for(int id : ids) {
       Document doc = new Document();
-      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
+      doc.add(newField("id", ""+id, StringField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
     Collections.shuffle(ids, random);
@@ -916,7 +919,7 @@ public class TestIndexWriterDelete exten
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", TextField.TYPE_UNSTORED));
     int num = atLeast(3);
     for (int iter = 0; iter < num; iter++) {
       int count = 0;

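In TestIndexWriterDelete.java the same migration maps the three legacy Store/Index combinations onto document2 types: NOT_ANALYZED keyword fields start from StringField.TYPE_UNSTORED, stored-but-unindexed fields use a bare FieldType with only setStored(true), and analyzed unstored text passes TextField.TYPE_UNSTORED directly. A minimal sketch under those assumptions, using the newField helper from LuceneTestCase as the tests do (field values are illustrative); all types come from org.apache.lucene.document2:

    // stored keyword (was Field.Store.YES, Field.Index.NOT_ANALYZED)
    FieldType storedKeyword = new FieldType(StringField.TYPE_UNSTORED);
    storedKeyword.setStored(true);

    // stored only, not indexed (was Field.Store.YES, Field.Index.NO)
    FieldType storedOnly = new FieldType();
    storedOnly.setStored(true);

    Document doc = new Document();
    doc.add(newField("id", "42", storedKeyword));
    doc.add(newField("country", "NL", storedOnly));
    // analyzed, not stored (was Field.Store.NO, Field.Index.ANALYZED)
    doc.add(newField("contents", "some text", TextField.TYPE_UNSTORED));
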
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Mon Jul 11 19:12:59 2011
@@ -32,8 +32,11 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
@@ -53,6 +56,37 @@ public class TestIndexWriterExceptions e
   private static class DocCopyIterator implements Iterable<Document> {
     private final Document doc;
     private final int count;
+    
+    /* private field types */
+
+    private static final FieldType custom = new FieldType(TextField.TYPE_UNSTORED);
+    private static final FieldType custom1 = new FieldType(TextField.TYPE_UNSTORED);
+    private static final FieldType custom2 = new FieldType(StringField.TYPE_UNSTORED);
+    private static final FieldType custom3 = new FieldType();
+    private static final FieldType custom4 = new FieldType(StringField.TYPE_UNSTORED);
+    private static final FieldType custom5 = new FieldType(TextField.TYPE_UNSTORED);
+    
+    static {
+      custom.setStored(true);
+
+      custom1.setStoreTermVectors(true);
+      custom1.setStoreTermVectorPositions(true);
+      custom1.setStoreTermVectorOffsets(true);
+
+      custom2.setStored(true);
+      
+      custom3.setStored(true);
+
+      custom4.setStoreTermVectors(true);
+      custom4.setStoreTermVectorPositions(true);
+      custom4.setStoreTermVectorOffsets(true);
+      
+      custom5.setStored(true);
+      custom5.setStoreTermVectors(true);
+      custom5.setStoreTermVectorPositions(true);
+      custom5.setStoreTermVectorOffsets(true);
+    }
 
     public DocCopyIterator(Document doc, int count) {
       this.count = count;
@@ -100,17 +134,17 @@ public class TestIndexWriterExceptions e
 
       final Document doc = new Document();
 
-      doc.add(newField("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(newField("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(newField("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
+      doc.add(newField("content1", "aaa bbb ccc ddd", DocCopyIterator.custom));
+      doc.add(newField("content6", "aaa bbb ccc ddd", DocCopyIterator.custom1));
+      doc.add(newField("content2", "aaa bbb ccc ddd", DocCopyIterator.custom2));
+      doc.add(newField("content3", "aaa bbb ccc ddd", DocCopyIterator.custom3));
 
-      doc.add(newField("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
-      doc.add(newField("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("content4", "aaa bbb ccc ddd", TextField.TYPE_UNSTORED));
+      doc.add(newField("content5", "aaa bbb ccc ddd", StringField.TYPE_UNSTORED));
 
-      doc.add(newField("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("content7", "aaa bbb ccc ddd", DocCopyIterator.custom4));
 
-      final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+      final Field idField = newField("id", "", DocCopyIterator.custom2);
       doc.add(idField);
 
       final long stopTime = System.currentTimeMillis() + 500;
@@ -336,8 +370,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", DocCopyIterator.custom));
     w.addDocument(doc);
     w.doFail = true;
     try {
@@ -356,8 +389,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", DocCopyIterator.custom));
     w.addDocument(doc);
 
     Analyzer analyzer = new Analyzer() {
@@ -370,8 +402,7 @@ public class TestIndexWriterExceptions e
     };
 
     Document crashDoc = new Document();
-    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
-                           Field.Index.ANALYZED));
+    crashDoc.add(newField("crash", "do it on token 4", DocCopyIterator.custom));
     try {
       w.addDocument(crashDoc, analyzer);
       fail("did not hit expected exception");
@@ -412,8 +443,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
     w.doFail = true;
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", DocCopyIterator.custom));
     for(int i=0;i<10;i++)
       try {
         w.addDocument(doc);
@@ -456,8 +486,7 @@ public class TestIndexWriterExceptions e
 
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", contents, TextField.TYPE_UNSTORED));
     try {
       writer.addDocument(doc);
       fail("did not hit expected exception");
@@ -466,14 +495,12 @@ public class TestIndexWriterExceptions e
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
 
     writer.close();
@@ -544,8 +571,7 @@ public class TestIndexWriterExceptions e
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", contents, TextField.TYPE_UNSTORED));
     boolean hitError = false;
     for(int i=0;i<200;i++) {
       try {
@@ -588,14 +614,11 @@ public class TestIndexWriterExceptions e
       lmp.setMergeFactor(Math.max(lmp.getMergeFactor(), 5));
 
       Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       writer.addDocument(doc);
       writer.addDocument(doc);
-      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
+      doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
       try {
         writer.addDocument(doc);
         fail("did not hit expected exception");
@@ -608,8 +631,7 @@ public class TestIndexWriterExceptions e
 
       if (0 == i) {
         doc = new Document();
-        doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
         writer.addDocument(doc);
         writer.addDocument(doc);
       }
@@ -641,8 +663,7 @@ public class TestIndexWriterExceptions e
       writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
           analyzer).setMaxBufferedDocs(10));
       doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -698,14 +719,11 @@ public class TestIndexWriterExceptions e
                 try {
                   for(int iter=0;iter<NUM_ITER;iter++) {
                     Document doc = new Document();
-                    doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
                     writer.addDocument(doc);
                     writer.addDocument(doc);
-                    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
+                    doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
                     try {
                       writer.addDocument(doc);
                       fail("did not hit expected exception");
@@ -714,8 +732,7 @@ public class TestIndexWriterExceptions e
 
                     if (0 == finalI) {
                       doc = new Document();
-                      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
                       writer.addDocument(doc);
                       writer.addDocument(doc);
                     }
@@ -760,8 +777,7 @@ public class TestIndexWriterExceptions e
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
       Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -804,7 +820,7 @@ public class TestIndexWriterExceptions e
   private void addDoc(IndexWriter writer) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
   }
 
@@ -900,8 +916,7 @@ public class TestIndexWriterExceptions e
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)));
       Document doc = new Document();
-      doc.add(newField("field", "a field", Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField("field", "a field", DocCopyIterator.custom));
       w.addDocument(doc);
       dir.failOn(failure);
       try {
@@ -1233,13 +1248,12 @@ public class TestIndexWriterExceptions e
         int numDocs = 10 + random.nextInt(30);
         for (int i = 0; i < numDocs; i++) {
           Document doc = new Document();
-          Field field = newField(random, "field", "a field", Field.Store.YES,
-              Field.Index.ANALYZED);
+          Field field = newField(random, "field", "a field", DocCopyIterator.custom);
           doc.add(field);
           // random TV
           try {
             w.addDocument(doc);
-            assertFalse(field.isTermVectorStored());
+            assertFalse(field.storeTermVectors());
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
           }
@@ -1250,19 +1264,17 @@ public class TestIndexWriterExceptions e
             
         }
         Document document = new Document();
-        document.add(new Field("field", "a field", Field.Store.YES,
-            Field.Index.ANALYZED));
+        document.add(new Field("field", DocCopyIterator.custom, "a field"));
         w.addDocument(document);
 
         for (int i = 0; i < numDocs; i++) {
           Document doc = new Document();
-          Field field = newField(random, "field", "a field", Field.Store.YES,
-              Field.Index.ANALYZED);
+          Field field = newField(random, "field", "a field", DocCopyIterator.custom);
           doc.add(field);
           // random TV
           try {
             w.addDocument(doc);
-            assertFalse(field.isTermVectorStored());
+            assertFalse(field.storeTermVectors());
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
           }
@@ -1272,8 +1284,7 @@ public class TestIndexWriterExceptions e
           }
         }
         document = new Document();
-        document.add(new Field("field", "a field", Field.Store.YES,
-            Field.Index.ANALYZED));
+        document.add(new Field("field", DocCopyIterator.custom, "a field"));
         w.addDocument(document);
         w.close();
         IndexReader reader = IndexReader.open(dir);
@@ -1327,7 +1338,7 @@ public class TestIndexWriterExceptions e
     final int numDocs1 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
     
@@ -1335,10 +1346,10 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<7;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
       if (docCount == 4) {
-        Field f = newField("crash", "", Field.Index.ANALYZED);
+        Field f = newField("crash", "", TextField.TYPE_UNSTORED);
         doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
@@ -1357,7 +1368,7 @@ public class TestIndexWriterExceptions e
     final int numDocs2 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs2;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1385,7 +1396,7 @@ public class TestIndexWriterExceptions e
     final int numDocs1 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1395,16 +1406,16 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<numDocs2;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("subid", "subs", Field.Index.NOT_ANALYZED));
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("subid", "subs", StringField.TYPE_UNSTORED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
     }
     w.addDocuments(docs);
 
     final int numDocs3 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs3;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1414,10 +1425,10 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<limit;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
       if (docCount == crashAt) {
-        Field f = newField("crash", "", Field.Index.ANALYZED);
+        Field f = newField("crash", "", TextField.TYPE_UNSTORED);
         doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
@@ -1437,7 +1448,7 @@ public class TestIndexWriterExceptions e
     final int numDocs4 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs4;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 

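The hunks above all apply the same substitution: the old per-field Store/Index flags are replaced either by a shared FieldType constant (TextField.TYPE_UNSTORED, StringField.TYPE_UNSTORED) or by a FieldType derived from one of them, and the document2 Field constructor takes the type before the value. A minimal sketch of the recurring mapping, using only the API as it appears in this patch (field names and values are placeholders):

    // old API (org.apache.lucene.document): behaviour passed as flags per field
    doc.add(newField("content", "good content", Field.Store.NO, Field.Index.ANALYZED));

    // new API (org.apache.lucene.document2): behaviour captured in a FieldType;
    // the direct constructor order is (name, type, value), e.g.
    //   new Field("field", DocCopyIterator.custom, "a field")
    doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
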
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Mon Jul 11 19:12:59 2011
@@ -20,8 +20,9 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 
@@ -220,7 +221,7 @@ public class TestIndexWriterMergePolicy 
 
   private void addDoc(IndexWriter writer) throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
   }
 

Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java Mon Jul 11 19:12:59 2011
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -84,7 +84,7 @@ public class TestIndexWriterMerging exte
     int max = reader.maxDoc();
     for (int i = 0; i < max; i++)
     {
-      Document temp = reader.document(i);
+      org.apache.lucene.document.Document temp = reader.document(i);
       //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
       //compare the index doc number to the value that it should be
       if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
@@ -107,10 +107,12 @@ public class TestIndexWriterMerging exte
             setMergePolicy(newLogMergePolicy(2))
     );
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
     for (int i = start; i < (start + numDocs); i++)
     {
       Document temp = new Document();
-      temp.add(newField("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      temp.add(newField("count", (""+i), custom));
 
       writer.addDocument(temp);
     }
@@ -129,12 +131,19 @@ public class TestIndexWriterMerging exte
     Document document = new Document();
 
     document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<10;i++)
       writer.addDocument(document);
@@ -175,12 +184,19 @@ public class TestIndexWriterMerging exte
     Document document = new Document();
 
     document = new Document();
-    Field storedField = newField("stored", "stored", Store.YES,
-                                  Index.NO);
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Store.NO, Index.NOT_ANALYZED,
-                                      TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
       writer.addDocument(document);
@@ -223,13 +239,19 @@ public class TestIndexWriterMerging exte
 
     Document document = new Document();
 
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
     document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
       writer.addDocument(document);
@@ -292,8 +314,11 @@ public class TestIndexWriterMerging exte
     IndexWriter iw = new IndexWriter(dir, conf);
     iw.setInfoStream(VERBOSE ? System.out : null);
     Document document = new Document();
-    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
-                           Field.TermVector.YES));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    
+    document.add(newField("tvtest", "a b c", customType));
     for(int i=0;i<177;i++)
       iw.addDocument(document);
     iw.close();

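The same customType/customType1 blocks are rebuilt verbatim in three of the merging tests above. A sketch of what they encode, factored into a helper whose name (untokenizedWithVectors) is hypothetical and not part of this patch:

    // old: Field.Index.NOT_ANALYZED + Field.TermVector.WITH_POSITIONS_OFFSETS
    // new: start from TextField.TYPE_UNSTORED and flip the relevant knobs
    private static FieldType untokenizedWithVectors() {
      FieldType t = new FieldType(TextField.TYPE_UNSTORED);
      t.setTokenized(false);               // NOT_ANALYZED
      t.setStoreTermVectors(true);         // TermVector...
      t.setStoreTermVectorPositions(true); // ...WITH_POSITIONS...
      t.setStoreTermVectorOffsets(true);   // ...OFFSETS
      return t;
    }
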
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Mon Jul 11 19:12:59 2011
@@ -20,8 +20,10 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
@@ -466,7 +468,11 @@ public class TestIndexWriterOnDiskFull e
     _TestUtil.keepFullyDeletedSegments(w);
 
     Document doc = new Document();
-    doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(false);
+    
+    doc.add(newField("f", "doctor who", customType));
     w.addDocument(doc);
     w.commit();
 
@@ -502,7 +508,11 @@ public class TestIndexWriterOnDiskFull e
         .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
     dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
     final Document doc = new Document();
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(false);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
     try {
       writer.addDocument(doc);
       fail("did not hit disk full");
@@ -532,15 +542,17 @@ public class TestIndexWriterOnDiskFull e
   private void addDoc(IndexWriter writer) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
   }
   
   private void addDocWithIndex(IndexWriter writer, int index) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(false);
+      doc.add(newField("content", "aaa " + index, customType));
+      doc.add(newField("id", "" + index, customType));
       writer.addDocument(doc);
   }
 }

Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java Mon Jul 11 19:12:59 2011
@@ -20,11 +20,8 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.StringField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -37,7 +34,7 @@ public class TestIndexWriterOptimize ext
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
     final int incrMin = TEST_NIGHTLY ? 15 : 40;
     for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
       LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -78,7 +75,7 @@ public class TestIndexWriterOptimize ext
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
 
     LogDocMergePolicy ldmp = new LogDocMergePolicy();
     ldmp.setMinMergeDocs(1);
@@ -183,7 +180,7 @@ public class TestIndexWriterOptimize ext
               setMergePolicy(newLogMergePolicy(51))
       );
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Store.NO, Index.NOT_ANALYZED));
+      doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED));
       for(int i=0;i<100;i++)
         writer.addDocument(doc);
       writer.optimize(false);

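The optimize test shows the other half of the split: fields previously indexed NOT_ANALYZED or NOT_ANALYZED_NO_NORMS move to StringField.TYPE_UNSTORED, while analyzed fields move to TextField.TYPE_UNSTORED. A minimal sketch of that substitution as it appears in these hunks (whether StringField also omits norms, as NOT_ANALYZED_NO_NORMS did, is not visible in this diff and is left as an assumption):

    // old: index the whole value as a single, unanalyzed term
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));

    // new: StringField indexes the value as one term, no analysis
    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
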
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Mon Jul 11 19:12:59 2011
@@ -26,11 +26,11 @@ import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -141,9 +141,9 @@ public class TestIndexWriterReader exten
 
     String id10 = r1.document(10).getField("id").stringValue();
     
-    Document newDoc = r1.document(10);
+    org.apache.lucene.document.Document newDoc = r1.document(10);
     newDoc.removeField("id");
-    newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
+    newDoc.add(newField("id", Integer.toString(8000), org.apache.lucene.document.Field.Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED));
     writer.updateDocument(new Term("id", id10), newDoc);
     assertFalse(r1.isCurrent());
 
@@ -167,7 +167,7 @@ public class TestIndexWriterReader exten
 
     writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     assertTrue(r2.isCurrent());
     assertTrue(r3.isCurrent());
@@ -189,14 +189,14 @@ public class TestIndexWriterReader exten
     
     IndexWriter writer = new IndexWriter(dir, iwc);
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     
     iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     writer = new IndexWriter(dir, iwc);
     doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     IndexReader nrtReader = writer.getReader();
     assertTrue(nrtReader.isCurrent());
     writer.addDocument(doc);
@@ -273,9 +273,9 @@ public class TestIndexWriterReader exten
     assertEquals(100, index2df);
 
     // verify the docs are from different indexes
-    Document doc5 = r1.document(5);
+    org.apache.lucene.document.Document doc5 = r1.document(5);
     assertEquals("index1", doc5.get("indexname"));
-    Document doc150 = r1.document(150);
+    org.apache.lucene.document.Document doc150 = r1.document(150);
     assertEquals("index2", doc150.get("indexname"));
     r1.close();
     writer.close();
@@ -578,16 +578,27 @@ public class TestIndexWriterReader exten
   public static Document createDocument(int n, String indexName, int numFields) {
     StringBuilder sb = new StringBuilder();
     Document doc = new Document();
-    doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+
+    FieldType customType1 = new FieldType(StringField.TYPE_UNSTORED);
+    customType1.setStored(true);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    doc.add(new Field("id", customType1, Integer.toString(n)));
+    doc.add(new Field("indexname", customType1, indexName));
     sb.append("a");
     sb.append(n);
-    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field1", customType, sb.toString()));
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
-                        Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field" + (i + 1), customType, sb.toString()));
     }
     return doc;
   }
@@ -915,8 +926,8 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
-    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
+    Field id = newField("id", "", StringField.TYPE_UNSTORED);
     doc.add(id);
     id.setValue("0");
     w.addDocument(doc);
@@ -939,8 +950,8 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
-    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
+    Field id = newField("id", "", StringField.TYPE_UNSTORED);
     doc.add(id);
     id.setValue("0");
     w.addDocument(doc);
@@ -997,7 +1008,9 @@ public class TestIndexWriterReader exten
     );
 
     Document doc = new Document();
-    doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    FieldType customType = new FieldType(StringField.TYPE_UNSTORED);
+    customType.setStored(false);
+    doc.add(newField("foo", "bar", customType));
     for(int i=0;i<20;i++) {
       w.addDocument(doc);
     }
@@ -1022,7 +1035,7 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, conf);
     Document doc = new Document();
-    doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
+    doc.add(new TextField("f", "val"));
     w.addDocument(doc);
     IndexReader r = IndexReader.open(w, true).getSequentialSubReaders()[0];
     try {

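The reader test also shows why several retrieval sites now spell out org.apache.lucene.document.Document: indexing moves to document2, but stored documents still come back as the pre-existing document class, so the two types coexist in one test. A minimal sketch of that asymmetry, assuming a writer and reader already in scope and the document2 API as used in this patch:

    // indexing side uses document2 types
    FieldType storedText = new FieldType(TextField.TYPE_UNSTORED);
    storedText.setStored(true);
    org.apache.lucene.document2.Document doc = new org.apache.lucene.document2.Document();
    doc.add(new Field("indexname", storedText, "index1"));
    writer.addDocument(doc);

    // retrieval side still hands back the old Document class,
    // hence the fully qualified names in the hunks above
    org.apache.lucene.document.Document stored = reader.document(0);
    assertEquals("index1", stored.get("indexname"));
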
Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java Mon Jul 11 19:12:59 2011
@@ -24,8 +24,11 @@ import java.util.Random;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -234,10 +237,10 @@ public class TestIndexWriterUnicode exte
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a a\uffffb", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     doc = new Document();
-    doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     IndexReader r = w.getReader();
     assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
@@ -252,14 +255,17 @@ public class TestIndexWriterUnicode exte
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new TestIndexWriter.StringSplitAnalyzer()));
     Document doc = new Document();
 
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    
     final int count = utf8Data.length/2;
     for(int i=0;i<count;i++)
-      doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("f" + i, utf8Data[2*i], customType));
     w.addDocument(doc);
     w.close();
 
     IndexReader ir = IndexReader.open(dir, true);
-    Document doc2 = ir.document(0);
+    org.apache.lucene.document.Document doc2 = ir.document(0);
     for(int i=0;i<count;i++) {
       assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
       assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
@@ -276,7 +282,7 @@ public class TestIndexWriterUnicode exte
     RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
     Document d = new Document();
     // Single segment
-    Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field f = newField("f", "", StringField.TYPE_UNSTORED);
     d.add(f);
     char[] chars = new char[2];
     final Set<String> allTerms = new HashSet<String>();

Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java Mon Jul 11 19:12:59 2011
@@ -20,8 +20,9 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -54,7 +55,13 @@ public class TestIndexWriterWithThreads 
     public void run() {
 
       final Document doc = new Document();
-      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      
+      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
 
       int idUpto = 0;
       int fullCount = 0;
@@ -288,7 +295,12 @@ public class TestIndexWriterWithThreads 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
       .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
     final Document doc = new Document();
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
 
     for(int i=0;i<6;i++)
       writer.addDocument(doc);

Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java Mon Jul 11 19:12:59 2011
@@ -17,9 +17,9 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.util.*;
 import org.apache.lucene.store.*;
 
@@ -43,7 +43,9 @@ public class TestIsCurrent extends Lucen
 
     // write document
     Document doc = new Document();
-    doc.add(newField("UUID", "1", Store.YES, Index.ANALYZED));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    doc.add(newField("UUID", "1", customType));
     writer.addDocument(doc);
     writer.commit();
   }

Modified: lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestLazyBug.java?rev=1145297&r1=1145296&r2=1145297&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestLazyBug.java (original)
+++ lucene/dev/branches/fieldtype/lucene/src/test/org/apache/lucene/index/TestLazyBug.java Mon Jul 11 19:12:59 2011
@@ -22,7 +22,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.AfterClass;
@@ -87,8 +91,7 @@ public class TestLazyBug extends LuceneT
           doc.add(newField("f"+f,
                             data[f % data.length]
                             + '#' + data[random.nextInt(data.length)],
-                            Field.Store.NO,
-                            Field.Index.ANALYZED));
+                            TextField.TYPE_UNSTORED));
         }
         writer.addDocument(doc);
       }
@@ -102,7 +105,7 @@ public class TestLazyBug extends LuceneT
   public void doTest(int[] docs) throws Exception {
     IndexReader reader = IndexReader.open(directory, true);
     for (int i = 0; i < docs.length; i++) {
-      Document d = reader.document(docs[i], SELECTOR);
+      org.apache.lucene.document.Document d = reader.document(docs[i], SELECTOR);
       d.get(MAGIC_FIELD);
 
       List<Fieldable> fields = d.getFields();


