lucene-java-commits mailing list archives

From: mikemcc...@apache.org
Subject: svn commit: r620576 [3/3] - in /lucene/java/trunk: ./ docs/ src/java/org/apache/lucene/index/ src/java/org/apache/lucene/store/ src/site/src/documentation/content/xdocs/ src/test/org/apache/lucene/index/ src/test/org/apache/lucene/store/ src/test/org/a...
Date: Mon, 11 Feb 2008 18:56:13 GMT
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Mon Feb 11 10:56:09 2008
@@ -270,13 +270,10 @@
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 2);
-      } else {
+      if (!autoCommit)
         // If we are not auto committing then there should
         // be exactly 2 commits (one per close above):
         assertEquals(2, policy.numOnCommit);
-      }
 
       // Simplistic check: just verify all segments_N's still
       // exist, and, I can open a reader on each:
@@ -334,13 +331,10 @@
       writer.close();
 
       assertEquals(2, policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 2);
-      } else {
+      if (!autoCommit)
         // If we are not auto committing then there should
         // be exactly 2 commits (one per close above):
         assertEquals(2, policy.numOnCommit);
-      }
 
       // Simplistic check: just verify the index is in fact
       // readable:
@@ -459,11 +453,8 @@
       writer.close();
 
       assertEquals(2*(N+2), policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 2*(N+2)-1);
-      } else {
+      if (!autoCommit)
         assertEquals(2*(N+2)-1, policy.numOnCommit);
-      }
 
       IndexSearcher searcher = new IndexSearcher(dir);
       Hits hits = searcher.search(query);
@@ -565,11 +556,8 @@
       }
 
       assertEquals(1+3*(N+1), policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 3*(N+1)-1);
-      } else {
+      if (!autoCommit)
         assertEquals(2*(N+1), policy.numOnCommit);
-      }
 
       IndexSearcher searcher = new IndexSearcher(dir);
       Hits hits = searcher.search(query);

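The numOnInit/numOnCommit counters asserted in these hunks are maintained by the test's custom deletion policies. A minimal sketch of that mechanism, assuming the Lucene 2.x IndexDeletionPolicy interface (the class name is illustrative, not the test's actual policy):

    import java.io.IOException;
    import java.util.List;
    import org.apache.lucene.index.IndexDeletionPolicy;

    class CountingKeepAllPolicy implements IndexDeletionPolicy {
      int numOnInit;
      int numOnCommit;
      public void onInit(List commits) throws IOException {
        numOnInit++;     // called once when a writer opens the index
      }
      public void onCommit(List commits) throws IOException {
        numOnCommit++;   // called on every commit; deleting nothing keeps all commit points
      }
    }

With autoCommit=false the writer commits only on close(), which is why the updated assertions can pin numOnCommit to an exact count rather than the lower bound previously used in the autoCommit case.
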
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Mon Feb 11 10:56:09 2008
@@ -18,17 +18,8 @@
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import java.util.Vector;
-import java.util.Arrays;
-import java.io.ByteArrayOutputStream;
-import java.io.ObjectOutputStream;
-import java.io.IOException;
-import java.io.File;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
@@ -77,8 +68,8 @@
     String[] files = dir.list();
 
     /*
-    for(int i=0;i<files.length;i++) {
-      System.out.println(i + ": " + files[i]);
+    for(int j=0;j<files.length;j++) {
+      System.out.println(j + ": " + files[j]);
     }
     */
 
@@ -145,8 +136,8 @@
     copyFile(dir, "_0.cfs", "deletable");
 
     // Create some old segments file:
-    copyFile(dir, "segments_a", "segments");
-    copyFile(dir, "segments_a", "segments_2");
+    copyFile(dir, "segments_3", "segments");
+    copyFile(dir, "segments_3", "segments_2");
 
     // Create a bogus cfs file shadowing a non-cfs segment:
     copyFile(dir, "_2.cfs", "_3.cfs");

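The copyFile helper called above is defined elsewhere in the test; something along these lines (an assumed sketch using only the Directory stream APIs that appear elsewhere in this commit) duplicates a file under a new name:

    void copyFile(Directory dir, String src, String dest) throws IOException {
      IndexInput in = dir.openInput(src);
      IndexOutput out = dir.createOutput(dest);
      byte[] b = new byte[1024];
      long remainder = in.length();
      while (remainder > 0) {
        int len = (int) Math.min(b.length, remainder);
        in.readBytes(b, 0, len);
        out.writeBytes(b, len);
        remainder -= len;
      }
      in.close();
      out.close();
    }
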
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java Mon Feb 11 10:56:09 2008
@@ -202,7 +202,7 @@
 
 class IndexThread extends Thread {
 
-  private final static int ITERATIONS = 500;       // iterations of thread test
+  private final static int TEST_SECONDS = 3;       // how many seconds to run each test 
 
   static int id = 0;
   static Stack idStack = new Stack();
@@ -224,8 +224,10 @@
   }
   
   public void run() {
+
+    final long endTime = System.currentTimeMillis() + 1000*TEST_SECONDS;
     try {
-      for(int i = 0; i < ITERATIONS; i++) {
+      while(System.currentTimeMillis() < endTime) {
         int rand = random.nextInt(101);
         if (rand < 5) {
           index.optimize();

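Replacing the fixed iteration count with a deadline bounds the stress test by wall-clock time, so it does comparable work on fast and slow machines instead of running arbitrarily long on slow ones. Abstracted, the pattern is:

    final long endTime = System.currentTimeMillis() + 1000 * TEST_SECONDS;
    while (System.currentTimeMillis() < endTime) {
      // one randomized index operation per pass; doRandomOperation() is a
      // hypothetical stand-in for the add/delete/optimize mix in run()
      doRandomOperation();
    }
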
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Mon Feb 11 10:56:09 2008
@@ -463,7 +463,7 @@
         fileDirName.mkdir();
       }
       try {
-        IndexReader reader = IndexReader.open(fileDirName);
+        IndexReader.open(fileDirName);
         fail("opening IndexReader on empty directory failed to produce FileNotFoundException");
       } catch (FileNotFoundException e) {
         // GOOD
@@ -779,6 +779,11 @@
       // Iterate w/ ever increasing free disk space:
       while(!done) {
         MockRAMDirectory dir = new MockRAMDirectory(startDir);
+
+        // If IndexReader hits disk full, it can write to
+        // the same files again.
+        dir.setPreventDoubleWrite(false);
+
         IndexReader reader = IndexReader.open(dir);
 
         // For each disk size, first try to commit against
@@ -838,6 +843,7 @@
           } catch (IOException e) {
             if (debug) {
               System.out.println("  hit IOException: " + e);
+              e.printStackTrace(System.out);
             }
             err = e;
             if (1 == x) {
@@ -855,7 +861,7 @@
           String[] startFiles = dir.list();
           SegmentInfos infos = new SegmentInfos();
           infos.read(dir);
-          IndexFileDeleter d = new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+          new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
           String[] endFiles = dir.list();
 
           Arrays.sort(startFiles);
@@ -1030,7 +1036,7 @@
                           "deletetest");
       Directory dir = FSDirectory.getDirectory(dirFile);
       try {
-        IndexReader reader = IndexReader.open(dir);
+        IndexReader.open(dir);
         fail("expected FileNotFoundException");
       } catch (FileNotFoundException e) {
         // expected
@@ -1040,7 +1046,7 @@
 
       // Make sure we still get a CorruptIndexException (not NPE):
       try {
-        IndexReader reader = IndexReader.open(dir);
+        IndexReader.open(dir);
         fail("expected FileNotFoundException");
       } catch (FileNotFoundException e) {
         // expected

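setPreventDoubleWrite refers to a check added to MockRAMDirectory later in this commit: index files are meant to be write-once, so the mock now throws if the same name is ever passed to createOutput twice. A test that deliberately retries after a simulated disk-full has to opt out, roughly:

    MockRAMDirectory dir = new MockRAMDirectory(startDir);
    // A retry after a simulated disk-full may legitimately recreate the
    // same file, so relax the mock's write-once check:
    dir.setPreventDoubleWrite(false);
    IndexReader reader = IndexReader.open(dir);
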
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Mon Feb 11 10:56:09 2008
@@ -651,19 +651,19 @@
       writer.setMaxBufferedDocs(2);
 
       for(int iter=0;iter<10;iter++) {
-
         for(int i=0;i<19;i++)
           writer.addDocument(doc);
 
-        writer.flush();
+        ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
+        writer.commit();
 
         SegmentInfos sis = new SegmentInfos();
-        ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
         sis.read(dir);
 
         final int segCount = sis.size();
 
         writer.optimize(7);
+        writer.commit();
 
         sis = new SegmentInfos();
         ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync();
@@ -1045,7 +1045,7 @@
      * and add docs to it.
      */
     public void testCommitOnCloseAbort() throws IOException {
-      Directory dir = new RAMDirectory();      
+      MockRAMDirectory dir = new MockRAMDirectory();      
       IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       writer.setMaxBufferedDocs(10);
       for (int i = 0; i < 14; i++) {
@@ -1086,6 +1086,11 @@
       // and all is good:
       writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
       writer.setMaxBufferedDocs(10);
+
+      // On abort, writer in fact may write to the same
+      // segments_N file:
+      dir.setPreventDoubleWrite(false);
+
       for(int i=0;i<12;i++) {
         for(int j=0;j<17;j++) {
           addDoc(writer);
@@ -1273,48 +1278,48 @@
       writer.setMaxBufferedDocs(10);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
 
-      long lastGen = -1;
+      int lastFlushCount = -1;
       for(int j=1;j<52;j++) {
         Document doc = new Document();
         doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
         writer.addDocument(doc);
         _TestUtil.syncConcurrentMerges(writer);
-        long gen = SegmentInfos.generationFromSegmentsFileName(SegmentInfos.getCurrentSegmentFileName(dir.list()));
+        int flushCount = writer.getFlushCount();
         if (j == 1)
-          lastGen = gen;
+          lastFlushCount = flushCount;
         else if (j < 10)
           // No new files should be created
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
         else if (10 == j) {
-          assertTrue(gen > lastGen);
-          lastGen = gen;
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
           writer.setRAMBufferSizeMB(0.000001);
           writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (j < 20) {
-          assertTrue(gen > lastGen);
-          lastGen = gen;
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
         } else if (20 == j) {
           writer.setRAMBufferSizeMB(16);
           writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
-          lastGen = gen;
+          lastFlushCount = flushCount;
         } else if (j < 30) {
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
         } else if (30 == j) {
           writer.setRAMBufferSizeMB(0.000001);
           writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (j < 40) {
-          assertTrue(gen> lastGen);
-          lastGen = gen;
+          assertTrue(flushCount> lastFlushCount);
+          lastFlushCount = flushCount;
         } else if (40 == j) {
           writer.setMaxBufferedDocs(10);
           writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
-          lastGen = gen;
+          lastFlushCount = flushCount;
         } else if (j < 50) {
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
           writer.setMaxBufferedDocs(10);
           writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (50 == j) {
-          assertTrue(gen > lastGen);
+          assertTrue(flushCount > lastFlushCount);
         }
       }
       writer.close();
@@ -1334,46 +1339,46 @@
         writer.addDocument(doc);
       }
       
-      long lastGen = -1;
+      int lastFlushCount = -1;
       for(int j=1;j<52;j++) {
         writer.deleteDocuments(new Term("field", "aaa" + j));
         _TestUtil.syncConcurrentMerges(writer);
-        long gen = SegmentInfos.generationFromSegmentsFileName(SegmentInfos.getCurrentSegmentFileName(dir.list()));
+        int flushCount = writer.getFlushCount();
         if (j == 1)
-          lastGen = gen;
+          lastFlushCount = flushCount;
         else if (j < 10) {
           // No new files should be created
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
         } else if (10 == j) {
-          assertTrue(gen > lastGen);
-          lastGen = gen;
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
           writer.setRAMBufferSizeMB(0.000001);
           writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (j < 20) {
-          assertTrue(gen > lastGen);
-          lastGen = gen;
+          assertTrue(flushCount > lastFlushCount);
+          lastFlushCount = flushCount;
         } else if (20 == j) {
           writer.setRAMBufferSizeMB(16);
           writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
-          lastGen = gen;
+          lastFlushCount = flushCount;
         } else if (j < 30) {
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
         } else if (30 == j) {
           writer.setRAMBufferSizeMB(0.000001);
           writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (j < 40) {
-          assertTrue(gen> lastGen);
-          lastGen = gen;
+          assertTrue(flushCount> lastFlushCount);
+          lastFlushCount = flushCount;
         } else if (40 == j) {
           writer.setMaxBufferedDeleteTerms(10);
           writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
-          lastGen = gen;
+          lastFlushCount = flushCount;
         } else if (j < 50) {
-          assertEquals(gen, lastGen);
+          assertEquals(flushCount, lastFlushCount);
           writer.setMaxBufferedDeleteTerms(10);
           writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
         } else if (50 == j) {
-          assertTrue(gen > lastGen);
+          assertTrue(flushCount > lastFlushCount);
         }
       }
       writer.close();
@@ -1831,11 +1836,18 @@
     public void eval(MockRAMDirectory dir)  throws IOException {
       if (doFail) {
         StackTraceElement[] trace = new Exception().getStackTrace();
+        boolean sawAppend = false;
+        boolean sawFlush = false;
         for (int i = 0; i < trace.length; i++) {
-          if ("org.apache.lucene.index.DocumentsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()) && count++ == 30) {
-            doFail = false;
-            throw new IOException("now failing during flush");
-          }
+          if ("org.apache.lucene.index.DocumentsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
+            sawAppend = true;
+          if ("doFlush".equals(trace[i].getMethodName()))
+            sawFlush = true;
+        }
+
+        if (sawAppend && sawFlush && count++ >= 30) {
+          doFail = false;
+          throw new IOException("now failing during flush");
         }
       }
     }
@@ -2263,6 +2275,7 @@
         try {
           writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
         } catch (IOException ioe) {
+          //ioe.printStackTrace(System.out);
           if (ioe.getMessage().startsWith("fake disk full at") ||
               ioe.getMessage().equals("now failing on purpose")) {
             diskFull = true;
@@ -2282,6 +2295,7 @@
             break;
           }
         } catch (Throwable t) {
+          //t.printStackTrace(System.out);
           if (noErrors) {
             System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
             t.printStackTrace(System.out);
@@ -2300,7 +2314,7 @@
   public void testCloseWithThreads() throws IOException {
     int NUM_THREADS = 3;
 
-    for(int iter=0;iter<50;iter++) {
+    for(int iter=0;iter<20;iter++) {
       MockRAMDirectory dir = new MockRAMDirectory();
       IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
       ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2310,7 +2324,6 @@
       writer.setMergeFactor(4);
 
       IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-      boolean diskFull = false;
 
       for(int i=0;i<NUM_THREADS;i++)
         threads[i] = new IndexerThread(writer, false);
@@ -2319,7 +2332,7 @@
         threads[i].start();
 
       try {
-        Thread.sleep(50);
+        Thread.sleep(100);
       } catch (InterruptedException ie) {
         Thread.currentThread().interrupt();
       }
@@ -2403,7 +2416,6 @@
       dir.setMaxSizeInBytes(4*1024+20*iter);
 
       IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-      boolean diskFull = false;
 
       for(int i=0;i<NUM_THREADS;i++)
         threads[i] = new IndexerThread(writer, true);
@@ -2441,7 +2453,7 @@
   private static class FailOnlyOnAbortOrFlush extends MockRAMDirectory.Failure {
     private boolean onlyOnce;
     public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
-      this.onlyOnce = true;
+      this.onlyOnce = onlyOnce;
     }
     public void eval(MockRAMDirectory dir)  throws IOException {
       if (doFail) {
@@ -2501,7 +2513,6 @@
       writer.setMergeFactor(4);
 
       IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-      boolean diskFull = false;
 
       for(int i=0;i<NUM_THREADS;i++)
         threads[i] = new IndexerThread(writer, true);
@@ -2538,6 +2549,8 @@
         writer.close(false);
         success = true;
       } catch (IOException ioe) {
+        failure.clearDoFail();
+        writer.close(false);
       }
 
       if (success) {
@@ -2583,7 +2596,7 @@
   private static class FailOnlyInCloseDocStore extends MockRAMDirectory.Failure {
     private boolean onlyOnce;
     public FailOnlyInCloseDocStore(boolean onlyOnce) {
-      this.onlyOnce = true;
+      this.onlyOnce = onlyOnce;
     }
     public void eval(MockRAMDirectory dir)  throws IOException {
       if (doFail) {
@@ -2623,7 +2636,7 @@
   private static class FailOnlyInWriteSegment extends MockRAMDirectory.Failure {
     private boolean onlyOnce;
     public FailOnlyInWriteSegment(boolean onlyOnce) {
-      this.onlyOnce = true;
+      this.onlyOnce = onlyOnce;
     }
     public void eval(MockRAMDirectory dir)  throws IOException {
       if (doFail) {
@@ -2678,6 +2691,125 @@
     IndexReader reader = IndexReader.open(dir);
     Term t = new Term("field", "x");
     assertEquals(1, reader.docFreq(t));
+    reader.close();
+    dir.close();
+  }
+
+  // LUCENE-1044: Simulate checksum error in segments_N
+  public void testSegmentsChecksumError() throws IOException {
+    Directory dir = new MockRAMDirectory();
+
+    IndexWriter writer = null;
+
+    writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 1 but got " + gen, gen > 1);
+
+    final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
+    IndexInput in = dir.openInput(segmentsFileName);
+    IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
+    out.copyBytes(in, in.length()-1);
+    byte b = in.readByte();
+    out.writeByte((byte) (1+b));
+    out.close();
+    in.close();
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir);
+    } catch (IOException e) {
+      e.printStackTrace(System.out);
+      fail("segmentInfos failed to retry fallback to correct segments_N file");
+    }
+    reader.close();
+  }
+
+  // LUCENE-1044: test writer.commit() when ac=false
+  public void testForceCommit() throws IOException {
+    Directory dir = new MockRAMDirectory();
+
+    IndexWriter writer  = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    writer.setMaxBufferedDocs(2);
+    writer.setMergeFactor(5);
+
+    for (int i = 0; i < 23; i++)
+      addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir);
+    assertEquals(0, reader.numDocs());
+    writer.commit();
+    IndexReader reader2 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(23, reader2.numDocs());
+    reader.close();
+
+    for (int i = 0; i < 17; i++)
+      addDoc(writer);
+    assertEquals(23, reader2.numDocs());
+    reader2.close();
+    reader = IndexReader.open(dir);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+    writer.commit();
+
+    reader = IndexReader.open(dir);
+    assertEquals(40, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  // Throws IOException during MockRAMDirectory.sync
+  private static class FailOnlyInSync extends MockRAMDirectory.Failure {
+    boolean didFail;
+    public void eval(MockRAMDirectory dir)  throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        for (int i = 0; i < trace.length; i++) {
+          if (doFail && "org.apache.lucene.store.MockRAMDirectory".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
+            didFail = true;
+            throw new IOException("now failing on purpose during sync");
+          }
+        }
+      }
+    }
+  }
+
+  // LUCENE-1044: test exception during sync
+  public void testExceptionDuringSync() throws IOException {
+    MockRAMDirectory dir = new MockRAMDirectory();
+    FailOnlyInSync failure = new FailOnlyInSync();
+    dir.failOn(failure);
+
+    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    failure.setDoFail();
+
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+    // We expect sync exceptions in the merge threads
+    cms.setSuppressExceptions();
+    writer.setMergeScheduler(cms);
+    writer.setMaxBufferedDocs(2);
+    writer.setMergeFactor(5);
+
+    for (int i = 0; i < 23; i++)
+      addDoc(writer);
+
+    cms.sync();
+    assertTrue(failure.didFail);
+    failure.clearDoFail();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir);
+    assertEquals(23, reader.numDocs());
     reader.close();
     dir.close();
   }

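The failure classes in this file (FailOnlyOnAbortOrFlush, FailOnlyInCloseDocStore, FailOnlyInWriteSegment, and the new FailOnlyInSync) share one fault-injection idiom: MockRAMDirectory invokes Failure.eval() from its I/O paths, and the subclass walks the current stack trace so the exception fires only when the write originates in a particular method. A minimal sketch of the idiom, with a hypothetical target method:

    private static class FailOnlyInTargetMethod extends MockRAMDirectory.Failure {
      public void eval(MockRAMDirectory dir) throws IOException {
        if (doFail) {
          StackTraceElement[] trace = new Exception().getStackTrace();
          for (int i = 0; i < trace.length; i++) {
            // Fail only if the mock was called from the method under test:
            if ("targetMethod".equals(trace[i].getMethodName()))
              throw new IOException("now failing on purpose in targetMethod");
          }
        }
      }
    }

A test arms the hook with dir.failOn(failure) plus failure.setDoFail(), and disarms it with failure.clearDoFail(), as testExceptionDuringSync does above.
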
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Mon Feb 11 10:56:09 2008
@@ -30,7 +30,6 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.RAMDirectory;
 
 public class TestIndexWriterDelete extends LuceneTestCase {
 
@@ -45,7 +44,7 @@
     for(int pass=0;pass<2;pass++) {
       boolean autoCommit = (0==pass);
 
-      Directory dir = new RAMDirectory();
+      Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir, autoCommit,
                                              new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       modifier.setUseCompoundFile(true);
@@ -65,28 +64,17 @@
         modifier.addDocument(doc);
       }
       modifier.optimize();
-
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       Term term = new Term("city", "Amsterdam");
       int hitCount = getHitCount(dir, term);
       assertEquals(1, hitCount);
-      if (!autoCommit) {
-        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        modifier.setUseCompoundFile(true);
-      }
       modifier.deleteDocuments(term);
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
       hitCount = getHitCount(dir, term);
       assertEquals(0, hitCount);
 
-      if (autoCommit) {
-        modifier.close();
-      }
+      modifier.close();
       dir.close();
     }
   }
@@ -96,7 +84,7 @@
     for(int pass=0;pass<2;pass++) {
       boolean autoCommit = (0==pass);
 
-      Directory dir = new RAMDirectory();
+      Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir, autoCommit,
                                              new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       modifier.setMaxBufferedDocs(2);
@@ -108,38 +96,26 @@
       for (int i = 0; i < 7; i++) {
         addDoc(modifier, ++id, value);
       }
-      modifier.flush();
+      modifier.commit();
 
       assertEquals(0, modifier.getNumBufferedDocuments());
       assertTrue(0 < modifier.getSegmentCount());
 
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       IndexReader reader = IndexReader.open(dir);
       assertEquals(7, reader.numDocs());
       reader.close();
 
-      if (!autoCommit) {
-        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        modifier.setMaxBufferedDocs(2);
-        modifier.setMaxBufferedDeleteTerms(2);
-      }
-
       modifier.deleteDocuments(new Term("value", String.valueOf(value)));
       modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       reader = IndexReader.open(dir);
       assertEquals(0, reader.numDocs());
       reader.close();
-      if (autoCommit) {
-        modifier.close();
-      }
+      modifier.close();
       dir.close();
     }
   }
@@ -148,7 +124,7 @@
   public void testRAMDeletes() throws IOException {
     for(int pass=0;pass<2;pass++) {
       boolean autoCommit = (0==pass);
-      Directory dir = new RAMDirectory();
+      Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir, autoCommit,
                                              new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       modifier.setMaxBufferedDocs(4);
@@ -169,9 +145,7 @@
       assertEquals(0, modifier.getSegmentCount());
       modifier.flush();
 
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       IndexReader reader = IndexReader.open(dir);
       assertEquals(1, reader.numDocs());
@@ -179,9 +153,7 @@
       int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
       assertEquals(1, hitCount);
       reader.close();
-      if (autoCommit) {
-        modifier.close();
-      }
+      modifier.close();
       dir.close();
     }
   }
@@ -191,7 +163,7 @@
     for(int pass=0;pass<2;pass++) {
       boolean autoCommit = (0==pass);
 
-      Directory dir = new RAMDirectory();
+      Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir, autoCommit,
                                              new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       modifier.setMaxBufferedDocs(100);
@@ -208,23 +180,18 @@
       for (int i = 0; i < 5; i++) {
         addDoc(modifier, ++id, value);
       }
-      modifier.flush();
+      modifier.commit();
 
       for (int i = 0; i < 5; i++) {
         addDoc(modifier, ++id, value);
       }
       modifier.deleteDocuments(new Term("value", String.valueOf(value)));
 
-      modifier.flush();
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       IndexReader reader = IndexReader.open(dir);
       assertEquals(5, reader.numDocs());
-      if (autoCommit) {
-        modifier.close();
-      }
+      modifier.close();
     }
   }
 
@@ -232,7 +199,7 @@
   public void testBatchDeletes() throws IOException {
     for(int pass=0;pass<2;pass++) {
       boolean autoCommit = (0==pass);
-      Directory dir = new RAMDirectory();
+      Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir, autoCommit,
                                              new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       modifier.setMaxBufferedDocs(2);
@@ -244,29 +211,17 @@
       for (int i = 0; i < 7; i++) {
         addDoc(modifier, ++id, value);
       }
-      modifier.flush();
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       IndexReader reader = IndexReader.open(dir);
       assertEquals(7, reader.numDocs());
       reader.close();
       
-      if (!autoCommit) {
-        modifier = new IndexWriter(dir, autoCommit,
-                                   new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        modifier.setMaxBufferedDocs(2);
-        modifier.setMaxBufferedDeleteTerms(2);
-      }
-
       id = 0;
       modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
       modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
 
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
 
       reader = IndexReader.open(dir);
       assertEquals(5, reader.numDocs());
@@ -276,23 +231,13 @@
       for (int i = 0; i < terms.length; i++) {
         terms[i] = new Term("id", String.valueOf(++id));
       }
-      if (!autoCommit) {
-        modifier = new IndexWriter(dir, autoCommit,
-                                   new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        modifier.setMaxBufferedDocs(2);
-        modifier.setMaxBufferedDeleteTerms(2);
-      }
       modifier.deleteDocuments(terms);
-      if (!autoCommit) {
-        modifier.close();
-      }
+      modifier.commit();
       reader = IndexReader.open(dir);
       assertEquals(2, reader.numDocs());
       reader.close();
 
-      if (autoCommit) {
-        modifier.close();
-      }
+      modifier.close();
       dir.close();
     }
   }
@@ -338,7 +283,7 @@
       boolean autoCommit = (0==pass);
 
       // First build up a starting index:
-      RAMDirectory startDir = new RAMDirectory();
+      MockRAMDirectory startDir = new MockRAMDirectory();
       IndexWriter writer = new IndexWriter(startDir, autoCommit,
                                            new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       for (int i = 0; i < 157; i++) {
@@ -444,38 +389,10 @@
             }
           }
 
-          // Whether we succeeded or failed, check that all
-          // un-referenced files were in fact deleted (ie,
-          // we did not create garbage). Just create a
-          // new IndexFileDeleter, have it delete
-          // unreferenced files, then verify that in fact
-          // no files were deleted:
-          String[] startFiles = dir.list();
-          SegmentInfos infos = new SegmentInfos();
-          infos.read(dir);
-          new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
-          String[] endFiles = dir.list();
-
-          Arrays.sort(startFiles);
-          Arrays.sort(endFiles);
-
-          // for(int i=0;i<startFiles.length;i++) {
-          // System.out.println(" startFiles: " + i + ": " + startFiles[i]);
-          // }
-
-          if (!Arrays.equals(startFiles, endFiles)) {
-            String successStr;
-            if (success) {
-              successStr = "success";
-            } else {
-              successStr = "IOException";
-              err.printStackTrace();
-            }
-            fail("reader.close() failed to delete unreferenced files after "
-                 + successStr + " (" + diskFree + " bytes): before delete:\n    "
-                 + arrayToString(startFiles) + "\n  after delete:\n    "
-                 + arrayToString(endFiles));
-          }
+          // If the close() succeeded, make sure there are
+          // no unreferenced files.
+          if (success)
+            TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
 
           // Finally, verify index is not corrupt, and, if
           // we succeeded, we see all docs changed, and if
@@ -618,12 +535,8 @@
       // flush (and commit if ac)
 
       modifier.optimize();
+      modifier.commit();
 
-      // commit if !ac
-
-      if (!autoCommit) {
-        modifier.close();
-      }
       // one of the two files hits
 
       Term term = new Term("city", "Amsterdam");
@@ -632,11 +545,6 @@
 
       // open the writer again (closed above)
 
-      if (!autoCommit) {
-        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        modifier.setUseCompoundFile(true);
-      }
-
       // delete the doc
       // max buf del terms is two, so this is buffered
 
@@ -648,7 +556,7 @@
       Document doc = new Document();
       modifier.addDocument(doc);
 
-      // flush the changes, the buffered deletes, and the new doc
+      // commit the changes, the buffered deletes, and the new doc
 
       // The failure object will fail on the first write after the del
       // file gets created when processing the buffered delete
@@ -659,38 +567,28 @@
       // in the !ac case, a new segments file won't be created but in
       // this case, creation of the cfs file happens next so we need
       // the doc (to test that it's okay that we don't lose deletes if
-      // failing while creating the cfs file
+      // failing while creating the cfs file)
 
       boolean failed = false;
       try {
-        modifier.flush();
+        modifier.commit();
       } catch (IOException ioe) {
         failed = true;
       }
 
       assertTrue(failed);
 
-      // The flush above failed, so we need to retry it (which will
+      // The commit above failed, so we need to retry it (which will
       // succeed, because the failure is a one-shot)
 
-      if (!autoCommit) {
-        modifier.close();
-      } else {
-        modifier.flush();
-      }
+      modifier.commit();
 
       hitCount = getHitCount(dir, term);
 
-      // If the delete was not cleared then hit count will
-      // be 0.  With autoCommit=false, we hit the exception
-      // on creating the compound file, so the delete was
-      // flushed successfully.
-      assertEquals(autoCommit ? 1:0, hitCount);
-
-      if (autoCommit) {
-        modifier.close();
-      }
+      // Make sure the delete was successfully flushed:
+      assertEquals(0, hitCount);
 
+      modifier.close();
       dir.close();
     }
   }

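Most of this file's churn is one simplification applied repeatedly: before this commit, making buffered deletes visible with autoCommit=false meant closing and reopening the writer around every batch; with the explicit commit() introduced here, both autoCommit modes collapse to a single pattern, roughly:

    IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(),
                                           true, IndexWriter.MaxFieldLength.LIMITED);
    // ... add documents ...
    modifier.deleteDocuments(new Term("city", "Amsterdam"));
    modifier.commit();   // deletes become visible without closing the writer
    IndexReader reader = IndexReader.open(dir);
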
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java Mon Feb 11 10:56:09 2008
@@ -46,8 +46,8 @@
     doc2 = new Document();
     DocHelper.setupDoc(doc1);
     DocHelper.setupDoc(doc2);
-    SegmentInfo info1 = DocHelper.writeDoc(dir, doc1);
-    SegmentInfo info2 = DocHelper.writeDoc(dir, doc2);
+    DocHelper.writeDoc(dir, doc1);
+    DocHelper.writeDoc(dir, doc2);
     sis = new SegmentInfos();
     sis.read(dir);
   }
@@ -102,7 +102,7 @@
     if (reader instanceof MultiReader)
       // MultiReader does not "own" the directory so it does
       // not write the changes to sis on commit:
-      sis.write(dir);
+      sis.commit(dir);
 
     sis.read(dir);
     reader = openReader();
@@ -115,7 +115,7 @@
     if (reader instanceof MultiReader)
       // MultiReader does not "own" the directory so it does
       // not write the changes to sis on commit:
-      sis.write(dir);
+      sis.commit(dir);
     sis.read(dir);
     reader = openReader();
     assertEquals( 1, reader.numDocs() );

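The write-to-commit rename mirrors a SegmentInfos API change elsewhere in this commit; the apparent intent (an inference from the surrounding changes, not spelled out here) is that write() only records a new segments_N while commit() also publishes it as the index's current commit point. In outline:

    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);     // load the current segments_N
    // ... changes made through a MultiReader, which does not own dir ...
    sis.commit(dir);   // write the next segments_N and make it the visible commit
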
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java Mon Feb 11 10:56:09 2008
@@ -20,12 +20,9 @@
 import org.apache.lucene.store.*;
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.queryParser.*;
 
-import org.apache.lucene.util.LuceneTestCase;
-
 import java.util.Random;
 import java.io.File;
 
@@ -123,6 +120,7 @@
     modifier.setMaxBufferedDocs(10);
 
     TimedThread[] threads = new TimedThread[4];
+    int numThread = 0;
 
     if (mergeScheduler != null)
       modifier.setMergeScheduler(mergeScheduler);
@@ -130,34 +128,30 @@
     // One modifier that writes 10 docs then removes 5, over
     // and over:
     IndexerThread indexerThread = new IndexerThread(modifier, threads);
-    threads[0] = indexerThread;
+    threads[numThread++] = indexerThread;
     indexerThread.start();
-      
+    
     IndexerThread indexerThread2 = new IndexerThread(modifier, threads);
-    threads[2] = indexerThread2;
+    threads[numThread++] = indexerThread2;
     indexerThread2.start();
       
     // Two searchers that constantly just re-instantiate the
     // searcher:
     SearcherThread searcherThread1 = new SearcherThread(directory, threads);
-    threads[3] = searcherThread1;
+    threads[numThread++] = searcherThread1;
     searcherThread1.start();
 
     SearcherThread searcherThread2 = new SearcherThread(directory, threads);
-    threads[3] = searcherThread2;
+    threads[numThread++] = searcherThread2;
     searcherThread2.start();
 
-    indexerThread.join();
-    indexerThread2.join();
-    searcherThread1.join();
-    searcherThread2.join();
+    for(int i=0;i<numThread;i++)
+      threads[i].join();
 
     modifier.close();
 
-    assertTrue("hit unexpected exception in indexer", !indexerThread.failed);
-    assertTrue("hit unexpected exception in indexer2", !indexerThread2.failed);
-    assertTrue("hit unexpected exception in search1", !searcherThread1.failed);
-    assertTrue("hit unexpected exception in search2", !searcherThread2.failed);
+    for(int i=0;i<numThread;i++)
+      assertTrue(!((TimedThread) threads[i]).failed);
 
     //System.out.println("    Writer: " + indexerThread.count + " iterations");
     //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java Mon Feb 11 10:56:09 2008
@@ -39,10 +39,10 @@
   private final static int NUM_THREADS = 3;
   //private final static int NUM_THREADS = 5;
 
-  private final static int NUM_ITER = 2;
+  private final static int NUM_ITER = 1;
   //private final static int NUM_ITER = 10;
 
-  private final static int NUM_ITER2 = 2;
+  private final static int NUM_ITER2 = 1;
   //private final static int NUM_ITER2 = 5;
 
   private boolean failed;
@@ -138,8 +138,8 @@
   */
   public void testThreadedOptimize() throws Exception {
     Directory directory = new MockRAMDirectory();
-    runTest(directory, false, null);
-    runTest(directory, true, null);
+    runTest(directory, false, new SerialMergeScheduler());
+    runTest(directory, true, new SerialMergeScheduler());
     runTest(directory, false, new ConcurrentMergeScheduler());
     runTest(directory, true, new ConcurrentMergeScheduler());
     directory.close();
@@ -150,8 +150,8 @@
 
     String dirName = tempDir + "/luceneTestThreadedOptimize";
     directory = FSDirectory.getDirectory(dirName);
-    runTest(directory, false, null);
-    runTest(directory, true, null);
+    runTest(directory, false, new SerialMergeScheduler());
+    runTest(directory, true, new SerialMergeScheduler());
     runTest(directory, false, new ConcurrentMergeScheduler());
     runTest(directory, true, new ConcurrentMergeScheduler());
     directory.close();

Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java Mon Feb 11 10:56:09 2008
@@ -24,7 +24,10 @@
 import java.util.Random;
 import java.util.Map;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
 import java.util.ArrayList;
+import java.util.Arrays;
 
 /**
  * This is a subclass of RAMDirectory that adds methods
@@ -40,6 +43,10 @@
   double randomIOExceptionRate;
   Random randomState;
   boolean noDeleteOpenFile = true;
+  boolean preventDoubleWrite = true;
+  private Set unSyncedFiles;
+  private Set createdFiles;
+  volatile boolean crashed;
 
   // NOTE: we cannot initialize the Map here due to the
   // order in which our constructor actually does this
@@ -47,29 +54,78 @@
   // like super is called, then our members are initialized:
   Map openFiles;
 
+  private void init() {
+    if (openFiles == null)
+      openFiles = new HashMap();
+    if (createdFiles == null)
+      createdFiles = new HashSet();
+    if (unSyncedFiles == null)
+      unSyncedFiles = new HashSet();
+  }
+
   public MockRAMDirectory() {
     super();
-    if (openFiles == null) {
-      openFiles = new HashMap();
-    }
+    init();
   }
   public MockRAMDirectory(String dir) throws IOException {
     super(dir);
-    if (openFiles == null) {
-      openFiles = new HashMap();
-    }
+    init();
   }
   public MockRAMDirectory(Directory dir) throws IOException {
     super(dir);
-    if (openFiles == null) {
-      openFiles = new HashMap();
-    }
+    init();
   }
   public MockRAMDirectory(File dir) throws IOException {
     super(dir);
-    if (openFiles == null) {
+    init();
+  }
+
+  /** If set to true, we throw an IOException if the same
+   *  file is opened by createOutput, ever. */
+  public void setPreventDoubleWrite(boolean value) {
+    preventDoubleWrite = value;
+  }
+
+  public synchronized void sync(String name) throws IOException {
+    maybeThrowDeterministicException();
+    if (crashed)
+      throw new IOException("cannot sync after crash");
+    if (unSyncedFiles.contains(name))
+      unSyncedFiles.remove(name);
+  }
+
+  /** Simulates a crash of OS or machine by overwriting
+   *  unsynced files. */
+  public void crash() throws IOException {
+    synchronized(this) {
+      crashed = true;
       openFiles = new HashMap();
     }
+    Iterator it = unSyncedFiles.iterator();
+    unSyncedFiles = new HashSet();
+    int count = 0;
+    while(it.hasNext()) {
+      String name = (String) it.next();
+      RAMFile file = (RAMFile) fileMap.get(name);
+      if (count % 3 == 0) {
+        deleteFile(name, true);
+      } else if (count % 3 == 1) {
+        // Zero out file entirely
+        final int numBuffers = file.numBuffers();
+        for(int i=0;i<numBuffers;i++) {
+          byte[] buffer = file.getBuffer(i);
+          Arrays.fill(buffer, (byte) 0);
+        }
+      } else if (count % 3 == 2) {
+        // Truncate the file:
+        file.setLength(file.getLength()/2);
+      }
+      count++;
+    }
+  }
+
+  public synchronized void clearCrash() throws IOException {
+    crashed = false;
   }
 
   public void setMaxSizeInBytes(long maxSize) {
@@ -126,24 +182,41 @@
   }
 
   public synchronized void deleteFile(String name) throws IOException {
-    synchronized(openFiles) {
-      if (noDeleteOpenFile && openFiles.containsKey(name)) {
-        throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot delete");
+    deleteFile(name, false);
+  }
+
+  private synchronized void deleteFile(String name, boolean forced) throws IOException {
+    if (crashed && !forced)
+      throw new IOException("cannot delete after crash");
+
+    if (unSyncedFiles.contains(name))
+      unSyncedFiles.remove(name);
+    if (!forced) {
+      synchronized(openFiles) {
+        if (noDeleteOpenFile && openFiles.containsKey(name)) {
+          throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot delete");
+        }
       }
     }
     super.deleteFile(name);
   }
 
   public IndexOutput createOutput(String name) throws IOException {
-    if (openFiles == null) {
-      openFiles = new HashMap();
-    }
+    if (crashed)
+      throw new IOException("cannot createOutput after crash");
+    init();
     synchronized(openFiles) {
+      if (preventDoubleWrite && createdFiles.contains(name))
+        throw new IOException("file \"" + name + "\" was already written to");
       if (noDeleteOpenFile && openFiles.containsKey(name))
        throw new IOException("MockRAMDirectory: file \"" + name + "\" is still open: cannot overwrite");
     }
     RAMFile file = new RAMFile(this);
     synchronized (this) {
+      if (crashed)
+        throw new IOException("cannot createOutput after crash");
+      unSyncedFiles.add(name);
+      createdFiles.add(name);
       RAMFile existing = (RAMFile)fileMap.get(name);
       // Enforce write once:
       if (existing!=null && !name.equals("segments.gen"))

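crash() lets tests simulate power loss: each file written since the last sync() is deleted, zeroed, or truncated (rotating through the three cases), while synced files are left intact. A plausible usage sketch, assuming close() syncs the committed files as the rest of this commit suggests:

    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                         IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
    writer.addDocument(doc);
    writer.close();      // commits and syncs the index files
    dir.crash();         // mangle anything still unsynced
    dir.clearCrash();    // allow I/O on the directory again
    IndexReader reader = IndexReader.open(dir);  // the committed doc survives
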
Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMInputStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMInputStream.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMInputStream.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMInputStream.java Mon Feb 11 10:56:09 2008
@@ -45,11 +45,14 @@
     if (!isClone) {
       synchronized(dir.openFiles) {
         Integer v = (Integer) dir.openFiles.get(name);
-        if (v.intValue() == 1) {
-          dir.openFiles.remove(name);
-        } else {
-          v = new Integer(v.intValue()-1);
-          dir.openFiles.put(name, v);
+        // Could be null when MockRAMDirectory.crash() was called
+        if (v != null) {
+          if (v.intValue() == 1) {
+            dir.openFiles.remove(name);
+          } else {
+            v = new Integer(v.intValue()-1);
+            dir.openFiles.put(name, v);
+          }
         }
       }
     }

Modified: lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMOutputStream.java Mon Feb 11 10:56:09 2008
@@ -63,6 +63,11 @@
     long freeSpace = dir.maxSize - dir.sizeInBytes();
     long realUsage = 0;
 
+    // If MockRAMDir crashed since we were opened, then
+    // don't write anything:
+    if (dir.crashed)
+      throw new IOException("MockRAMDirectory was crashed");
+
     // Enforce disk full:
     if (dir.maxSize != 0 && freeSpace <= len) {
       // Compute the real disk free.  This will greatly slow

Modified: lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java?rev=620576&r1=620575&r2=620576&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java Mon Feb 11 10:56:09 2008
@@ -46,6 +46,9 @@
 
   protected void tearDown() throws Exception {
     if (ConcurrentMergeScheduler.anyUnhandledExceptions()) {
+      // Clear the failure so that we don't just keep
+      // failing subsequent test cases
+      ConcurrentMergeScheduler.clearUnhandledExceptions();
       fail("ConcurrentMergeScheduler hit unhandled exceptions");
     }
   }


