From: rmuir@apache.org
To: commits@lucene.apache.org
Reply-To: dev@lucene.apache.org
Date: Mon, 06 Jun 2011 03:40:54 -0000
Subject: svn commit: r1132517 [1/3] - in /lucene/dev/trunk/lucene/src: test-framework/org/apache/lucene/analysis/ test-framework/org/apache/lucene/index/ test-framework/org/apache/lucene/util/ test/org/apache/lucene/index/ test/org/apache/lucene/index/codecs/pr...

Author: rmuir
Date: Mon Jun  6 03:40:53 2011
New Revision: 1132517

URL: http://svn.apache.org/viewvc?rev=1132517&view=rev
Log:
LUCENE-3175: speed up core tests

Added:
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java   (with props)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java   (with props)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java   (with props)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java   (with props)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java   (with props)
Modified:
    lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java
    lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
    lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestLongPostings.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitNorms.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestArrayUtil.java

Modified: lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java Mon Jun  6 03:40:53 2011
@@ -23,6 +23,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;

 /**
@@ -127,13 +128,16 @@ public final class MockAnalyzer extends
   private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) {
     Integer val = previousMappings.get(fieldName);
     if (val == null) {
-      switch(random.nextInt(3)) {
-        case 0: val = -1; // no payloads
-                break;
-        case 1: val = Integer.MAX_VALUE; // variable length payload
-                break;
-        case 2: val = random.nextInt(12); // fixed length payload
-                break;
+      val = -1; // no payloads
+      if (LuceneTestCase.TEST_NIGHTLY || random.nextInt(20) == 0) {
+        switch(random.nextInt(3)) {
+          case 0: val = -1; // no payloads
+                  break;
+          case 1: val = Integer.MAX_VALUE; // variable length payload
+                  break;
+          case 2: val = random.nextInt(12); // fixed length payload
+                  break;
+        }
       }
       previousMappings.put(fieldName, val); // save it so we are consistent for this field
     }
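The hunk above is the commit's core trick: default to the cheap configuration, and only roll for an expensive one (payloads here) when the nightly flag is set or a rare random draw comes up. A minimal, self-contained sketch of that gating pattern follows; the `tests.nightly` property and class name are illustrative assumptions, not Lucene's actual plumbing:

    import java.util.Random;

    class PayloadPicker {
      // Nightly runs exercise the expensive path every time; ordinary runs
      // only hit it for roughly 1 in 20 fields.
      static final boolean TEST_NIGHTLY = Boolean.getBoolean("tests.nightly");

      static int pickPayloadLength(Random random) {
        int length = -1; // default: no payloads (the cheap path)
        if (TEST_NIGHTLY || random.nextInt(20) == 0) {
          switch (random.nextInt(3)) {
            case 0: length = -1; break;                 // still no payloads
            case 1: length = Integer.MAX_VALUE; break;  // variable-length payload
            case 2: length = random.nextInt(12); break; // fixed-length payload
          }
        }
        return length;
      }
    }

Note how the real code then caches the result per field (previousMappings), so a field's payload behavior stays consistent for the life of the analyzer.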

Modified: lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java (original)
+++ lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java Mon Jun  6 03:40:53 2011
@@ -272,7 +272,7 @@ public class RandomIndexWriter implement
   public void close() throws IOException {
     // if someone isn't using getReader() API, we want to be sure to
     // maybeOptimize since presumably they might open a reader on the dir.
-    if (getReaderCalled == false && r.nextInt(4) == 2) {
+    if (getReaderCalled == false && r.nextInt(8) == 2) {
       doRandomOptimize();
     }
     w.close();

Modified: lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/trunk/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java Mon Jun  6 03:40:53 2011
@@ -836,14 +836,22 @@ public abstract class LuceneTestCase ext
       c.setMergeScheduler(new SerialMergeScheduler());
     }
     if (r.nextBoolean()) {
-      if (r.nextInt(20) == 17) {
-        c.setMaxBufferedDocs(2);
+      if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+        // crazy value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 7));
       } else {
-        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 1000));
+        // reasonable value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 8, 1000));
       }
     }
     if (r.nextBoolean()) {
-      c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000));
+      if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+        // crazy value
+        c.setTermIndexInterval(random.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
+      } else {
+        // reasonable value
+        c.setTermIndexInterval(_TestUtil.nextInt(r, 32, 128));
+      }
     }
     if (r.nextBoolean()) {
       c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(_TestUtil.nextInt(r, 1, 20)));
@@ -874,22 +882,22 @@ public abstract class LuceneTestCase ext
     LogMergePolicy logmp = r.nextBoolean() ?
        new LogDocMergePolicy() : new LogByteSizeMergePolicy();
     logmp.setUseCompoundFile(r.nextBoolean());
     logmp.setCalibrateSizeByDeletes(r.nextBoolean());
-    if (r.nextInt(3) == 2) {
-      logmp.setMergeFactor(2);
+    if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 4));
     } else {
-      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 20));
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 5, 50));
     }
     return logmp;
   }

   public static TieredMergePolicy newTieredMergePolicy(Random r) {
     TieredMergePolicy tmp = new TieredMergePolicy();
-    if (r.nextInt(3) == 2) {
-      tmp.setMaxMergeAtOnce(2);
-      tmp.setMaxMergeAtOnceExplicit(2);
+    if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
     } else {
-      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 20));
-      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 30));
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 5, 50));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 5, 50));
     }
     tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
     tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
@@ -1052,8 +1060,13 @@ public abstract class LuceneTestCase ext
   /** Returns a new field instance, using the specified random.
    * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
   public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
+    if (!TEST_NIGHTLY && random.nextInt(20) > 0) {
+      // most of the time, don't modify the params
+      return new Field(name, value, store, index, tv);
+    }
+
     if (!index.isIndexed())
-      return new Field(name, value, store, index);
+      return new Field(name, value, store, index, tv);

     if (!store.isStored() && random.nextBoolean())
       store = Store.YES; // randomly store it
@@ -1115,7 +1128,7 @@ public abstract class LuceneTestCase ext
   };

   public static String randomDirectory(Random random) {
-    if (random.nextInt(10) == 0) {
+    if (random.nextInt(20) == 0) {
       return CORE_DIRECTORIES[random.nextInt(CORE_DIRECTORIES.length)];
     } else {
       return "RAMDirectory";
@@ -1179,7 +1192,7 @@ public abstract class LuceneTestCase ext
   public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
     if (random.nextBoolean()) {
-      if (maybeWrap && random.nextBoolean()) {
+      if (maybeWrap && random.nextInt(20) == 0) {
         return new IndexSearcher(new SlowMultiReaderWrapper(r));
       } else {
         return new IndexSearcher(r);
@@ -1408,6 +1421,10 @@ public abstract class LuceneTestCase ext
     Codec codec = previousMappings.get(name);
     if (codec == null) {
       codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.hashCode()) % knownCodecs.size());
+      if (codec instanceof SimpleTextCodec && perFieldSeed % 5 != 0) {
+        // make simpletext rarer, choose again
+        codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.toUpperCase(Locale.ENGLISH).hashCode()) % knownCodecs.size());
+      }
       previousMappings.put(name, codec);
     }
     return codec.name;
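The last hunk keeps the per-field codec assignment deterministic in the seed and field name, but re-rolls with a second hash when the first pick lands on the slow SimpleText codec, keeping it for only about one seed in five. A sketch of that idea under assumed types (plain strings stand in for real codec objects; the class name is hypothetical):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Locale;
    import java.util.Map;

    class PerFieldCodecChooser {
      private final long seed;
      private final List<String> knownCodecs; // e.g. ["Standard", "Pulsing", "SimpleText"]
      private final Map<String, String> previousMappings = new HashMap<String, String>();

      PerFieldCodecChooser(long seed, List<String> knownCodecs) {
        this.seed = seed;
        this.knownCodecs = knownCodecs;
      }

      synchronized String codecFor(String field) {
        String codec = previousMappings.get(field);
        if (codec == null) {
          codec = knownCodecs.get((int) (Math.abs(seed ^ field.hashCode()) % knownCodecs.size()));
          if (codec.equals("SimpleText") && seed % 5 != 0) {
            // re-roll with a different, still deterministic hash to make it rarer
            codec = knownCodecs.get((int) (Math.abs(seed ^ field.toUpperCase(Locale.ENGLISH).hashCode()) % knownCodecs.size()));
          }
          previousMappings.put(field, codec); // same field -> same codec for the whole run
        }
        return codec;
      }
    }

Because both picks are pure functions of (seed, field), a failing run can still be reproduced exactly from its seed.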
03:40:53 2011
@@ -155,6 +155,7 @@ public class Test2BTerms extends LuceneT

     MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
     dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
+    dir.setCheckIndexOnClose(false); // don't double-checkindex
     //Directory dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));

     if (true) {

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Mon Jun  6 03:40:53 2011
@@ -816,8 +816,6 @@ public class TestAddIndexes extends Luce

     c.joinThreads();

-    _TestUtil.checkIndex(c.dir2);
-
     c.closeDir();

     assertTrue(c.failures.size() == 0);
@@ -908,8 +906,6 @@ public class TestAddIndexes extends Luce
     if (VERBOSE) {
       System.out.println("TEST: done join threads");
     }
-    _TestUtil.checkIndex(c.dir2);
-
     c.closeDir();

     assertTrue(c.failures.size() == 0);
@@ -933,8 +929,6 @@ public class TestAddIndexes extends Luce

     c.joinThreads();

-    _TestUtil.checkIndex(c.dir2);
-
     c.closeDir();

     assertTrue(c.failures.size() == 0);
@@ -1039,7 +1033,6 @@ public class TestAddIndexes extends Luce
     writer.addIndexes(aux, aux2);
     assertEquals(190, writer.maxDoc());
     writer.close();
-    _TestUtil.checkIndex(dir, provider);

     dir.close();
     aux.close();
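The deleted _TestUtil.checkIndex calls were paying for verification twice: the test framework's mock directory already runs an index check as the directory is closed, which is also why Test2BTerms above opts out entirely (checking a two-billion-term index on close would be enormous). A sketch of that close-time-check design, with hypothetical names (CheckingDirectory is not a real Lucene class):

    // Hypothetical wrapper: run the expensive consistency check exactly once,
    // when the directory is closed, and let huge tests opt out.
    class CheckingDirectory implements AutoCloseable {
      private boolean checkIndexOnClose = true;

      void setCheckIndexOnClose(boolean v) { checkIndexOnClose = v; }

      @Override
      public void close() throws Exception {
        if (checkIndexOnClose) {
          checkIndex(); // centralized check; tests need not call it themselves
        }
      }

      private void checkIndex() {
        // ... walk segments and verify postings, stored fields, vectors, etc.
      }
    }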
System.out : null);
     w.optimize();
     w.close();
-
-    _TestUtil.checkIndex(dir);
     dir.close();
     _TestUtil.rmDir(oldIndxeDir);
@@ -207,8 +205,6 @@ public class TestBackwardsCompatibility
         TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     w.addIndexes(dir);
     w.close();
-
-    _TestUtil.checkIndex(targetDir);
     dir.close();
     targetDir.close();
@@ -229,9 +225,7 @@ public class TestBackwardsCompatibility
     w.addIndexes(reader);
     w.close();
     reader.close();
-
-    _TestUtil.checkIndex(targetDir);
-
+
     dir.close();
     targetDir.close();
     _TestUtil.rmDir(oldIndxeDir);
@@ -743,8 +737,6 @@ public class TestBackwardsCompatibility
         .upgrade();

     checkAllSegmentsUpgraded(dir);
-
-    _TestUtil.checkIndex(dir);
     dir.close();
     _TestUtil.rmDir(oldIndxeDir);

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java Mon Jun  6 03:40:53 2011
@@ -214,7 +214,7 @@ public class TestDocTermOrds extends Luc
   public void testRandom() throws Exception {
     MockDirectoryWrapper dir = newDirectory();

-    final int NUM_TERMS = 100 * RANDOM_MULTIPLIER;
+    final int NUM_TERMS = (TEST_NIGHTLY ? 100 : 20) * RANDOM_MULTIPLIER;
     final Set terms = new HashSet();
     while(terms.size() < NUM_TERMS) {
       final String s = _TestUtil.randomRealisticUnicodeString(random);
@@ -226,7 +226,7 @@ public class TestDocTermOrds extends Luc
     final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
     Arrays.sort(termsArray);

-    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
+    final int NUM_DOCS = (TEST_NIGHTLY ? 1000 : 100) * RANDOM_MULTIPLIER;
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random));
@@ -264,7 +264,7 @@ public class TestDocTermOrds extends Luc
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED);
+        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
@@ -317,7 +317,7 @@ public class TestDocTermOrds extends Luc
     }
     final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);

-    final int NUM_TERMS = 100 * RANDOM_MULTIPLIER;
+    final int NUM_TERMS = (TEST_NIGHTLY ? 100 : 20) * RANDOM_MULTIPLIER;
     final Set terms = new HashSet();
     while(terms.size() < NUM_TERMS) {
       final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random);
@@ -329,7 +329,7 @@ public class TestDocTermOrds extends Luc
     final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
     Arrays.sort(termsArray);

-    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
+    final int NUM_DOCS = (TEST_NIGHTLY ? 1000 : 100) * RANDOM_MULTIPLIER;

     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random));
@@ -367,7 +367,7 @@ public class TestDocTermOrds extends Luc
       }
       for(int ord : ordsForDocSet) {
         ordsForDoc[upto++] = ord;
-        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED);
+        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS);
         if (VERBOSE) {
           System.out.println("  f=" + termsArray[ord].utf8ToString());
         }
@@ -458,9 +458,9 @@ public class TestDocTermOrds extends Luc
     final TermsEnum te = dto.getOrdTermsEnum(r);
     if (te == null) {
       if (prefixRef == null) {
-        assertNull(r.fields().terms("field"));
+        assertNull(MultiFields.getTerms(r, "field"));
       } else {
-        Terms terms = r.fields().terms("field");
+        Terms terms = MultiFields.getTerms(r, "field");
         if (terms != null) {
           TermsEnum termsEnum = terms.iterator();
           TermsEnum.SeekStatus result = termsEnum.seek(prefixRef, false);
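TestDocTermOrds shows the sizing idiom this commit applies across the board: (TEST_NIGHTLY ? big : small) * RANDOM_MULTIPLIER. Small defaults keep ordinary runs fast, nightly builds restore the heavyweight sizes, and the multiplier lets anyone scale a run up without editing tests. A tiny sketch under assumed property names (tests.nightly, tests.multiplier):

    class TestScale {
      static final boolean TEST_NIGHTLY = Boolean.getBoolean("tests.nightly");
      static final int RANDOM_MULTIPLIER = Integer.getInteger("tests.multiplier", 1);

      // e.g. scaled(1000, 100) mirrors the NUM_DOCS hunks above
      static int scaled(int nightlySize, int defaultSize) {
        return (TEST_NIGHTLY ? nightlySize : defaultSize) * RANDOM_MULTIPLIER;
      }
    }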

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java Mon Jun  6 03:40:53 2011
@@ -51,13 +51,13 @@ public class TestDocsAndPositions extend
       Document doc = new Document();
       doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 "
           + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 "
-          + "1 2 3 4 5 6 7 8 9 10", Field.Store.YES, Field.Index.ANALYZED));
+          + "1 2 3 4 5 6 7 8 9 10", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
     writer.close();

-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("1");
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -112,7 +112,7 @@ public class TestDocsAndPositions extend
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    int numDocs = 131;
+    int numDocs = TEST_NIGHTLY ? 131 : 47;
     int max = 1051;
     int term = random.nextInt(max);
     Integer[][] positionsInDoc = new Integer[numDocs][];
@@ -120,7 +120,8 @@ public class TestDocsAndPositions extend
       Document doc = new Document();
       ArrayList positions = new ArrayList();
       StringBuilder builder = new StringBuilder();
-      for (int j = 0; j < 3049; j++) {
+      int num = TEST_NIGHTLY ? 3049 : 499;
+      for (int j = 0; j < num; j++) {
         int nextInt = random.nextInt(max);
         builder.append(nextInt).append(" ");
         if (nextInt == term) {
@@ -129,10 +130,10 @@ public class TestDocsAndPositions extend
       }
       if (positions.size() == 0) {
         builder.append(term);
-        positions.add(3049);
+        positions.add(num);
       }
-      doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
+          Field.Index.ANALYZED_NO_NORMS));
       positionsInDoc[i] = positions.toArray(new Integer[0]);
       writer.addDocument(doc);
     }
@@ -140,7 +141,7 @@ public class TestDocsAndPositions extend
     IndexReader reader = writer.getReader();
     writer.close();

-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("" + term);
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -192,7 +193,7 @@ public class TestDocsAndPositions extend
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    int numDocs = 499;
+    int numDocs = TEST_NIGHTLY ? 499 : 131;
     int max = 15678;
     int term = random.nextInt(max);
     int[] freqInDoc = new int[numDocs];
@@ -206,15 +207,15 @@ public class TestDocsAndPositions extend
           freqInDoc[i]++;
         }
       }
-      doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
+          Field.Index.ANALYZED_NO_NORMS));
       writer.addDocument(doc);
     }

     IndexReader reader = writer.getReader();
     writer.close();

-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("" + term);
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -281,8 +282,8 @@ public class TestDocsAndPositions extend
           builder.append("odd ");
         }
       }
-      doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
+          Field.Index.ANALYZED_NO_NORMS));
       writer.addDocument(doc);
     }

     IndexReader reader = writer.getReader();
     writer.close();

-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("even");
       ReaderContext topReaderContext = reader.getTopReaderContext();

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java Mon Jun  6 03:40:53 2011
@@ -39,16 +39,17 @@ import org.apache.lucene.store.IndexInpu
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 public class TestFieldsReader extends LuceneTestCase {
-  private Directory dir;
-  private Document testDoc = new Document();
-  private FieldInfos fieldInfos = null;
+  private static Directory dir;
+  private static Document testDoc = new Document();
+  private static FieldInfos fieldInfos = null;
   private final static String TEST_SEGMENT_NAME = "_0";

-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
+  @BeforeClass
+  public static void beforeClass() throws Exception {
     fieldInfos = new FieldInfos();
     DocHelper.setupDoc(testDoc);
     _TestUtil.add(testDoc, fieldInfos);
@@ -61,10 +62,12 @@ public class TestFieldsReader extends Lu
     FaultyIndexInput.doFail = false;
   }

-  @Override
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void afterClass() throws Exception {
     dir.close();
-    super.tearDown();
+    dir = null;
+    fieldInfos = null;
+    testDoc = null;
   }

   public void test() throws IOException {
     assertTrue(dir != null);
@@ -302,7 +305,7 @@ public class TestFieldsReader extends Lu
     FieldsReader reader;
     long lazyTime = 0;
     long regularTime = 0;
-    int length = 50;
+    int length = 10;
     Set lazyFieldNames = new HashSet();
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
     SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections. emptySet(), lazyFieldNames);
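Converting TestFieldsReader's setUp/tearDown into @BeforeClass/@AfterClass builds the directory and segment once per class instead of once per test method, which is safe here because the tests only read the fixture. The JUnit 4 shape, reduced to its essentials (ExpensiveFixture is a hypothetical stand-in for the directory/segment setup above):

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class SharedFixtureTest {
      private static ExpensiveFixture fixture; // shared by every test in the class

      @BeforeClass
      public static void beforeClass() throws Exception {
        fixture = new ExpensiveFixture(); // built once, not per test
      }

      @AfterClass
      public static void afterClass() throws Exception {
        fixture.close();
        fixture = null; // null out statics so the runner can GC them
      }

      @Test
      public void testSomething() {
        // read, never mutate, the shared fixture
      }
    }

    class ExpensiveFixture implements AutoCloseable {
      @Override
      public void close() {}
    }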

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java Mon Jun  6 03:40:53 2011
@@ -231,8 +231,8 @@ public class TestFlushByRamOrCountsPolic
     for (int i = 0; i < numThreads.length; i++) {
       AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
       MockDirectoryWrapper dir = newDirectory();
-      // mock a very slow harddisk here so that flushing is very slow
-      dir.setThrottling(MockDirectoryWrapper.Throttling.ALWAYS);
+      // mock a very slow harddisk sometimes here so that flushing is very slow
+      dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES);
       IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer(random));
       iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
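Switching from Throttling.ALWAYS to Throttling.SOMETIMES means the simulated slow disk applies to only a random subset of operations, so this test still occasionally sees slow-I/O interleavings without paying the penalty on every run. A sketch of what such a mode can look like, using a hypothetical stream wrapper (not Lucene's actual MockDirectoryWrapper internals):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.Random;

    class ThrottledOutputStream extends OutputStream {
      enum Throttling { ALWAYS, SOMETIMES, NEVER }

      private final OutputStream delegate;
      private final boolean throttled;

      ThrottledOutputStream(OutputStream delegate, Throttling mode, Random random) {
        this.delegate = delegate;
        // SOMETIMES throttles roughly half of the streams it wraps
        this.throttled = mode == Throttling.ALWAYS
            || (mode == Throttling.SOMETIMES && random.nextBoolean());
      }

      @Override
      public void write(int b) throws IOException {
        if (throttled) {
          try {
            Thread.sleep(1); // crude stand-in for a slow disk
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
        delegate.write(b);
      }
    }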

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java Mon Jun  6 03:40:53 2011
@@ -42,7 +42,7 @@ import org.apache.lucene.util._TestUtil;
 public class TestGlobalFieldNumbers extends LuceneTestCase {

   public void testGlobalFieldNumberFiles() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@@ -113,7 +113,7 @@ public class TestGlobalFieldNumbers exte
   }

   public void testIndexReaderCommit() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@@ -156,7 +156,7 @@ public class TestGlobalFieldNumbers exte
   }

   public void testGlobalFieldNumberFilesAcrossCommits() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@@ -207,7 +207,7 @@ public class TestGlobalFieldNumbers exte
   }

   public void testGlobalFieldNumberOnOldCommit() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(

Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java Mon Jun  6 03:40:53 2011
@@ -304,7 +304,7 @@ public class TestIndexReader extends Luc
     d.close();
   }

-  private void assertTermDocsCount(String msg,
+  static void assertTermDocsCount(String msg,
                                      IndexReader reader,
                                      Term term,
                                      int expected)
@@ -322,50 +322,6 @@ public class TestIndexReader extends Luc
     assertEquals(msg + ", count mismatch", expected, count);
   }

-  public void testBasicDelete() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = null;
-    IndexReader reader = null;
-    Term searchTerm = new Term("content", "aaa");
-
-    // add 100 documents with term : aaa
-    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    writer.setInfoStream(VERBOSE ? System.out : null);
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer, searchTerm.text());
-    }
-    writer.close();
-
-    // OPEN READER AT THIS POINT - this should fix the view of the
-    // index at the point of having 100 "aaa" documents and 0 "bbb"
-    reader = IndexReader.open(dir, false);
-    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
-    assertTermDocsCount("first reader", reader, searchTerm, 100);
-    reader.close();
-
-    // DELETE DOCUMENTS CONTAINING TERM: aaa
-    int deleted = 0;
-    reader = IndexReader.open(dir, false);
-    deleted = reader.deleteDocuments(searchTerm);
-    assertEquals("deleted count", 100, deleted);
-    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
-    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
-
-    // open a 2nd reader to make sure first reader can
-    // commit its changes (.del) while second reader
-    // is open:
-    IndexReader reader2 = IndexReader.open(dir, false);
-    reader.close();
-
-    // CREATE A NEW READER and re-test
-    reader = IndexReader.open(dir, false);
-    assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
-    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
-    reader.close();
-    reader2.close();
-    dir.close();
-  }

   public void testBinaryFields() throws IOException {
     Directory dir = newDirectory();
@@ -600,11 +556,6 @@ public class TestIndexReader extends Luc

     dir.close();
   }
-
-  public void testDeleteReaderWriterConflictUnoptimized() throws IOException{
-    deleteReaderWriterConflict(false);
-  }
-
   /* ???
   public void testOpenEmptyDirectory() throws IOException{
     String dirName = "test.empty";
     File fileDirName = new File(dirName);
     ...
     rmDir(fileDirName);
   }*/

-  public void testDeleteReaderWriterConflictOptimized() throws IOException{
-    deleteReaderWriterConflict(true);
-  }
-
-  private void deleteReaderWriterConflict(boolean optimize) throws IOException {
-    //Directory dir = new RAMDirectory();
-    Directory dir = newDirectory();
-
-    Term searchTerm = new Term("content", "aaa");
-    Term searchTerm2 = new Term("content", "bbb");
-
-    // add 100 documents with term : aaa
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer, searchTerm.text());
-    }
-    writer.close();
-
-    // OPEN READER AT THIS POINT - this should fix the view of the
-    // index at the point of having 100 "aaa" documents and 0 "bbb"
-    IndexReader reader = IndexReader.open(dir, false);
-    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
-    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
-    assertTermDocsCount("first reader", reader, searchTerm, 100);
-    assertTermDocsCount("first reader", reader, searchTerm2, 0);
-
-    // add 100 documents with term : bbb
-    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer, searchTerm2.text());
-    }
-
-    // REQUEST OPTIMIZATION
-    // This causes a new segment to become current for all subsequent
-    // searchers. Because of this, deletions made via a previously open
-    // reader, which would be applied to that reader's segment, are lost
-    // for subsequent searchers/readers
-    if(optimize)
-      writer.optimize();
-    writer.close();
-
-    // The reader should not see the new data
-    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
-    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
-    assertTermDocsCount("first reader", reader, searchTerm, 100);
-    assertTermDocsCount("first reader", reader, searchTerm2, 0);
-
-
-    // DELETE DOCUMENTS CONTAINING TERM: aaa
-    // NOTE: the reader was created when only "aaa" documents were in
-    int deleted = 0;
-    try {
-      deleted = reader.deleteDocuments(searchTerm);
-      fail("Delete allowed on an index reader with stale segment information");
-    } catch (StaleReaderException e) {
-      /* success */
-    }
-
-    // Re-open index reader and try again. This time it should see
-    // the new data.
-    reader.close();
-    reader = IndexReader.open(dir, false);
-    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
-    assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
-    assertTermDocsCount("first reader", reader, searchTerm, 100);
-    assertTermDocsCount("first reader", reader, searchTerm2, 100);
-
-    deleted = reader.deleteDocuments(searchTerm);
-    assertEquals("deleted count", 100, deleted);
-    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
-    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
-    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
-    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
-    reader.close();
-
-    // CREATE A NEW READER and re-test
-    reader = IndexReader.open(dir, false);
-    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
-    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
-    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
-    reader.close();
-    dir.close();
-  }
-
   public void testFilesOpenClose() throws IOException {
     // Create initial data set
     File dirFile = _TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
@@ -812,259 +679,6 @@ public class TestIndexReader extends Luc
     dir.close();
   }

-  public void testUndeleteAll() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    addDocumentWithFields(writer);
-    addDocumentWithFields(writer);
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, false);
-    reader.deleteDocument(0);
-    reader.deleteDocument(1);
-    reader.undeleteAll();
-    reader.close();
-    reader = IndexReader.open(dir, false);
-    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
-    reader.close();
-    dir.close();
-  }
-
-  public void testUndeleteAllAfterClose() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    addDocumentWithFields(writer);
-    addDocumentWithFields(writer);
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, false);
-    reader.deleteDocument(0);
-    reader.close();
-    reader = IndexReader.open(dir, false);
-    reader.undeleteAll();
-    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
-    reader.close();
-    dir.close();
-  }
-
-  public void testUndeleteAllAfterCloseThenReopen() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    addDocumentWithFields(writer);
-    addDocumentWithFields(writer);
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, false);
-    reader.deleteDocument(0);
-    reader.close();
-    reader = IndexReader.open(dir, false);
-    reader.undeleteAll();
-    reader.close();
-    reader = IndexReader.open(dir, false);
-    assertEquals(2, reader.numDocs());  // nothing has really been deleted thanks to undeleteAll()
-    reader.close();
-    dir.close();
-  }
-
-  public void testDeleteReaderReaderConflictUnoptimized() throws IOException{
-    deleteReaderReaderConflict(false);
-  }
-
-  public void testDeleteReaderReaderConflictOptimized() throws IOException{
-    deleteReaderReaderConflict(true);
-  }
-
-  /**
-   * Make sure if reader tries to commit but hits disk
-   * full that reader remains consistent and usable.
-   */
-  public void testDiskFull() throws IOException {
-
-    Term searchTerm = new Term("content", "aaa");
-    int START_COUNT = 157;
-    int END_COUNT = 144;
-
-    // First build up a starting index:
-    MockDirectoryWrapper startDir = newDirectory();
-    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    if (VERBOSE) {
-      System.out.println("TEST: create initial index");
-      writer.setInfoStream(System.out);
-    }
-    for(int i=0;i<157;i++) {
-      Document d = new Document();
-      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
-      d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
-      writer.addDocument(d);
-      if (0==i%10)
-        writer.commit();
-    }
-    writer.close();
-
-    {
-      IndexReader r = IndexReader.open(startDir);
-      IndexSearcher searcher = newSearcher(r);
-      ScoreDoc[] hits = null;
-      try {
-        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      } catch (IOException e) {
-        e.printStackTrace();
-        fail("exception when init searching: " + e);
-      }
-      searcher.close();
-      r.close();
-    }
-
-    long diskUsage = startDir.getRecomputedActualSizeInBytes();
-    long diskFree = diskUsage+100;
-
-    IOException err = null;
-
-    boolean done = false;
-    boolean gotExc = false;
-
-    // Iterate w/ ever increasing free disk space:
-    while(!done) {
-      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
-
-      // If IndexReader hits disk full, it can write to
-      // the same files again.
-      dir.setPreventDoubleWrite(false);
-
-      IndexReader reader = IndexReader.open(dir, false);
-
-      // For each disk size, first try to commit against
-      // dir that will hit random IOExceptions & disk
-      // full; after, give it infinite disk space & turn
-      // off random IOExceptions & retry w/ same reader:
-      boolean success = false;
-
-      for(int x=0;x<2;x++) {
-
-        double rate = 0.05;
-        double diskRatio = ((double) diskFree)/diskUsage;
-        long thisDiskFree;
-        String testName;
-
-        if (0 == x) {
-          thisDiskFree = diskFree;
-          if (diskRatio >= 2.0) {
-            rate /= 2;
-          }
-          if (diskRatio >= 4.0) {
-            rate /= 2;
-          }
-          if (diskRatio >= 6.0) {
-            rate = 0.0;
-          }
-          if (VERBOSE) {
-            System.out.println("\ncycle: " + diskFree + " bytes");
-          }
-          testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
-        } else {
-          thisDiskFree = 0;
-          rate = 0.0;
-          if (VERBOSE) {
-            System.out.println("\ncycle: same writer: unlimited disk space");
-          }
-          testName = "reader re-use after disk full";
-        }
-
-        dir.setMaxSizeInBytes(thisDiskFree);
-        dir.setRandomIOExceptionRate(rate);
-        Similarity sim = new DefaultSimilarity();
-        try {
-          if (0 == x) {
-            int docId = 12;
-            for(int i=0;i<13;i++) {
-              reader.deleteDocument(docId);
-              reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
-              docId += 12;
-            }
-          }
-          reader.close();
-          success = true;
-          if (0 == x) {
-            done = true;
-          }
-        } catch (IOException e) {
-          if (VERBOSE) {
-            System.out.println("  hit IOException: " + e);
-            e.printStackTrace(System.out);
-          }
-          err = e;
-          gotExc = true;
-          if (1 == x) {
-            e.printStackTrace();
-            fail(testName + " hit IOException after disk space was freed up");
-          }
-        }
-
-        // Finally, verify index is not corrupt, and, if
-        // we succeeded, we see all docs changed, and if
-        // we failed, we see either all docs or no docs
-        // changed (transactional semantics):
-        IndexReader newReader = null;
-        try {
-          newReader = IndexReader.open(dir, false);
-        } catch (IOException e) {
-          e.printStackTrace();
-          fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
-        }
-        /*
-        int result = newReader.docFreq(searchTerm);
-        if (success) {
-          if (result != END_COUNT) {
-            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
-          }
-        } else {
-          // On hitting exception we still may have added
-          // all docs:
-          if (result != START_COUNT && result != END_COUNT) {
-            err.printStackTrace();
-            fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
-          }
-        }
-        */
-
-        IndexSearcher searcher = newSearcher(newReader);
-        ScoreDoc[] hits = null;
-        try {
-          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        } catch (IOException e) {
-          e.printStackTrace();
-          fail(testName + ": exception when searching: " + e);
-        }
-        int result2 = hits.length;
-        if (success) {
-          if (result2 != END_COUNT) {
-            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
-          }
-        } else {
-          // On hitting exception we still may have added
-          // all docs:
-          if (result2 != START_COUNT && result2 != END_COUNT) {
-            err.printStackTrace();
-            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
-          }
-        }
-
-        searcher.close();
-        newReader.close();
-
-        if (result2 == END_COUNT) {
-          if (!gotExc)
-            fail("never hit disk full");
-          break;
-        }
-      }
-
-      dir.close();
-
-      // Try again with 10 more bytes of free space:
-      diskFree += 10;
-    }
-
-    startDir.close();
-  }
-
   public void testDocsOutOfOrderJIRA140() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
@@ -1161,133 +775,7 @@ public class TestIndexReader extends Luc
     dir.close();
   }

-  public void testMultiReaderDeletes() throws Exception {
-    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    Document doc = new Document();
-    doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
-    w.addDocument(doc);
-    doc = new Document();
-    w.commit();
-    doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
-    w.addDocument(doc);
-    IndexReader r = new SlowMultiReaderWrapper(w.getReader());
-    w.close();
-
-    assertNull(r.getDeletedDocs());
-    r.close();
-
-    r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
-
-    assertNull(r.getDeletedDocs());
-    assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
-    assertNotNull(r.getDeletedDocs());
-    assertTrue(r.getDeletedDocs().get(0));
-    assertEquals(1, r.deleteDocuments(new Term("f", "who")));
-    assertTrue(r.getDeletedDocs().get(1));
-    r.close();
-    dir.close();
-  }
-
-  private void deleteReaderReaderConflict(boolean optimize) throws IOException {
-    Directory dir = newDirectory();
-
-    Term searchTerm1 = new Term("content", "aaa");
-    Term searchTerm2 = new Term("content", "bbb");
-    Term searchTerm3 = new Term("content", "ccc");
-
-    // add 100 documents with term : aaa
-    // add 100 documents with term : bbb
-    // add 100 documents with term : ccc
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer,
searchTerm1.text());
-      addDoc(writer, searchTerm2.text());
-      addDoc(writer, searchTerm3.text());
-    }
-    if(optimize)
-      writer.optimize();
-    writer.close();
-
-    // OPEN TWO READERS
-    // Both readers get segment info as exists at this time
-    IndexReader reader1 = IndexReader.open(dir, false);
-    assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
-    assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
-    assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
-    assertTermDocsCount("first opened", reader1, searchTerm1, 100);
-    assertTermDocsCount("first opened", reader1, searchTerm2, 100);
-    assertTermDocsCount("first opened", reader1, searchTerm3, 100);
-
-    IndexReader reader2 = IndexReader.open(dir, false);
-    assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
-    assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
-    assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
-    assertTermDocsCount("first opened", reader2, searchTerm1, 100);
-    assertTermDocsCount("first opened", reader2, searchTerm2, 100);
-    assertTermDocsCount("first opened", reader2, searchTerm3, 100);
-
-    // DELETE DOCS FROM READER 2 and CLOSE IT
-    // delete documents containing term: aaa
-    // when the reader is closed, the segment info is updated and
-    // the first reader is now stale
-    reader2.deleteDocuments(searchTerm1);
-    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
-    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
-    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
-    assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
-    assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
-    assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
-    reader2.close();
-
-    // Make sure reader 1 is unchanged since it was open earlier
-    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
-    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
-    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
-    assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
-    assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
-    assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
-
-
-    // ATTEMPT TO DELETE FROM STALE READER
-    // delete documents containing term: bbb
-    try {
-      reader1.deleteDocuments(searchTerm2);
-      fail("Delete allowed from a stale index reader");
-    } catch (IOException e) {
-      /* success */
-    }
-
-    // RECREATE READER AND TRY AGAIN
-    reader1.close();
-    reader1 = IndexReader.open(dir, false);
-    assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
-    assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
-    assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
-    assertTermDocsCount("reopened", reader1, searchTerm1, 0);
-    assertTermDocsCount("reopened", reader1, searchTerm2, 100);
-    assertTermDocsCount("reopened", reader1, searchTerm3, 100);
-
-    reader1.deleteDocuments(searchTerm2);
-    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
-    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
-    assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
-    assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
-    assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
-    assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
-    reader1.close();
-
-    // Open another reader to confirm that everything is deleted
-    reader2 = IndexReader.open(dir, false);
-    assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
-    assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
-    assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
-    reader2.close();
-
-    dir.close();
-  }
-
-  private void addDocumentWithFields(IndexWriter writer) throws IOException
+  static void addDocumentWithFields(IndexWriter writer) throws IOException
   {
     Document doc = new Document();
     doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
   }

-  private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
+  static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
@@ -1297,7 +785,7 @@ public class TestIndexReader extends Luc
   {
     Document doc = new Document();
     doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
   }

-  private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
+  static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
@@ -1307,7 +795,7 @@ public class TestIndexReader extends Luc
   {
     Document doc = new Document();
     doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
     writer.addDocument(doc);
   }

@@ -1319,7 +807,7 @@ public class TestIndexReader extends Luc
-  private void addDoc(IndexWriter writer, String value) throws IOException {
+  static void addDoc(IndexWriter writer, String value) throws IOException {
     Document doc = new Document();
     doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
@@ -1557,28 +1045,7 @@ public class TestIndexReader extends Luc
     dir.close();
   }

-  // LUCENE-1647
-  public void testIndexReaderUnDeleteAll() throws Exception {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    writer.addDocument(createDocument("a"));
-    writer.addDocument(createDocument("b"));
-    writer.addDocument(createDocument("c"));
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, false);
-    reader.deleteDocuments(new Term("id", "a"));
-    reader.flush();
-    reader.deleteDocuments(new Term("id", "b"));
-    reader.undeleteAll();
-    reader.deleteDocuments(new Term("id", "b"));
-    reader.close();
-    IndexReader.open(dir,true).close();
-    dir.close();
-  }
-
-  private Document createDocument(String id) {
+  static Document createDocument(String id) {
     Document doc = new Document();
     doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
     return doc;
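The private-to-static conversions above exist so the methods split out of TestIndexReader (into TestIndexReaderDelete below and the other classes added by this commit) can keep calling the same helpers via static imports instead of carrying copies. In miniature, with hypothetical class names:

    // Helpers promoted from private instance methods to statics...
    class TestHelpers {
      static String createDocument(String id) {
        return "doc:" + id; // stand-in for building a real Document
      }
    }

    // ...so a split-out sibling class can reuse rather than duplicate them.
    class SiblingTest {
      void testSomething() {
        // with 'import static ...TestHelpers.createDocument;' this is just createDocument("a")
        String doc = TestHelpers.createDocument("a");
      }
    }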
@@ -1692,54 +1159,6 @@ public class TestIndexReader extends Luc
     dir.close();
   }

-  // LUCENE-1579: Make sure all SegmentReaders are new when
-  // reopen switches readOnly
-  public void testReopenChangeReadonly() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(-1).
-            setMergePolicy(newLogMergePolicy(10))
-    );
-    Document doc = new Document();
-    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
-    writer.addDocument(doc);
-    writer.commit();
-
-    // Open reader1
-    IndexReader r = IndexReader.open(dir, false);
-    assertTrue(r instanceof DirectoryReader);
-    IndexReader r1 = getOnlySegmentReader(r);
-    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
-    assertEquals(1, ints.length);
-    assertEquals(17, ints[0]);
-
-    // Reopen to readonly w/ no chnages
-    IndexReader r3 = r.reopen(true);
-    assertTrue(((DirectoryReader) r3).readOnly);
-    r3.close();
-
-    // Add new segment
-    writer.addDocument(doc);
-    writer.commit();
-
-    // Reopen reader1 --> reader2
-    IndexReader r2 = r.reopen(true);
-    r.close();
-    assertTrue(((DirectoryReader) r2).readOnly);
-    IndexReader[] subs = r2.getSequentialSubReaders();
-    final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
-    r2.close();
-
-    assertTrue(((SegmentReader) subs[0]).readOnly);
-    assertTrue(((SegmentReader) subs[1]).readOnly);
-    assertTrue(ints == ints2);
-
-    writer.close();
-    dir.close();
-  }
-
   // LUCENE-1586: getUniqueTermCount
   public void testUniqueTermCount() throws Exception {
     Directory dir = newDirectory();

Added: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java?rev=1132517&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (added)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java Mon Jun  6 03:40:53 2011
@@ -0,0 +1,374 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+
+import static org.apache.lucene.index.TestIndexReader.addDoc;
+import static org.apache.lucene.index.TestIndexReader.addDocumentWithFields;
+import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
+import static org.apache.lucene.index.TestIndexReader.createDocument;
+
+public class TestIndexReaderDelete extends LuceneTestCase {
+  private void deleteReaderReaderConflict(boolean optimize) throws IOException {
+    Directory dir = newDirectory();
+
+    Term searchTerm1 = new Term("content", "aaa");
+    Term searchTerm2 = new Term("content", "bbb");
+    Term searchTerm3 = new Term("content", "ccc");
+
+    // add 100 documents with term : aaa
+    // add 100 documents with term : bbb
+    // add 100 documents with term : ccc
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer, searchTerm1.text());
+      addDoc(writer, searchTerm2.text());
+      addDoc(writer, searchTerm3.text());
+    }
+    if(optimize)
+      writer.optimize();
+    writer.close();
+
+    // OPEN TWO READERS
+    // Both readers get segment info as exists at this time
+    IndexReader reader1 = IndexReader.open(dir, false);
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
+    assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("first opened", reader1, searchTerm1, 100);
+    assertTermDocsCount("first opened", reader1, searchTerm2, 100);
+    assertTermDocsCount("first opened", reader1, searchTerm3, 100);
+
+    IndexReader reader2 = IndexReader.open(dir, false);
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
+    assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
+    assertTermDocsCount("first opened", reader2, searchTerm1, 100);
+    assertTermDocsCount("first opened", reader2, searchTerm2, 100);
+    assertTermDocsCount("first opened", reader2, searchTerm3, 100);
+
+    // DELETE DOCS FROM READER 2 and CLOSE IT
+    // delete documents containing term: aaa
+    // when the reader is closed, the segment info is updated and
+    // the first reader is now stale
+    reader2.deleteDocuments(searchTerm1);
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
+    assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
+    assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
+    assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
+    assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
+    reader2.close();
+
+    // Make sure reader 1 is unchanged since it was open earlier
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
+    assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
+    assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
+    assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
1", reader1, searchTerm3, 100); + + + // ATTEMPT TO DELETE FROM STALE READER + // delete documents containing term: bbb + try { + reader1.deleteDocuments(searchTerm2); + fail("Delete allowed from a stale index reader"); + } catch (IOException e) { + /* success */ + } + + // RECREATE READER AND TRY AGAIN + reader1.close(); + reader1 = IndexReader.open(dir, false); + assertEquals("reopened", 100, reader1.docFreq(searchTerm1)); + assertEquals("reopened", 100, reader1.docFreq(searchTerm2)); + assertEquals("reopened", 100, reader1.docFreq(searchTerm3)); + assertTermDocsCount("reopened", reader1, searchTerm1, 0); + assertTermDocsCount("reopened", reader1, searchTerm2, 100); + assertTermDocsCount("reopened", reader1, searchTerm3, 100); + + reader1.deleteDocuments(searchTerm2); + assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1)); + assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2)); + assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3)); + assertTermDocsCount("deleted 2", reader1, searchTerm1, 0); + assertTermDocsCount("deleted 2", reader1, searchTerm2, 0); + assertTermDocsCount("deleted 2", reader1, searchTerm3, 100); + reader1.close(); + + // Open another reader to confirm that everything is deleted + reader2 = IndexReader.open(dir, false); + assertTermDocsCount("reopened 2", reader2, searchTerm1, 0); + assertTermDocsCount("reopened 2", reader2, searchTerm2, 0); + assertTermDocsCount("reopened 2", reader2, searchTerm3, 100); + reader2.close(); + + dir.close(); + } + + private void deleteReaderWriterConflict(boolean optimize) throws IOException { + //Directory dir = new RAMDirectory(); + Directory dir = newDirectory(); + + Term searchTerm = new Term("content", "aaa"); + Term searchTerm2 = new Term("content", "bbb"); + + // add 100 documents with term : aaa + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + for (int i = 0; i < 100; i++) { + addDoc(writer, searchTerm.text()); + } + writer.close(); + + // OPEN READER AT THIS POINT - this should fix the view of the + // index at the point of having 100 "aaa" documents and 0 "bbb" + IndexReader reader = IndexReader.open(dir, false); + assertEquals("first docFreq", 100, reader.docFreq(searchTerm)); + assertEquals("first docFreq", 0, reader.docFreq(searchTerm2)); + assertTermDocsCount("first reader", reader, searchTerm, 100); + assertTermDocsCount("first reader", reader, searchTerm2, 0); + + // add 100 documents with term : bbb + writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + for (int i = 0; i < 100; i++) { + addDoc(writer, searchTerm2.text()); + } + + // REQUEST OPTIMIZATION + // This causes a new segment to become current for all subsequent + // searchers. 
+
+  private void deleteReaderWriterConflict(boolean optimize) throws IOException {
+    Directory dir = newDirectory();
+
+    Term searchTerm = new Term("content", "aaa");
+    Term searchTerm2 = new Term("content", "bbb");
+
+    // add 100 documents with term : aaa
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer, searchTerm.text());
+    }
+    writer.close();
+
+    // OPEN READER AT THIS POINT - this pins the view of the index at
+    // 100 "aaa" documents and 0 "bbb" documents
+    IndexReader reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 0);
+
+    // add 100 documents with term : bbb
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer, searchTerm2.text());
+    }
+
+    // REQUEST OPTIMIZATION
+    // This causes a new segment to become current for all subsequent
+    // searchers. Because of this, deletions made via a previously open
+    // reader, which would be applied to that reader's segment, are lost
+    // for subsequent searchers/readers
+    if (optimize)
+      writer.optimize();
+    writer.close();
+
+    // The reader should not see the new data
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 0);
+
+    // DELETE DOCUMENTS CONTAINING TERM: aaa
+    // NOTE: the reader was created when only "aaa" documents were in the index
+    int deleted = 0;
+    try {
+      deleted = reader.deleteDocuments(searchTerm);
+      fail("Delete allowed on an index reader with stale segment information");
+    } catch (StaleReaderException e) {
+      /* success */
+    }
+
+    // Re-open index reader and try again. This time it should see
+    // the new data.
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    assertTermDocsCount("first reader", reader, searchTerm2, 100);
+
+    deleted = reader.deleteDocuments(searchTerm);
+    assertEquals("deleted count", 100, deleted);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
+    reader.close();
+
+    // CREATE A NEW READER and re-test
+    reader = IndexReader.open(dir, false);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
+    reader.close();
+    dir.close();
+  }
+
+  public void testBasicDelete() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+    IndexReader reader = null;
+    Term searchTerm = new Term("content", "aaa");
+
+    // add 100 documents with term : aaa
+    writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer, searchTerm.text());
+    }
+    writer.close();
+
+    // OPEN READER AT THIS POINT - this pins the view of the index at
+    // 100 "aaa" documents
+    reader = IndexReader.open(dir, false);
+    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
+    assertTermDocsCount("first reader", reader, searchTerm, 100);
+    reader.close();
+
+    // DELETE DOCUMENTS CONTAINING TERM: aaa
+    int deleted = 0;
+    reader = IndexReader.open(dir, false);
+    deleted = reader.deleteDocuments(searchTerm);
+    assertEquals("deleted count", 100, deleted);
+    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+
+    // open a 2nd reader to make sure first reader can
+    // commit its changes (.del) while second reader
+    // is open:
+    IndexReader reader2 = IndexReader.open(dir, false);
+    reader.close();
+
+    // CREATE A NEW READER and re-test
+    reader = IndexReader.open(dir, false);
+    assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
+    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
+    reader.close();
+    reader2.close();
+    dir.close();
+  }
+
+  public void testDeleteReaderReaderConflictUnoptimized() throws IOException {
+    deleteReaderReaderConflict(false);
+  }
+
+  public void testDeleteReaderReaderConflictOptimized() throws IOException {
+    deleteReaderReaderConflict(true);
+  }
+
+  public void testDeleteReaderWriterConflictUnoptimized() throws IOException {
+    deleteReaderWriterConflict(false);
+  }
+
+  public void testDeleteReaderWriterConflictOptimized() throws IOException {
+    deleteReaderWriterConflict(true);
+  }
+
+  public void testMultiReaderDeletes() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    Document doc = new Document();
+    doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+    w.commit();
+    doc = new Document();
+    doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    w.addDocument(doc);
+    IndexReader r = new SlowMultiReaderWrapper(w.getReader());
+    w.close();
+
+    assertNull(r.getDeletedDocs());
+    r.close();
+
+    r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
+
+    assertNull(r.getDeletedDocs());
+    assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
+    assertNotNull(r.getDeletedDocs());
+    assertTrue(r.getDeletedDocs().get(0));
+    assertEquals(1, r.deleteDocuments(new Term("f", "who")));
+    assertTrue(r.getDeletedDocs().get(1));
+    r.close();
+    dir.close();
+  }
+
+  public void testUndeleteAll() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.deleteDocument(1);
+    reader.undeleteAll();
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+
+  public void testUndeleteAllAfterClose() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    reader.undeleteAll();
+    assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+
+  public void testUndeleteAllAfterCloseThenReopen() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    addDocumentWithFields(writer);
+    addDocumentWithFields(writer);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocument(0);
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    reader.undeleteAll();
+    reader.close();
+    reader = IndexReader.open(dir, false);
+    assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
+    reader.close();
+    dir.close();
+  }
+
+  // LUCENE-1647
+  public void testIndexReaderUnDeleteAll() throws Exception {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.addDocument(createDocument("a"));
+    writer.addDocument(createDocument("b"));
+    writer.addDocument(createDocument("c"));
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, false);
+    reader.deleteDocuments(new Term("id", "a"));
+    reader.flush();
+    reader.deleteDocuments(new Term("id", "b"));
+    reader.undeleteAll();
+    reader.deleteDocuments(new Term("id", "b"));
+    reader.close();
+    IndexReader.open(dir, true).close();
+    dir.close();
+  }
+}
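
The undeleteAll tests above exercise a property of the pre-4.0 reader-side delete API: deletes made through a non-readonly reader are buffered and only become a .del file when the reader commits, and undeleteAll() rolls them back. A minimal standalone sketch of that lifecycle follows; it uses only calls that appear in the tests above, but the class UndeleteSketch and method deleteThenRestore are hypothetical names, and it assumes 'dir' already holds an index with documents matching the term.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;

    public final class UndeleteSketch {
      private UndeleteSketch() {}

      public static int deleteThenRestore(Directory dir, Term term) throws IOException {
        // readOnly=false: this reader may buffer deletes; they are committed
        // (as a .del file) only when the reader commits, e.g. on close().
        IndexReader reader = IndexReader.open(dir, false);
        try {
          int marked = reader.deleteDocuments(term); // mark matching docs deleted
          reader.undeleteAll();                      // roll every mark back
          return marked;                             // numDocs() is unchanged here
        } finally {
          reader.close();
        }
      }
    }

As testUndeleteAllAfterClose shows, the rollback is not limited to the buffering reader's session: a later reader can still undeleteAll() deletes that an earlier reader committed.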
Added: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java?rev=1132517&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (added)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java Mon Jun 6 03:40:53 2011
@@ -0,0 +1,228 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexReaderOnDiskFull extends LuceneTestCase {
+  /**
+   * Make sure that if a reader tries to commit but hits disk
+   * full, the reader remains consistent and usable.
+   */
+  public void testDiskFull() throws IOException {
+
+    Term searchTerm = new Term("content", "aaa");
+    int START_COUNT = 157;
+    int END_COUNT = 144;
+
+    // First build up a starting index:
+    MockDirectoryWrapper startDir = newDirectory();
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    if (VERBOSE) {
+      System.out.println("TEST: create initial index");
+      writer.setInfoStream(System.out);
+    }
+    for (int i = 0; i < START_COUNT; i++) {
+      Document d = new Document();
+      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(d);
+      if (i % 10 == 0)
+        writer.commit();
+    }
+    writer.close();
+
+    {
+      IndexReader r = IndexReader.open(startDir);
+      IndexSearcher searcher = newSearcher(r);
+      ScoreDoc[] hits = null;
+      try {
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      } catch (IOException e) {
+        e.printStackTrace();
+        fail("exception during initial search: " + e);
+      }
+      searcher.close();
+      r.close();
+    }
+
+    long diskUsage = startDir.getRecomputedActualSizeInBytes();
+    long diskFree = diskUsage + 100;
+
+    IOException err = null;
+
+    boolean done = false;
+    boolean gotExc = false;
+
+    // Iterate w/ ever-increasing free disk space:
+    while (!done) {
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+
+      // If IndexReader hits disk full, it can write to
+      // the same files again.
+      dir.setPreventDoubleWrite(false);
+
+      IndexReader reader = IndexReader.open(dir, false);
+
+      // For each disk size, first try to commit against
+      // dir that will hit random IOExceptions & disk
+      // full; after, give it infinite disk space & turn
+      // off random IOExceptions & retry w/ same reader:
+      boolean success = false;
+
+      for (int x = 0; x < 2; x++) {
+
+        double rate = 0.05;
+        double diskRatio = ((double) diskFree) / diskUsage;
+        long thisDiskFree;
+        String testName;
+
+        if (0 == x) {
+          thisDiskFree = diskFree;
+          if (diskRatio >= 2.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 4.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 6.0) {
+            rate = 0.0;
+          }
+          if (VERBOSE) {
+            System.out.println("\ncycle: " + diskFree + " bytes");
+          }
+          testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
+        } else {
+          thisDiskFree = 0;
+          rate = 0.0;
+          if (VERBOSE) {
+            System.out.println("\ncycle: same writer: unlimited disk space");
+          }
+          testName = "reader re-use after disk full";
+        }
+
+        dir.setMaxSizeInBytes(thisDiskFree);
+        dir.setRandomIOExceptionRate(rate);
+        Similarity sim = new DefaultSimilarity();
+        try {
+          if (0 == x) {
+            int docId = 12;
+            for (int i = 0; i < 13; i++) {
+              reader.deleteDocument(docId);
+              reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
+              docId += 12;
+            }
+          }
+          reader.close();
+          success = true;
+          if (0 == x) {
+            done = true;
+          }
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("  hit IOException: " + e);
+            e.printStackTrace(System.out);
+          }
+          err = e;
+          gotExc = true;
+          if (1 == x) {
+            e.printStackTrace();
+            fail(testName + " hit IOException after disk space was freed up");
+          }
+        }
+
+        // Finally, verify index is not corrupt, and, if
+        // we succeeded, we see all docs changed, and if
+        // we failed, we see either all docs or no docs
+        // changed (transactional semantics):
+        IndexReader newReader = null;
+        try {
+          newReader = IndexReader.open(dir, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
+        }
+
+        IndexSearcher searcher = newSearcher(newReader);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when searching: " + e);
+        }
+        int result2 = hits.length;
+        if (success) {
+          if (result2 != END_COUNT) {
+            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
+          }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result2 != START_COUNT && result2 != END_COUNT) {
+            err.printStackTrace();
+            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
+          }
+        }
+
+        searcher.close();
+        newReader.close();
+
+        if (result2 == END_COUNT) {
+          if (!gotExc)
+            fail("never hit disk full");
+          break;
+        }
+      }
+
+      dir.close();
+
+      // Try again with 10 more bytes of free space:
+      diskFree += 10;
+    }
+
+    startDir.close();
+  }
+}
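
TestIndexReaderOnDiskFull drives the reader into disk-full conditions via MockDirectoryWrapper and verifies transactional behavior: either all buffered changes commit or none do, and the index stays readable either way. A condensed, hedged sketch of that harness pattern follows; DiskFullHarness and doWork are hypothetical names, and the assumption that setMaxSizeInBytes(0) lifts the cap mirrors the test's own "unlimited disk space" second pass.

    import java.io.IOException;
    import org.apache.lucene.store.MockDirectoryWrapper;

    public abstract class DiskFullHarness {
      // Placeholder for whatever I/O-heavy operation is under test.
      protected abstract void doWork(MockDirectoryWrapper dir) throws IOException;

      public void run(MockDirectoryWrapper dir, long startingFreeBytes) throws IOException {
        long free = startingFreeBytes;
        while (true) {
          dir.setMaxSizeInBytes(free);   // simulate a nearly-full disk
          try {
            doWork(dir);
            return;                      // enough space: the operation committed
          } catch (IOException e) {
            // Expected while space is short; the index must remain
            // consistent and readable at this point.
          }
          dir.setMaxSizeInBytes(0);      // 0 = no size cap, as in the test above
          free += 10;                    // retry with a little more room
        }
      }
    }

Growing the cap in small fixed steps, as the test does, sweeps the failure point across many different byte offsets, which is what makes the transactional guarantee worth asserting at each step.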
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java?rev=1132517&r1=1132516&r2=1132517&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java Mon Jun 6 03:40:53 2011
@@ -36,6 +36,7 @@ import org.apache.lucene.document.Field.
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Similarity;
@@ -1244,4 +1245,52 @@ public class TestIndexReaderReopen exten
     r.close();
     dir.close();
   }
+
+  // LUCENE-1579: Make sure all SegmentReaders are new when
+  // reopen switches readOnly
+  public void testReopenChangeReadonly() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(-1).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document doc = new Document();
+    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Open reader1
+    IndexReader r = IndexReader.open(dir, false);
+    assertTrue(r instanceof DirectoryReader);
+    IndexReader r1 = getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+    assertEquals(1, ints.length);
+    assertEquals(17, ints[0]);
+
+    // Reopen to readonly w/ no changes
+    IndexReader r3 = r.reopen(true);
+    assertTrue(((DirectoryReader) r3).readOnly);
+    r3.close();
+
+    // Add new segment
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Reopen reader1 --> reader2
+    IndexReader r2 = r.reopen(true);
+    r.close();
+    assertTrue(((DirectoryReader) r2).readOnly);
+    IndexReader[] subs = r2.getSequentialSubReaders();
+    final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
+    r2.close();
+
+    assertTrue(((SegmentReader) subs[0]).readOnly);
+    assertTrue(((SegmentReader) subs[1]).readOnly);
+    assertTrue(ints == ints2);
+
+    writer.close();
+    dir.close();
+  }
 }
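
The new testReopenChangeReadonly guards LUCENE-1579: reopening a non-readonly reader as read-only must hand back fresh SegmentReaders while still reusing FieldCache entries, which are keyed by reader instance. For illustration, a minimal standalone sketch of the reopen idiom the test exercises against the pre-4.0 API; the class ReopenSketch and method refresh are hypothetical names, not part of this commit.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.store.Directory;

    public final class ReopenSketch {
      private ReopenSketch() {}

      // Returns the freshest read-only view of the index, closing the old
      // reader only when reopen actually produced a new instance.
      public static IndexReader refresh(Directory dir, IndexReader old) throws IOException {
        if (old == null) {
          return IndexReader.open(dir, true); // readOnly=true
        }
        IndexReader fresh = old.reopen(true);  // reopen as read-only
        if (fresh != old) {
          old.close();
        }
        return fresh;
      }
    }

reopen(true) typically returns the same instance only when nothing changed and the reader is already read-only, so guarding the close() on instance identity avoids closing a reader that is still in use.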