lucene-java-commits mailing list archives

From: mikemcc...@apache.org
Subject: svn commit: r782406 [1/2] - in /lucene/java/trunk: ./ src/java/org/apache/lucene/index/ src/java/org/apache/lucene/search/ src/test/org/apache/lucene/index/
Date: Sun, 07 Jun 2009 16:31:19 GMT
Author: mikemccand
Date: Sun Jun  7 16:31:18 2009
New Revision: 782406

URL: http://svn.apache.org/viewvc?rev=782406&view=rev
Log:
LUCENE-1651: always return DirectoryReader (renamed from MultiSegmentReader) from IndexReader.open
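
In practice the public entry point is unchanged; only the concrete class handed back differs. A minimal usage sketch, assuming the 2.9-dev API at this revision (the index path and variable names are illustrative):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.store.FSDirectory;

    // readOnly=true now yields a ReadOnlyDirectoryReader even for a
    // single-segment index; readOnly=false yields a DirectoryReader.
    IndexReader reader = IndexReader.open(FSDirectory.getDirectory("/path/to/index"), true);
    try {
      System.out.println("numDocs: " + reader.numDocs());
    } finally {
      reader.close();
    }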

Added:
    lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java   (with props)
    lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java   (with props)
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java   (with props)
Removed:
    lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryIndexReader.java
    lucene/java/trunk/src/java/org/apache/lucene/index/MultiSegmentReader.java
    lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java
Modified:
    lucene/java/trunk/common-build.xml
    lucene/java/trunk/src/java/org/apache/lucene/index/IndexReader.java
    lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/java/trunk/src/java/org/apache/lucene/index/MultiReader.java
    lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java
    lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java
    lucene/java/trunk/src/java/org/apache/lucene/search/SortField.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderClone.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentTermEnum.java

Modified: lucene/java/trunk/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/java/trunk/common-build.xml?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/common-build.xml (original)
+++ lucene/java/trunk/common-build.xml Sun Jun  7 16:31:18 2009
@@ -42,7 +42,7 @@
   <property name="Name" value="Lucene"/>
   <property name="dev.version" value="2.9-dev"/>
   <property name="version" value="${dev.version}"/>
-  <property name="compatibility.tag" value="lucene_2_4_back_compat_tests_20090607"/>
+  <property name="compatibility.tag" value="lucene_2_4_back_compat_tests_20090607a"/>
   <property name="spec.version" value="${version}"/>	
   <property name="year" value="2000-${current.year}"/>
   <property name="final.name" value="lucene-${name}-${version}"/>

Added: lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java?rev=782406&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java Sun Jun  7 16:31:18 2009
@@ -0,0 +1,1285 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.Collections;
+import java.util.ArrayList;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.FSDirectory;
+
+/** 
+ * An IndexReader which reads indexes with multiple segments.
+ */
+class DirectoryReader extends IndexReader implements Cloneable {
+  protected Directory directory;
+  protected boolean readOnly;
+  protected boolean closeDirectory;
+
+  IndexWriter writer;
+
+  private IndexDeletionPolicy deletionPolicy;
+  private final HashSet synced = new HashSet();
+  private Lock writeLock;
+  private SegmentInfos segmentInfos;
+  private boolean stale;
+
+  private boolean rollbackHasChanges;
+  private SegmentInfos rollbackSegmentInfos;
+
+  private SegmentReader[] subReaders;
+  private int[] starts;                           // 1st docno for each segment
+  private Map normsCache = new HashMap();
+  private int maxDoc = 0;
+  private int numDocs = -1;
+  private boolean hasDeletions = false;
+
+  static IndexReader open(final Directory directory, final boolean closeDirectory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly) throws CorruptIndexException, IOException {
+    SegmentInfos.FindSegmentsFile finder = new SegmentInfos.FindSegmentsFile(directory) {
+
+      protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
+
+        SegmentInfos infos = new SegmentInfos();
+        infos.read(directory, segmentFileName);
+
+        if (readOnly)
+          return new ReadOnlyDirectoryReader(directory, infos, deletionPolicy, closeDirectory);
+        else
+          return new DirectoryReader(directory, infos, deletionPolicy, closeDirectory, false);
+      }
+    };
+
+    IndexReader reader = null;
+    try {
+      reader = (IndexReader) finder.run(commit);
+    } finally {
+      // If we truly failed to open a reader, which means
+      // an exception is being thrown, then close the
+      // directory now (on success the returned reader
+      // takes over responsibility for closing it):
+      if (reader == null && closeDirectory) {
+        try {
+          directory.close();
+        } catch (IOException ioe) {
+          // suppress, so we keep throwing original failure
+          // from opening the reader
+        }
+      }
+    }
+
+    return reader;
+  }
+
+  /** Construct reading the named set of readers. */
+  DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, boolean closeDirectory, boolean readOnly) throws IOException {
+    this.directory = directory;
+    this.readOnly = readOnly;
+    this.closeDirectory = closeDirectory;
+    this.segmentInfos = sis;
+    this.deletionPolicy = deletionPolicy;
+
+    if (!readOnly) {
+      // We assume that this segments_N was previously
+      // properly sync'd:
+      synced.addAll(sis.files(directory, true));
+    }
+
+    // To reduce the chance of hitting FileNotFound
+    // (and having to retry), we open segments in
+    // reverse because IndexWriter merges & deletes
+    // the newest segments first.
+
+    SegmentReader[] readers = new SegmentReader[sis.size()];
+    for (int i = sis.size()-1; i >= 0; i--) {
+      boolean success = false;
+      try {
+        readers[i] = SegmentReader.get(readOnly, sis.info(i));
+        success = true;
+      } finally {
+        if (!success) {
+          // Close all readers we had opened:
+          for(i++;i<sis.size();i++) {
+            try {
+              readers[i].close();
+            } catch (Throwable ignore) {
+              // keep going - we want to clean up as much as possible
+            }
+          }
+        }
+      }
+    }
+
+    initialize(readers);
+  }
+
+  // Used by near real-time search
+  DirectoryReader(IndexWriter writer, SegmentInfos infos) throws IOException {
+    this.directory = writer.getDirectory();
+    this.readOnly = true;
+    this.closeDirectory = false;
+    this.segmentInfos = infos;
+    if (!readOnly) {
+      // We assume that this segments_N was previously
+      // properly sync'd:
+      synced.addAll(infos.files(directory, true));
+    }
+
+    // IndexWriter synchronizes externally before calling
+    // us, which ensures infos will not change; so there's
+    // no need to process segments in reverse order
+    final int numSegments = infos.size();
+    SegmentReader[] readers = new SegmentReader[numSegments];
+    final Directory dir = writer.getDirectory();
+    int upto = 0;
+
+    for (int i=0;i<numSegments;i++) {
+      boolean success = false;
+      try {
+        final SegmentInfo info = infos.info(upto);
+        if (info.dir == dir) {
+          readers[upto++] = writer.readerPool.getReadOnlyClone(info, true);
+        }
+        success = true;
+      } finally {
+        if (!success) {
+          // Close all readers we had opened:
+          for(upto--;upto>=0;upto--) {
+            try {
+              readers[upto].close();
+            } catch (Throwable ignore) {
+              // keep going - we want to clean up as much as possible
+            }
+          }
+        }
+      }
+    }
+
+    this.writer = writer;
+
+    if (upto < readers.length) {
+      // This means some segments were in a foreign Directory
+      SegmentReader[] newReaders = new SegmentReader[upto];
+      System.arraycopy(readers, 0, newReaders, 0, upto);
+      readers = newReaders;
+    }
+
+    initialize(readers);
+  }
+
+  /** This constructor is only used for {@link #reopen()} */
+  DirectoryReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts,
+                     Map oldNormsCache, boolean readOnly, boolean doClone) throws IOException {
+    this.directory = directory;
+    this.readOnly = readOnly;
+    this.closeDirectory = closeDirectory;
+    this.segmentInfos = infos;
+    if (!readOnly) {
+      // We assume that this segments_N was previously
+      // properly sync'd:
+      synced.addAll(infos.files(directory, true));
+    }
+
+    // we put the old SegmentReaders in a map, that allows us
+    // to lookup a reader using its segment name
+    Map segmentReaders = new HashMap();
+
+    if (oldReaders != null) {
+      // create a Map SegmentName->SegmentReader
+      for (int i = 0; i < oldReaders.length; i++) {
+        segmentReaders.put(oldReaders[i].getSegmentName(), new Integer(i));
+      }
+    }
+    
+    SegmentReader[] newReaders = new SegmentReader[infos.size()];
+    
+    // remember which readers are shared between the old and the re-opened
+    // DirectoryReader - we have to incRef those readers
+    boolean[] readerShared = new boolean[infos.size()];
+    
+    for (int i = infos.size() - 1; i>=0; i--) {
+      // find SegmentReader for this segment
+      Integer oldReaderIndex = (Integer) segmentReaders.get(infos.info(i).name);
+      if (oldReaderIndex == null) {
+        // this is a new segment, no old SegmentReader can be reused
+        newReaders[i] = null;
+      } else {
+        // there is an old reader for this segment - we'll try to reopen it
+        newReaders[i] = oldReaders[oldReaderIndex.intValue()];
+      }
+
+      boolean success = false;
+      try {
+        SegmentReader newReader;
+        if (newReaders[i] == null || infos.info(i).getUseCompoundFile() != newReaders[i].getSegmentInfo().getUseCompoundFile()) {
+
+          // We should never see a totally new segment during cloning
+          assert !doClone;
+
+          // this is a new reader; in case we hit an exception we can close it safely
+          newReader = SegmentReader.get(readOnly, infos.info(i));
+        } else {
+          newReader = newReaders[i].reopenSegment(infos.info(i), doClone, readOnly);
+        }
+        if (newReader == newReaders[i]) {
+          // this reader will be shared between the old and the new one,
+          // so we must incRef it
+          readerShared[i] = true;
+          newReader.incRef();
+        } else {
+          readerShared[i] = false;
+          newReaders[i] = newReader;
+        }
+        success = true;
+      } finally {
+        if (!success) {
+          for (i++; i < infos.size(); i++) {
+            if (newReaders[i] != null) {
+              try {
+                if (!readerShared[i]) {
+                  // this is a new subReader that is not used by the old one,
+                  // we can close it
+                  newReaders[i].close();
+                } else {
+                  // this subReader is also used by the old reader, so instead
+                  // closing we must decRef it
+                  newReaders[i].decRef();
+                }
+              } catch (IOException ignore) {
+                // keep going - we want to clean up as much as possible
+              }
+            }
+          }
+        }
+      }
+    }    
+    
+    // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
+    initialize(newReaders);
+    
+    // try to copy unchanged norms from the old normsCache to the new one
+    if (oldNormsCache != null) {
+      Iterator it = oldNormsCache.entrySet().iterator();
+      while (it.hasNext()) {
+        Map.Entry entry = (Map.Entry) it.next();
+        String field = (String) entry.getKey();
+        if (!hasNorms(field)) {
+          continue;
+        }
+
+        byte[] oldBytes = (byte[]) entry.getValue();
+
+        byte[] bytes = new byte[maxDoc()];
+
+        for (int i = 0; i < subReaders.length; i++) {
+          Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
+
+          // this SegmentReader was not re-opened, we can copy all of its norms 
+          if (oldReaderIndex != null &&
+               (oldReaders[oldReaderIndex.intValue()] == subReaders[i] 
+                 || oldReaders[oldReaderIndex.intValue()].norms.get(field) == subReaders[i].norms.get(field))) {
+            // we don't have to synchronize here: either this constructor is called from a SegmentReader,
+            // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
+            // which is synchronized
+            System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i], starts[i+1] - starts[i]);
+          } else {
+            subReaders[i].norms(field, bytes, starts[i]);
+          }
+        }
+
+        normsCache.put(field, bytes);      // update cache
+      }
+    }
+  }
+
+  private void initialize(SegmentReader[] subReaders) {
+    this.subReaders = subReaders;
+    starts = new int[subReaders.length + 1];    // build starts array
+    for (int i = 0; i < subReaders.length; i++) {
+      starts[i] = maxDoc;
+      maxDoc += subReaders[i].maxDoc();      // compute maxDocs
+
+      if (subReaders[i].hasDeletions())
+        hasDeletions = true;
+    }
+    starts[subReaders.length] = maxDoc;
+  }
+
+  public final synchronized Object clone() {
+    try {
+      return clone(readOnly); // Preserve current readOnly
+    } catch (Exception ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
+    DirectoryReader newReader = doReopen((SegmentInfos) segmentInfos.clone(), true, openReadOnly);
+
+    if (this != newReader) {
+      newReader.closeDirectory = closeDirectory;
+      newReader.deletionPolicy = deletionPolicy;
+    }
+    newReader.writer = writer;
+    // If we're cloning a non-readOnly reader, move the
+    // writeLock (if there is one) to the new reader:
+    if (!openReadOnly && writeLock != null) {
+      // In near real-time search, reader is always readonly
+      assert writer == null;
+      newReader.writeLock = writeLock;
+      newReader.hasChanges = hasChanges;
+      newReader.hasDeletions = hasDeletions;
+      writeLock = null;
+      hasChanges = false;
+    }
+
+    return newReader;
+  }
+
+  public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+    // Preserve current readOnly
+    return doReopen(readOnly, null);
+  }
+
+  public final synchronized IndexReader reopen(boolean openReadOnly) throws CorruptIndexException, IOException {
+    return doReopen(openReadOnly, null);
+  }
+
+  public final synchronized IndexReader reopen(final IndexCommit commit) throws CorruptIndexException, IOException {
+    return doReopen(true, commit);
+  }
+
+  private synchronized IndexReader doReopen(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
+    ensureOpen();
+
+    assert commit == null || openReadOnly;
+
+    // If we were obtained by writer.getReader(), re-ask the
+    // writer to get a new reader.
+    if (writer != null) {
+      assert readOnly;
+
+      if (!openReadOnly) {
+        throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() can only be reopened with openReadOnly=true (got false)");
+      }
+
+      if (commit != null) {
+        throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
+      }
+
+      if (!writer.isOpen(true)) {
+        throw new AlreadyClosedException("cannot reopen: the IndexWriter this reader was obtained from is now closed");
+      }
+
+      // TODO: right now we *always* make a new reader; in
+      // the future we could have the writer make some
+      // effort to detect that no changes have occurred
+      IndexReader reader = writer.getReader();
+      reader.setDisableFakeNorms(getDisableFakeNorms());
+      return reader;
+    }
+
+    if (commit == null) {
+      if (hasChanges) {
+        // We have changes, which means we are not readOnly:
+        assert readOnly == false;
+        // and we hold the write lock:
+        assert writeLock != null;
+        // so no other writer holds the write lock, which
+        // means no changes could have been done to the index:
+        assert isCurrent();
+
+        if (openReadOnly) {
+          return (IndexReader) clone(openReadOnly);
+        } else {
+          return this;
+        }
+      } else if (isCurrent()) {
+        if (openReadOnly != readOnly) {
+          // Just fallback to clone
+          return (IndexReader) clone(openReadOnly);
+        } else {
+          return this;
+        }
+      }
+    } else {
+      if (directory != commit.getDirectory())
+        throw new IOException("the specified commit does not match the specified Directory");
+      if (segmentInfos != null && commit.getSegmentsFileName().equals(segmentInfos.getCurrentSegmentFileName())) {
+        if (readOnly != openReadOnly) {
+          // Just fallback to clone
+          return (IndexReader) clone(openReadOnly);
+        } else {
+          return this;
+        }
+      }
+    }
+
+    final SegmentInfos.FindSegmentsFile finder = new SegmentInfos.FindSegmentsFile(directory) {
+
+      protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
+        SegmentInfos infos = new SegmentInfos();
+        infos.read(directory, segmentFileName);
+        return doReopen(infos, false, openReadOnly);
+      }
+    };
+
+    DirectoryReader reader = null;
+
+    // While trying to reopen, we temporarily mark our
+    // closeDirectory as false.  This way any exception hit
+    // partway through opening the reader (expected, e.g.,
+    // if the writer is committing) won't close our
+    // directory.  We restore this value below:
+    final boolean myCloseDirectory = closeDirectory;
+    closeDirectory = false;
+
+    try {
+      reader = (DirectoryReader) finder.run(commit);
+    } finally {
+      if (myCloseDirectory) {
+        assert directory instanceof FSDirectory;
+        // Restore my closeDirectory
+        closeDirectory = true;
+        if (reader != null && reader != this) {
+          // Success, and a new reader was actually opened
+          reader.closeDirectory = true;
+          // Clone the directory
+          reader.directory = FSDirectory.getDirectory(((FSDirectory) directory).getFile());
+        }
+      }
+    }
+
+    return reader;
+  }
+
+  private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
+    DirectoryReader reader;
+    if (openReadOnly) {
+      reader = new ReadOnlyDirectoryReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
+    } else {
+      reader = new DirectoryReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false, doClone);
+    }
+    reader.setDisableFakeNorms(getDisableFakeNorms());
+    return reader;
+  }
+
+  /** Version number when this IndexReader was opened. */
+  public long getVersion() {
+    ensureOpen();
+    return segmentInfos.getVersion();
+  }
+
+  public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
+    ensureOpen();
+    int i = readerIndex(n);        // find segment num
+    return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
+  }
+
+  public TermFreqVector getTermFreqVector(int n, String field)
+      throws IOException {
+    ensureOpen();
+    int i = readerIndex(n);        // find segment num
+    return subReaders[i].getTermFreqVector(n - starts[i], field);
+  }
+
+
+  public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
+    ensureOpen();
+    int i = readerIndex(docNumber);        // find segment num
+    subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
+  }
+
+  public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
+    ensureOpen();
+    int i = readerIndex(docNumber);        // find segment num
+    subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
+  }
+
+  /**
+   * Checks whether the index is optimized (i.e., it has a single segment and no deletions)
+   * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
+   */
+  public boolean isOptimized() {
+    ensureOpen();
+    return segmentInfos.size() == 1 && !hasDeletions();
+  }
+  
+  public synchronized int numDocs() {
+    // Don't call ensureOpen() here (it could affect performance)
+    if (numDocs == -1) {        // check cache
+      int n = 0;                // cache miss--recompute
+      for (int i = 0; i < subReaders.length; i++)
+        n += subReaders[i].numDocs();      // sum from readers
+      numDocs = n;
+    }
+    return numDocs;
+  }
+
+  public int maxDoc() {
+    // Don't call ensureOpen() here (it could affect performance)
+    return maxDoc;
+  }
+
+  // inherit javadoc
+  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+    ensureOpen();
+    int i = readerIndex(n);                          // find segment num
+    return subReaders[i].document(n - starts[i], fieldSelector);    // dispatch to segment reader
+  }
+
+  public boolean isDeleted(int n) {
+    // Don't call ensureOpen() here (it could affect performance)
+    final int i = readerIndex(n);                           // find segment num
+    return subReaders[i].isDeleted(n - starts[i]);    // dispatch to segment reader
+  }
+
+  public boolean hasDeletions() {
+    // Don't call ensureOpen() here (it could affect performance)
+    return hasDeletions;
+  }
+
+  protected void doDelete(int n) throws CorruptIndexException, IOException {
+    numDocs = -1;                             // invalidate cache
+    int i = readerIndex(n);                   // find segment num
+    subReaders[i].deleteDocument(n - starts[i]);      // dispatch to segment reader
+    hasDeletions = true;
+  }
+
+  protected void doUndeleteAll() throws CorruptIndexException, IOException {
+    for (int i = 0; i < subReaders.length; i++)
+      subReaders[i].undeleteAll();
+
+    hasDeletions = false;
+    numDocs = -1;                                 // invalidate cache
+  }
+
+  private int readerIndex(int n) {    // find reader for doc n:
+    return readerIndex(n, this.starts, this.subReaders.length);
+  }
+  
+  final static int readerIndex(int n, int[] starts, int numSubReaders) {    // find reader for doc n:
+    int lo = 0;                                      // search starts array
+    int hi = numSubReaders - 1;                  // for first element less
+
+    while (hi >= lo) {
+      int mid = (lo + hi) >>> 1;
+      int midValue = starts[mid];
+      if (n < midValue)
+        hi = mid - 1;
+      else if (n > midValue)
+        lo = mid + 1;
+      else {                                      // found a match
+        while (mid+1 < numSubReaders && starts[mid+1] == midValue) {
+          mid++;                                  // scan to last match
+        }
+        return mid;
+      }
+    }
+    return hi;
+  }
+
+  public boolean hasNorms(String field) throws IOException {
+    ensureOpen();
+    for (int i = 0; i < subReaders.length; i++) {
+      if (subReaders[i].hasNorms(field)) return true;
+    }
+    return false;
+  }
+
+  private byte[] ones;
+  private byte[] fakeNorms() {
+    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
+    return ones;
+  }
+
+  public synchronized byte[] norms(String field) throws IOException {
+    ensureOpen();
+    byte[] bytes = (byte[])normsCache.get(field);
+    if (bytes != null)
+      return bytes;          // cache hit
+    if (!hasNorms(field))
+      return getDisableFakeNorms() ? null : fakeNorms();
+
+    bytes = new byte[maxDoc()];
+    for (int i = 0; i < subReaders.length; i++)
+      subReaders[i].norms(field, bytes, starts[i]);
+    normsCache.put(field, bytes);      // update cache
+    return bytes;
+  }
+
+  public synchronized void norms(String field, byte[] result, int offset)
+    throws IOException {
+    ensureOpen();
+    byte[] bytes = (byte[])normsCache.get(field);
+    if (bytes==null && !hasNorms(field)) {
+      Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
+    } else if (bytes != null) {                           // cache hit
+      System.arraycopy(bytes, 0, result, offset, maxDoc());
+    } else {
+      for (int i = 0; i < subReaders.length; i++) {      // read from segments
+        subReaders[i].norms(field, result, offset + starts[i]);
+      }
+    }
+  }
+
+  protected void doSetNorm(int n, String field, byte value)
+    throws CorruptIndexException, IOException {
+    synchronized (normsCache) {
+      normsCache.remove(field);                         // clear cache      
+    }
+    int i = readerIndex(n);                           // find segment num
+    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
+  }
+
+  public TermEnum terms() throws IOException {
+    ensureOpen();
+    return new MultiTermEnum(this, subReaders, starts, null);
+  }
+
+  public TermEnum terms(Term term) throws IOException {
+    ensureOpen();
+    return new MultiTermEnum(this, subReaders, starts, term);
+  }
+
+  public int docFreq(Term t) throws IOException {
+    ensureOpen();
+    int total = 0;          // sum freqs in segments
+    for (int i = 0; i < subReaders.length; i++)
+      total += subReaders[i].docFreq(t);
+    return total;
+  }
+
+  public TermDocs termDocs() throws IOException {
+    ensureOpen();
+    return new MultiTermDocs(this, subReaders, starts);
+  }
+
+  public TermPositions termPositions() throws IOException {
+    ensureOpen();
+    return new MultiTermPositions(this, subReaders, starts);
+  }
+
+  /**
+   * Tries to acquire the WriteLock on this directory. This method is only valid if this
+   * IndexReader is the directory owner.
+   *
+   * @throws StaleReaderException  if the index has changed since this reader was opened
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws org.apache.lucene.store.LockObtainFailedException
+   *                               if another writer has this index open (<code>write.lock</code> could not be
+   *                               obtained)
+   * @throws IOException           if there is a low-level IO error
+   */
+  protected void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+
+    if (readOnly) {
+      // NOTE: we should not reach this code w/ the core
+      // IndexReader classes; however, an external subclass
+      // of IndexReader could reach this.
+      ReadOnlySegmentReader.noWrite();
+    }
+
+    if (segmentInfos != null) {
+      ensureOpen();
+      if (stale)
+        throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
+
+      if (writeLock == null) {
+        Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
+        if (!writeLock.obtain(IndexWriter.WRITE_LOCK_TIMEOUT)) // obtain write lock
+          throw new LockObtainFailedException("Index locked for write: " + writeLock);
+        this.writeLock = writeLock;
+
+        // we have to check whether index has changed since this reader was opened.
+        // if so, this reader is no longer valid for deletion
+        if (SegmentInfos.readCurrentVersion(directory) > segmentInfos.getVersion()) {
+          stale = true;
+          this.writeLock.release();
+          this.writeLock = null;
+          throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
+        }
+      }
+    }
+  }
+
+  /** @deprecated  */
+  protected void doCommit() throws IOException {
+    doCommit(null);
+  }
+
+  /**
+   * Commit changes resulting from delete, undeleteAll, or setNorm operations
+   * <p/>
+   * If an exception is hit, then either no changes or all changes will have been committed to the index (transactional
+   * semantics).
+   *
+   * @throws IOException if there is a low-level IO error
+   */
+  protected void doCommit(Map commitUserData) throws IOException {
+    if (hasChanges) {
+      segmentInfos.setUserData(commitUserData);
+      // Default deleter (for backwards compatibility) is
+      // KeepOnlyLastCommitDeleter:
+      IndexFileDeleter deleter = new IndexFileDeleter(directory,
+                                                      deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
+                                                      segmentInfos, null, null);
+
+      // Checkpoint the state we are about to change, in
+      // case we have to roll back:
+      startCommit();
+
+      boolean success = false;
+      try {
+        for (int i = 0; i < subReaders.length; i++)
+          subReaders[i].commit();
+
+        // Sync all files we just wrote
+        Iterator it = segmentInfos.files(directory, false).iterator();
+        while (it.hasNext()) {
+          final String fileName = (String) it.next();
+          if (!synced.contains(fileName)) {
+            assert directory.fileExists(fileName);
+            directory.sync(fileName);
+            synced.add(fileName);
+          }
+        }
+
+        segmentInfos.commit(directory);
+        success = true;
+      } finally {
+
+        if (!success) {
+
+          // Rollback changes that were made to
+          // SegmentInfos but failed to get [fully]
+          // committed.  This way this reader instance
+          // remains consistent (matched to what's
+          // actually in the index):
+          rollbackCommit();
+
+          // Recompute deletable files & remove them (so
+          // partially written .del files, etc, are
+          // removed):
+          deleter.refresh();
+        }
+      }
+
+      // Have the deleter remove any now unreferenced
+      // files due to this commit:
+      deleter.checkpoint(segmentInfos, true);
+      deleter.close();
+
+      if (writeLock != null) {
+        writeLock.release();  // release write lock
+        writeLock = null;
+      }
+    }
+    hasChanges = false;
+  }
+
+  void startCommit() {
+    rollbackHasChanges = hasChanges;
+    rollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
+    for (int i = 0; i < subReaders.length; i++) {
+      subReaders[i].startCommit();
+    }
+  }
+
+  void rollbackCommit() {
+    hasChanges = rollbackHasChanges;
+    for (int i = 0; i < segmentInfos.size(); i++) {
+      // Rollback each segmentInfo.  Because the
+      // SegmentReader holds a reference to the
+      // SegmentInfo we can't [easily] just replace
+      // segmentInfos, so we reset it in place instead:
+      segmentInfos.info(i).reset(rollbackSegmentInfos.info(i));
+    }
+    rollbackSegmentInfos = null;
+    for (int i = 0; i < subReaders.length; i++) {
+      subReaders[i].rollbackCommit();
+    }
+  }
+
+  /** Release the write lock, if needed. */
+  protected void finalize() throws Throwable {
+    try {
+      if (writeLock != null) {
+        writeLock.release();                        // release write lock
+        writeLock = null;
+      }
+    } finally {
+      super.finalize();
+    }
+  }
+
+  public Map getCommitUserData() {
+    ensureOpen();
+    return segmentInfos.getUserData();
+  }
+
+  /**
+   * Check whether this IndexReader is still using the current (i.e., most recently committed) version of the index.  If
+   * a writer has committed any changes to the index since this reader was opened, this will return <code>false</code>,
+   * in which case you must open a new IndexReader in order to see the changes.  See the description of the <a
+   * href="IndexWriter.html#autoCommit"><code>autoCommit</code></a> flag which controls when the {@link IndexWriter}
+   * actually commits changes to the index.
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException           if there is a low-level IO error
+   */
+  public boolean isCurrent() throws CorruptIndexException, IOException {
+    ensureOpen();
+    return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
+  }
+
+  protected synchronized void doClose() throws IOException {
+    for (int i = 0; i < subReaders.length; i++)
+      subReaders[i].decRef();
+
+    if (closeDirectory)
+      directory.close();
+  }
+
+  public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
+    ensureOpen();
+    return getFieldNames(fieldNames, this.subReaders);
+  }
+  
+  static Collection getFieldNames (IndexReader.FieldOption fieldNames, IndexReader[] subReaders) {
+    // maintain a unique set of field names
+    Set fieldSet = new HashSet();
+    for (int i = 0; i < subReaders.length; i++) {
+      IndexReader reader = subReaders[i];
+      Collection names = reader.getFieldNames(fieldNames);
+      fieldSet.addAll(names);
+    }
+    return fieldSet;
+  } 
+  
+  public IndexReader[] getSequentialSubReaders() {
+    return subReaders;
+  }
+
+  public void setTermInfosIndexDivisor(int indexDivisor) throws IllegalStateException {
+    for (int i = 0; i < subReaders.length; i++)
+      subReaders[i].setTermInfosIndexDivisor(indexDivisor);
+  }
+
+  public int getTermInfosIndexDivisor() throws IllegalStateException {
+    if (subReaders.length > 0)
+      return subReaders[0].getTermInfosIndexDivisor();
+    else
+      throw new IllegalStateException("no readers");
+  }
+
+  public void setDisableFakeNorms(boolean disableFakeNorms) {
+    super.setDisableFakeNorms(disableFakeNorms);
+    for (int i = 0; i < subReaders.length; i++)
+        subReaders[i].setDisableFakeNorms(disableFakeNorms);
+  }
+
+  /** Returns the directory this index resides in. */
+  public Directory directory() {
+    // Don't ensureOpen here -- in certain cases, when a
+    // cloned/reopened reader needs to commit, it may call
+    // this method on the closed original reader
+    return directory;
+  }
+
+  /**
+   * Expert: return the IndexCommit that this reader has opened.
+   * <p/>
+   * <p><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
+   */
+  public IndexCommit getIndexCommit() throws IOException {
+    return new ReaderCommit(segmentInfos, directory);
+  }
+
+  /** @see org.apache.lucene.index.IndexReader#listCommits */
+  public static Collection listCommits(Directory dir) throws IOException {
+    final String[] files = dir.listAll();
+
+    Collection commits = new ArrayList();
+
+    SegmentInfos latest = new SegmentInfos();
+    latest.read(dir);
+    final long currentGen = latest.getGeneration();
+
+    commits.add(new ReaderCommit(latest, dir));
+
+    for(int i=0;i<files.length;i++) {
+
+      final String fileName = files[i];
+
+      if (fileName.startsWith(IndexFileNames.SEGMENTS) &&
+          !fileName.equals(IndexFileNames.SEGMENTS_GEN) &&
+          SegmentInfos.generationFromSegmentsFileName(fileName) < currentGen) {
+
+        SegmentInfos sis = new SegmentInfos();
+        try {
+          // IOException allowed to throw here, in case
+          // segments_N is corrupt
+          sis.read(dir, fileName);
+        } catch (FileNotFoundException fnfe) {
+          // LUCENE-948: on NFS (and maybe others), if
+          // you have writers switching back and forth
+          // between machines, it's very likely that the
+          // dir listing will be stale and will claim a
+          // file segments_X exists when in fact it
+          // doesn't.  So, we catch this and handle it
+          // as if the file does not exist
+          sis = null;
+        }
+
+        if (sis != null)
+          commits.add(new ReaderCommit(sis, dir));
+      }
+    }
+
+    return commits;
+  }
+
+  private static final class ReaderCommit extends IndexCommit {
+    private String segmentsFileName;
+    Collection files;
+    Directory dir;
+    long generation;
+    long version;
+    final boolean isOptimized;
+    final Map userData;
+
+    ReaderCommit(SegmentInfos infos, Directory dir) throws IOException {
+      segmentsFileName = infos.getCurrentSegmentFileName();
+      this.dir = dir;
+      userData = infos.getUserData();
+      files = Collections.unmodifiableCollection(infos.files(dir, true));
+      version = infos.getVersion();
+      generation = infos.getGeneration();
+      isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
+    }
+
+    public boolean isOptimized() {
+      return isOptimized;
+    }
+
+    public String getSegmentsFileName() {
+      return segmentsFileName;
+    }
+
+    public Collection getFileNames() {
+      return files;
+    }
+
+    public Directory getDirectory() {
+      return dir;
+    }
+
+    public long getVersion() {
+      return version;
+    }
+
+    public long getGeneration() {
+      return generation;
+    }
+
+    public boolean isDeleted() {
+      return false;
+    }
+
+    public Map getUserData() {
+      return userData;
+    }
+  }
+
+  static class MultiTermEnum extends TermEnum {
+    IndexReader topReader; // used for matching TermEnum to TermDocs
+    private SegmentMergeQueue queue;
+  
+    private Term term;
+    private int docFreq;
+    final SegmentMergeInfo[] matchingSegments; // null terminated array of matching segments
+
+    public MultiTermEnum(IndexReader topReader, IndexReader[] readers, int[] starts, Term t)
+      throws IOException {
+      this.topReader = topReader;
+      queue = new SegmentMergeQueue(readers.length);
+      matchingSegments = new SegmentMergeInfo[readers.length+1];
+      for (int i = 0; i < readers.length; i++) {
+        IndexReader reader = readers[i];
+        TermEnum termEnum;
+  
+        if (t != null) {
+          termEnum = reader.terms(t);
+        } else
+          termEnum = reader.terms();
+  
+        SegmentMergeInfo smi = new SegmentMergeInfo(starts[i], termEnum, reader);
+        smi.ord = i;
+        if (t == null ? smi.next() : termEnum.term() != null)
+          queue.put(smi);          // initialize queue
+        else
+          smi.close();
+      }
+  
+      if (t != null && queue.size() > 0) {
+        next();
+      }
+    }
+  
+    public boolean next() throws IOException {
+      for (int i=0; i<matchingSegments.length; i++) {
+        SegmentMergeInfo smi = matchingSegments[i];
+        if (smi==null) break;
+        if (smi.next())
+          queue.put(smi);
+        else
+          smi.close(); // done with segment
+      }
+      
+      int numMatchingSegments = 0;
+      matchingSegments[0] = null;
+
+      SegmentMergeInfo top = (SegmentMergeInfo)queue.top();
+
+      if (top == null) {
+        term = null;
+        return false;
+      }
+  
+      term = top.term;
+      docFreq = 0;
+  
+      while (top != null && term.compareTo(top.term) == 0) {
+        matchingSegments[numMatchingSegments++] = top;
+        queue.pop();
+        docFreq += top.termEnum.docFreq();    // increment freq
+        top = (SegmentMergeInfo)queue.top();
+      }
+
+      matchingSegments[numMatchingSegments] = null;
+      return true;
+    }
+  
+    public Term term() {
+      return term;
+    }
+  
+    public int docFreq() {
+      return docFreq;
+    }
+  
+    public void close() throws IOException {
+      queue.close();
+    }
+  }
+
+  static class MultiTermDocs implements TermDocs {
+    IndexReader topReader;  // used for matching TermEnum to TermDocs
+    protected IndexReader[] readers;
+    protected int[] starts;
+    protected Term term;
+  
+    protected int base = 0;
+    protected int pointer = 0;
+  
+    private TermDocs[] readerTermDocs;
+    protected TermDocs current;              // == readerTermDocs[pointer]
+
+    private MultiTermEnum tenum;  // the term enum used for seeking... can be null
+    int matchingSegmentPos;  // position into the matching segments from tenum
+    SegmentMergeInfo smi;     // current segment merge info... can be null
+
+    public MultiTermDocs(IndexReader topReader, IndexReader[] r, int[] s) {
+      this.topReader = topReader;
+      readers = r;
+      starts = s;
+  
+      readerTermDocs = new TermDocs[r.length];
+    }
+
+    public int doc() {
+      return base + current.doc();
+    }
+    public int freq() {
+      return current.freq();
+    }
+  
+    public void seek(Term term) {
+      this.term = term;
+      this.base = 0;
+      this.pointer = 0;
+      this.current = null;
+      this.tenum = null;
+      this.smi = null;
+      this.matchingSegmentPos = 0;
+    }
+  
+    public void seek(TermEnum termEnum) throws IOException {
+      seek(termEnum.term());
+      if (termEnum instanceof MultiTermEnum) {
+        tenum = (MultiTermEnum)termEnum;
+        if (topReader != tenum.topReader)
+          tenum = null;
+      }
+    }
+  
+    public boolean next() throws IOException {
+      for(;;) {
+        if (current!=null && current.next()) {
+          return true;
+        }
+        else if (pointer < readers.length) {
+          if (tenum != null) {
+            smi = tenum.matchingSegments[matchingSegmentPos++];
+            if (smi==null) {
+              pointer = readers.length;
+              return false;
+            }
+            pointer = smi.ord;
+          }
+          base = starts[pointer];
+          current = termDocs(pointer++);
+        } else {
+          return false;
+        }
+      }
+    }
+  
+    /** Optimized implementation. */
+    public int read(final int[] docs, final int[] freqs) throws IOException {
+      while (true) {
+        while (current == null) {
+          if (pointer < readers.length) {      // try next segment
+            if (tenum != null) {
+              smi = tenum.matchingSegments[matchingSegmentPos++];
+              if (smi==null) {
+                pointer = readers.length;
+                return 0;
+              }
+              pointer = smi.ord;
+            }
+            base = starts[pointer];
+            current = termDocs(pointer++);
+          } else {
+            return 0;
+          }
+        }
+        int end = current.read(docs, freqs);
+        if (end == 0) {          // none left in segment
+          current = null;
+        } else {            // got some
+          final int b = base;        // adjust doc numbers
+          for (int i = 0; i < end; i++)
+           docs[i] += b;
+          return end;
+        }
+      }
+    }
+  
+    /* A possible future optimization could skip entire segments */
+    public boolean skipTo(int target) throws IOException {
+      for(;;) {
+        if (current != null && current.skipTo(target-base)) {
+          return true;
+        } else if (pointer < readers.length) {
+          if (tenum != null) {
+            SegmentMergeInfo smi = tenum.matchingSegments[matchingSegmentPos++];
+            if (smi==null) {
+              pointer = readers.length;
+              return false;
+            }
+            pointer = smi.ord;
+          }
+          base = starts[pointer];
+          current = termDocs(pointer++);
+        } else
+          return false;
+      }
+    }
+  
+    private TermDocs termDocs(int i) throws IOException {
+      TermDocs result = readerTermDocs[i];
+      if (result == null)
+        result = readerTermDocs[i] = termDocs(readers[i]);
+      if (smi != null) {
+        assert(smi.ord == i);
+        assert(smi.termEnum.term().equals(term));
+        result.seek(smi.termEnum);
+      } else {
+        result.seek(term);
+      }
+      return result;
+    }
+  
+    protected TermDocs termDocs(IndexReader reader)
+      throws IOException {
+      return term==null ? reader.termDocs(null) : reader.termDocs();
+    }
+  
+    public void close() throws IOException {
+      for (int i = 0; i < readerTermDocs.length; i++) {
+        if (readerTermDocs[i] != null)
+          readerTermDocs[i].close();
+      }
+    }
+  }
+
+  static class MultiTermPositions extends MultiTermDocs implements TermPositions {
+    public MultiTermPositions(IndexReader topReader, IndexReader[] r, int[] s) {
+      super(topReader,r,s);
+    }
+  
+    protected TermDocs termDocs(IndexReader reader) throws IOException {
+      return (TermDocs)reader.termPositions();
+    }
+  
+    public int nextPosition() throws IOException {
+      return ((TermPositions)current).nextPosition();
+    }
+    
+    public int getPayloadLength() {
+      return ((TermPositions)current).getPayloadLength();
+    }
+     
+    public byte[] getPayload(byte[] data, int offset) throws IOException {
+      return ((TermPositions)current).getPayload(data, offset);
+    }
+  
+  
+    // TODO: Remove warning after API has been finalized
+    public boolean isPayloadAvailable() {
+      return ((TermPositions) current).isPayloadAvailable();
+    }
+  }
+}

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/DirectoryReader.java
------------------------------------------------------------------------------
    svn:eol-style = native
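
Since doReopen above returns the receiver when the index is current (and the readOnly mode matches), callers must only release the old reader when a different instance comes back. A short usage sketch (variable names illustrative):

    IndexReader reader = IndexReader.open(dir, true);
    // ... some IndexWriter commits changes ...
    IndexReader newReader = reader.reopen();   // preserves readOnly
    if (newReader != reader) {
      // A new DirectoryReader was opened over the newer segments_N;
      // unchanged SegmentReaders were incRef'd and are shared, so
      // closing the old top-level reader is both safe and required.
      reader.close();
      reader = newReader;
    }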

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/IndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/IndexReader.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/IndexReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/IndexReader.java Sun Jun  7 16:31:18 2009
@@ -374,7 +374,7 @@
   }
 
   private static IndexReader open(final Directory directory, final boolean closeDirectory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly) throws CorruptIndexException, IOException {
-    return DirectoryIndexReader.open(directory, closeDirectory, deletionPolicy, commit, readOnly);
+    return DirectoryReader.open(directory, closeDirectory, deletionPolicy, commit, readOnly);
   }
 
   /**
@@ -1296,7 +1296,7 @@
    *  progress while this method is running, that commit
   *  may or may not be returned in the array.  */
   public static Collection listCommits(Directory dir) throws IOException {
-    return DirectoryIndexReader.listCommits(dir);
+    return DirectoryReader.listCommits(dir);
   }
 
   /** Expert: returns the sequential sub readers that this
@@ -1308,12 +1308,12 @@
    *  reader is a null reader (for example a MultiReader
    *  that has no sub readers).
    *  <p>
-   *  NOTE: for a MultiSegmentReader, which is obtained by
-   *  {@link #open} when the index has more than one
-   *  segment, you should not use the sub-readers returned
-   *  by this method to make any changes (setNorm,
-   *  deleteDocument, etc.).  Doing so will likely lead to
-   *  index corruption.  Use the parent reader instead. */
+   *  NOTE: You should not try using sub-readers returned by
+   *  this method to make any changes (setNorm, deleteDocument,
+   *  etc.). While this might succeed for one composite reader
+   *  (like MultiReader), it will most likely lead to index
+   *  corruption for other readers (like DirectoryReader obtained
+   *  through {@link #open}). Use the parent reader directly. */
   public IndexReader[] getSequentialSubReaders() {
     return null;
   }
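
A sketch of the read-only, per-segment use that the revised note permits (reader obtained from IndexReader.open; names illustrative):

    IndexReader[] subs = reader.getSequentialSubReaders();
    if (subs != null) {   // null for readers without sub-readers
      for (int i = 0; i < subs.length; i++) {
        // Inspection only: per the note above, do not call setNorm,
        // deleteDocument, etc. on a sub-reader of a DirectoryReader.
        System.out.println("segment " + i + ": " + subs[i].maxDoc() + " docs");
      }
    }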

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/IndexWriter.java Sun Jun  7 16:31:18 2009
@@ -440,7 +440,7 @@
     // reader; in theory we could do similar retry logic,
     // just like we do when loading segments_N
     synchronized(this) {
-      return new ReadOnlyMultiSegmentReader(this, segmentInfos);
+      return new ReadOnlyDirectoryReader(this, segmentInfos);
     }
   }
 
@@ -616,7 +616,6 @@
         // synchronized
         // Returns a ref, which we xfer to readerMap:
         sr = SegmentReader.get(info, readBufferSize, doOpenStores);
-        sr.writer = IndexWriter.this;
         readerMap.put(info, sr);
       } else if (doOpenStores) {
         sr.openDocStores();
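
For context, the first hunk above is inside IndexWriter.getReader(), the expert near-real-time API; after this commit it hands back a ReadOnlyDirectoryReader directly. A hedged sketch, assuming the 2.9-dev signatures (dir, analyzer, and doc are placeholders):

    IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
    writer.addDocument(doc);

    // Returns a ReadOnlyDirectoryReader over the writer's current
    // segments without requiring a commit first; reopen() on it will
    // re-ask the writer for a fresh reader (see doReopen above).
    IndexReader nrtReader = writer.getReader();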

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/MultiReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/MultiReader.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/MultiReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/MultiReader.java Sun Jun  7 16:31:18 2009
@@ -25,9 +25,9 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
-import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
-import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
+import org.apache.lucene.index.DirectoryReader.MultiTermDocs;
+import org.apache.lucene.index.DirectoryReader.MultiTermEnum;
+import org.apache.lucene.index.DirectoryReader.MultiTermPositions;
 import org.apache.lucene.search.DefaultSimilarity;
 
 /** An IndexReader which reads multiple indexes, appending their content.
@@ -268,7 +268,7 @@
   }
 
   private int readerIndex(int n) {    // find reader for doc n:
-    return MultiSegmentReader.readerIndex(n, this.starts, this.subReaders.length);
+    return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
   }
   
   public boolean hasNorms(String field) throws IOException {
@@ -377,7 +377,7 @@
   
   public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
     ensureOpen();
-    return MultiSegmentReader.getFieldNames(fieldNames, this.subReaders);
+    return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
   }  
   
   /**
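
MultiReader behaves as before; only the home of the shared helper classes moved. A sketch of composing two independent indexes (dir1/dir2 are placeholders):

    IndexReader r1 = IndexReader.open(dir1, true);
    IndexReader r2 = IndexReader.open(dir2, true);
    // Doc numbers from r2 are offset by r1.maxDoc() via the same
    // readerIndex()/starts logic that now lives in DirectoryReader.
    MultiReader multi = new MultiReader(new IndexReader[] { r1, r2 });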

Added: lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java?rev=782406&view=auto
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java (added)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java Sun Jun  7 16:31:18 2009
@@ -0,0 +1,41 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+import java.util.Map;
+
+class ReadOnlyDirectoryReader extends DirectoryReader {
+  ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, boolean closeDirectory) throws IOException {
+    super(directory, sis, deletionPolicy, closeDirectory, true);
+  }
+
+  ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException {
+    super(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true, doClone);
+  }
+  
+  ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos) throws IOException {
+    super(writer, infos);
+  }
+  
+  protected void acquireWriteLock() {
+    ReadOnlySegmentReader.noWrite();
+  }
+}

Propchange: lucene/java/trunk/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java
------------------------------------------------------------------------------
    svn:eol-style = native
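
The acquireWriteLock override above is the entire read-only enforcement: every mutating operation routes through it and fails up front. A sketch of what a caller sees, assuming ReadOnlySegmentReader.noWrite() throws UnsupportedOperationException:

    IndexReader ro = IndexReader.open(dir, true);   // read-only reader
    try {
      ro.deleteDocument(0);   // routes through acquireWriteLock()
    } catch (UnsupportedOperationException e) {
      // expected: read-only readers cannot modify the index
    }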

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentInfos.java Sun Jun  7 16:31:18 2009
@@ -535,6 +535,16 @@
     }
 
     public Object run() throws CorruptIndexException, IOException {
+      return run(null);
+    }
+    
+    public Object run(IndexCommit commit) throws CorruptIndexException, IOException {
+      if (commit != null) {
+        if (directory != commit.getDirectory())
+          throw new IOException("the specified commit does not match the specified Directory");
+        return doBody(commit.getSegmentsFileName());
+      }
+
       String segmentFileName = null;
       long lastGen = -1;
       long gen = 0;
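
The new run(IndexCommit) overload short-circuits the generation-probing
loop below: when a specific commit is requested, it only checks that the
commit belongs to the same Directory and then loads that commit's
segments file directly. A hedged sketch of how this path is reached,
assuming the IndexReader.listCommits/open(IndexCommit) entry points and
a deletion policy that retains old commits:

    Collection commits = IndexReader.listCommits(dir);
    IndexCommit commit = (IndexCommit) commits.iterator().next();
    // opening a reader on an explicit commit is what ends up invoking
    // FindSegmentsFile.run(commit) above
    IndexReader r = IndexReader.open(commit);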

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/SegmentReader.java Sun Jun  7 16:31:18 2009
@@ -38,10 +38,11 @@
 import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.CloseableThreadLocal;
 
-/**
- * @version $Id$
- */
-class SegmentReader extends DirectoryIndexReader {
+/** @version $Id$ */
+class SegmentReader extends IndexReader implements Cloneable {
+  protected Directory directory;
+  protected boolean readOnly;
+
   private String segment;
   private SegmentInfo si;
   private int readBufferSize;
@@ -59,6 +60,7 @@
   private boolean normsDirty = false;
   private int pendingDeleteCount;
 
+  private boolean rollbackHasChanges = false;
   private boolean rollbackDeletedDocsDirty = false;
   private boolean rollbackNormsDirty = false;
   private int rollbackPendingDeleteCount;
@@ -378,7 +380,7 @@
    * @deprecated
    */
   public static SegmentReader get(SegmentInfo si) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE, true);
+    return get(false, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true);
   }
 
   /**
@@ -386,25 +388,7 @@
    * @throws IOException if there is a low-level IO error
    */
   public static SegmentReader get(boolean readOnly, SegmentInfo si) throws CorruptIndexException, IOException {
-    return get(readOnly, si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE, true);
-  }
-
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @deprecated
-   */
-  static SegmentReader get(SegmentInfo si, boolean doOpenStores) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, null, false, false, BufferedIndexInput.BUFFER_SIZE, doOpenStores);
-  }
-
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @deprecated
-   */
-  public static SegmentReader get(SegmentInfo si, int readBufferSize) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, null, false, false, readBufferSize, true);
+    return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true);
   }
 
   /**
@@ -413,37 +397,7 @@
    * @deprecated
    */
   static SegmentReader get(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, null, false, false, readBufferSize, doOpenStores);
-  }
-
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  static SegmentReader get(boolean readOnly, SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
-    return get(readOnly, si.dir, si, null, false, false, readBufferSize, doOpenStores);
-  }
-
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public static SegmentReader get(boolean readOnly, SegmentInfos sis, SegmentInfo si,
-                                  boolean closeDir) throws CorruptIndexException, IOException {
-    return get(readOnly, si.dir, si, sis, closeDir, true, BufferedIndexInput.BUFFER_SIZE, true);
-  }
-
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @deprecated
-   */
-  public static SegmentReader get(Directory dir, SegmentInfo si,
-                                  SegmentInfos sis,
-                                  boolean closeDir, boolean ownDir,
-                                  int readBufferSize)
-    throws CorruptIndexException, IOException {
-    return get(false, dir, si, sis, closeDir, ownDir, readBufferSize, true);
+    return get(false, si.dir, si, readBufferSize, doOpenStores);
   }
 
   /**
@@ -453,8 +407,6 @@
   public static SegmentReader get(boolean readOnly,
                                   Directory dir,
                                   SegmentInfo si,
-                                  SegmentInfos sis,
-                                  boolean closeDir, boolean ownDir,
                                   int readBufferSize,
                                   boolean doOpenStores)
     throws CorruptIndexException, IOException {
@@ -467,8 +419,57 @@
     } catch (Exception e) {
       throw new RuntimeException("cannot load SegmentReader class: " + e, e);
     }
-    instance.init(dir, sis, closeDir, readOnly);
-    instance.initialize(si, readBufferSize, doOpenStores);
+    instance.directory = dir;
+    instance.readOnly = readOnly;
+    instance.segment = si.name;
+    instance.si = si;
+    instance.readBufferSize = readBufferSize;
+
+    boolean success = false;
+
+    try {
+      // Use compound file directory for some files, if it exists
+      Directory cfsDir = instance.directory();
+      if (si.getUseCompoundFile()) {
+        instance.cfsReader = new CompoundFileReader(instance.directory(), instance.segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+        cfsDir = instance.cfsReader;
+      }
+
+      instance.fieldInfos = new FieldInfos(cfsDir, instance.segment + ".fnm");
+
+      if (doOpenStores) {
+        instance.openDocStores();
+      }
+
+      boolean anyProx = false;
+      final int numFields = instance.fieldInfos.size();
+      for(int i=0;!anyProx && i<numFields;i++)
+        if (!instance.fieldInfos.fieldInfo(i).omitTermFreqAndPositions)
+          anyProx = true;
+
+      instance.tis = new TermInfosReader(cfsDir, instance.segment, instance.fieldInfos, readBufferSize);
+
+      instance.loadDeletedDocs();
+
+      // make sure that all index files have been read or are kept open
+      // so that if an index update removes them we'll still have them
+      instance.freqStream = cfsDir.openInput(instance.segment + ".frq", readBufferSize);
+      if (anyProx)
+        instance.proxStream = cfsDir.openInput(instance.segment + ".prx", readBufferSize);
+      instance.openNorms(cfsDir, readBufferSize);
+
+      success = true;
+    } finally {
+
+      // With lock-less commits, it's entirely possible (and
+      // fine) to hit a FileNotFound exception above.  In
+      // this case, we want to explicitly close any subset
+      // of things that were opened so that we don't have to
+      // wait for a GC to do so.
+      if (!success) {
+        instance.doClose();
+      }
+    }
     return instance;
   }
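
The success flag in the new get() is the usual lock-less-commits cleanup
idiom: if any of the opens above throws (a FileNotFoundException is
possible when a concurrent writer removes files), everything opened so
far is closed explicitly instead of leaking until GC. Schematically (a
generic sketch, not code from this patch):

    boolean success = false;
    try {
      // open resource A, then B, then C ...
      success = true;
    } finally {
      if (!success) {
        // best-effort close of whatever subset was opened,
        // releasing file handles immediately
      }
    }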
 
@@ -521,58 +522,6 @@
     }
   }
 
-  private void initialize(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
-    segment = si.name;
-    this.si = si;
-    this.readBufferSize = readBufferSize;
-
-    boolean success = false;
-
-    try {
-      // Use compound file directory for some files, if it exists
-      Directory cfsDir = directory();
-      if (si.getUseCompoundFile()) {
-        cfsReader = new CompoundFileReader(directory(), segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
-        cfsDir = cfsReader;
-      }
-
-      fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
-
-      if (doOpenStores) {
-        openDocStores();
-      }
-
-      boolean anyProx = false;
-      final int numFields = fieldInfos.size();
-      for(int i=0;!anyProx && i<numFields;i++)
-        if (!fieldInfos.fieldInfo(i).omitTermFreqAndPositions)
-          anyProx = true;
-
-      tis = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize);
-      
-      loadDeletedDocs();
-
-      // make sure that all index files have been read or are kept open
-      // so that if an index update removes them we'll still have them
-      freqStream = cfsDir.openInput(segment + ".frq", readBufferSize);
-      if (anyProx)
-        proxStream = cfsDir.openInput(segment + ".prx", readBufferSize);
-      openNorms(cfsDir, readBufferSize);
-
-      success = true;
-    } finally {
-
-      // With lock-less commits, it's entirely possible (and
-      // fine) to hit a FileNotFound exception above.  In
-      // this case, we want to explicitly close any subset
-      // of things that were opened so that we don't have to
-      // wait for a GC to do so.
-      if (!success) {
-        doClose();
-      }
-    }
-  }
-  
   private void loadDeletedDocs() throws IOException {
     // NOTE: the bitvector is stored using the regular directory, not cfs
     if (hasDeletions(si)) {
@@ -611,35 +560,18 @@
     return (BitVector)bv.clone();
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
-    DirectoryIndexReader newReader;
-
-    if (infos == null) {
-      if (doClone) {
-        // OK: directly clone myself
-        newReader = reopenSegment(si, doClone, openReadOnly);
-      } else {
-        throw new UnsupportedOperationException("cannot reopen a standalone SegmentReader");
-      }
-    } else if (infos.size() == 1) {
-      SegmentInfo si = infos.info(0);
-      if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
-        newReader = reopenSegment(si, doClone, openReadOnly);
-      } else { 
-        // segment not referenced anymore, reopen not possible
-        // or segment format changed
-        newReader = SegmentReader.get(openReadOnly, infos, infos.info(0), false);
-      }
-    } else {
-      if (openReadOnly)
-        newReader = new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
-      else
-        newReader = new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false, doClone);
+  public final synchronized Object clone() {
+    try {
+      return clone(readOnly); // Preserve current readOnly
+    } catch (Exception ex) {
+      throw new RuntimeException(ex);
     }
-    newReader.setDisableFakeNorms(getDisableFakeNorms());
-    return newReader;
   }
-  
+
+  public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
+    return reopenSegment(si, true, openReadOnly);
+  }
+
   synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
     boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) 
                                   && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
@@ -752,35 +684,42 @@
     return clone;
   }
 
-  protected void commitChanges() throws IOException {
-
-    if (deletedDocsDirty) {               // re-write deleted
-      si.advanceDelGen();
-
-      // We can write directly to the actual name (vs to a
-      // .tmp & renaming it) because the file is not live
-      // until segments file is written:
-      deletedDocs.write(directory(), si.getDelFileName());
-      
-      si.setDelCount(si.getDelCount()+pendingDeleteCount);
-      pendingDeleteCount = 0;
-      assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
-    } else {
-      assert pendingDeleteCount == 0;
-    }
+  /** @deprecated  */
+  protected void doCommit() throws IOException {
+    doCommit(null);
+  }
+
+  protected void doCommit(Map commitUserData) throws IOException {
+    if (hasChanges) {
+      if (deletedDocsDirty) {               // re-write deleted
+        si.advanceDelGen();
+
+        // We can write directly to the actual name (vs to a
+        // .tmp & renaming it) because the file is not live
+        // until segments file is written:
+        deletedDocs.write(directory(), si.getDelFileName());
+
+        si.setDelCount(si.getDelCount()+pendingDeleteCount);
+        pendingDeleteCount = 0;
+        assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
+      } else {
+        assert pendingDeleteCount == 0;
+      }
 
-    if (normsDirty) {               // re-write norms
-      si.setNumFields(fieldInfos.size());
-      Iterator it = norms.values().iterator();
-      while (it.hasNext()) {
-        Norm norm = (Norm) it.next();
-        if (norm.dirty) {
-          norm.reWrite(si);
+      if (normsDirty) {               // re-write norms
+        si.setNumFields(fieldInfos.size());
+        Iterator it = norms.values().iterator();
+        while (it.hasNext()) {
+          Norm norm = (Norm) it.next();
+          if (norm.dirty) {
+            norm.reWrite(si);
+          }
         }
       }
+      deletedDocsDirty = false;
+      normsDirty = false;
+      hasChanges = false;
     }
-    deletedDocsDirty = false;
-    normsDirty = false;
   }
 
   FieldsReader getFieldsReader() {
@@ -788,7 +727,6 @@
   }
   
   protected void doClose() throws IOException {
-
     termVectorsLocal.close();
     fieldsReaderLocal.close();
     
@@ -825,12 +763,6 @@
       if (storeCFSReader != null)
         storeCFSReader.close();
     }
-
-    // In DirectoryIndexReader.reopen, our directory
-    // instance was made private to us (cloned), so we
-    // always call super.doClose to possibly close the
-    // directory:
-    super.doClose();
   }
 
   static boolean hasDeletions(SegmentInfo si) throws IOException {
@@ -1254,7 +1186,7 @@
   }
 
   void startCommit() {
-    super.startCommit();
+    rollbackHasChanges = hasChanges;
     rollbackDeletedDocsDirty = deletedDocsDirty;
     rollbackNormsDirty = normsDirty;
     rollbackPendingDeleteCount = pendingDeleteCount;
@@ -1266,7 +1198,7 @@
   }
 
   void rollbackCommit() {
-    super.rollbackCommit();
+    hasChanges = rollbackHasChanges;
     deletedDocsDirty = rollbackDeletedDocsDirty;
     normsDirty = rollbackNormsDirty;
     pendingDeleteCount = rollbackPendingDeleteCount;
@@ -1277,6 +1209,14 @@
     }
   }
 
+  /** Returns the directory this index resides in. */
+  public Directory directory() {
+    // Don't ensureOpen here -- in certain cases, when a
+    // cloned/reopened reader needs to commit, it may call
+    // this method on the closed original reader
+    return directory;
+  }
+
   // This is necessary so that cloned SegmentReaders (which
   // share the underlying postings data) will map to the
   // same entry in the FieldCache.  See LUCENE-1579.
@@ -1287,4 +1227,28 @@
   public long getUniqueTermCount() {
     return tis.size();
   }
+
+  /**
+   * Lotsa tests did hacks like:<br/>
+   * SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
+   * They broke. This method serves as a hack to keep hacks working
+   */
+  static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
+    return getOnlySegmentReader(IndexReader.open(dir));
+  }
+
+  static SegmentReader getOnlySegmentReader(IndexReader reader) {
+    if (reader instanceof SegmentReader)
+      return (SegmentReader) reader;
+
+    if (reader instanceof DirectoryReader) {
+      IndexReader[] subReaders = reader.getSequentialSubReaders();
+      if (subReaders.length != 1)
+        throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");
+
+      return (SegmentReader) subReaders[0];
+    }
+
+    throw new IllegalArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
+  }
 }
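
Two API consequences of this refactoring show up in the test changes
below. First, IndexReader.open never hands back a bare SegmentReader
anymore, so tests unwrap a single-segment DirectoryReader via
getOnlySegmentReader. Second, the new clone(boolean) lets a caller flip
a reader to read-only while sharing the underlying postings. A hedged
sketch of both, assuming dir holds a single-segment index:

    // unwrap the single segment instead of casting the open() result
    SegmentReader sr = SegmentReader.getOnlySegmentReader(IndexReader.open(dir));

    // read-only clone; postings data is shared with the original
    IndexReader ro = sr.clone(true);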

Modified: lucene/java/trunk/src/java/org/apache/lucene/search/SortField.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/search/SortField.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/search/SortField.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/search/SortField.java Sun Jun  7 16:31:18 2009
@@ -324,7 +324,7 @@
   }
   
   /**
-   * Use legacy IndexSearch implementation: search with a MultiSegmentReader rather
+   * Use legacy IndexSearch implementation: search with a DirectoryReader rather
    * than passing a single hit collector to multiple SegmentReaders.
    * 
    * @param legacy true for legacy behavior

Added: lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java?rev=782406&view=auto
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java (added)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java Sun Jun  7 16:31:18 2009
@@ -0,0 +1,202 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+
+import java.io.IOException;
+
+public class TestDirectoryReader extends LuceneTestCase {
+  protected Directory dir;
+  private Document doc1;
+  private Document doc2;
+  protected SegmentReader [] readers = new SegmentReader[2];
+  protected SegmentInfos sis;
+  
+  
+  public TestDirectoryReader(String s) {
+    super(s);
+  }
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    dir = new RAMDirectory();
+    doc1 = new Document();
+    doc2 = new Document();
+    DocHelper.setupDoc(doc1);
+    DocHelper.setupDoc(doc2);
+    DocHelper.writeDoc(dir, doc1);
+    DocHelper.writeDoc(dir, doc2);
+    sis = new SegmentInfos();
+    sis.read(dir);
+  }
+
+  protected IndexReader openReader() throws IOException {
+    IndexReader reader;
+    reader = IndexReader.open(dir);
+    assertTrue(reader instanceof DirectoryReader);
+
+    assertTrue(dir != null);
+    assertTrue(sis != null);
+    assertTrue(reader != null);
+    
+    return reader;
+  }
+
+  public void test() throws Exception {
+    setUp();
+    doTestDocument();
+    doTestUndeleteAll();
+  }    
+
+  public void doTestDocument() throws IOException {
+    sis.read(dir);
+    IndexReader reader = openReader();
+    assertTrue(reader != null);
+    Document newDoc1 = reader.document(0);
+    assertTrue(newDoc1 != null);
+    assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
+    Document newDoc2 = reader.document(1);
+    assertTrue(newDoc2 != null);
+    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+    TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
+    assertTrue(vector != null);
+    TestSegmentReader.checkNorms(reader);
+  }
+
+  public void doTestUndeleteAll() throws IOException {
+    sis.read(dir);
+    IndexReader reader = openReader();
+    assertTrue(reader != null);
+    assertEquals( 2, reader.numDocs() );
+    reader.deleteDocument(0);
+    assertEquals( 1, reader.numDocs() );
+    reader.undeleteAll();
+    assertEquals( 2, reader.numDocs() );
+
+    // Ensure undeleteAll survives commit/close/reopen:
+    reader.commit();
+    reader.close();
+
+    if (reader instanceof MultiReader)
+      // MultiReader does not "own" the directory so it does
+      // not write the changes to sis on commit:
+      sis.commit(dir);
+
+    sis.read(dir);
+    reader = openReader();
+    assertEquals( 2, reader.numDocs() );
+
+    reader.deleteDocument(0);
+    assertEquals( 1, reader.numDocs() );
+    reader.commit();
+    reader.close();
+    if (reader instanceof MultiReader)
+      // MultiReader does not "own" the directory so it does
+      // not write the changes to sis on commit:
+      sis.commit(dir);
+    sis.read(dir);
+    reader = openReader();
+    assertEquals( 1, reader.numDocs() );
+  }
+        
+  
+  public void _testTermVectors() {
+    MultiReader reader = new MultiReader(readers);
+    assertTrue(reader != null);
+  }
+  
+
+  public void testIsCurrent() throws IOException {
+    RAMDirectory ramDir1=new RAMDirectory();
+    addDoc(ramDir1, "test foo", true);
+    RAMDirectory ramDir2=new RAMDirectory();
+    addDoc(ramDir2, "test blah", true);
+    IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2)};
+    MultiReader mr = new MultiReader(readers);
+    assertTrue(mr.isCurrent());   // just opened, must be current
+    addDoc(ramDir1, "more text", false);
+    assertFalse(mr.isCurrent());   // has been modified, not current anymore
+    addDoc(ramDir2, "even more text", false);
+    assertFalse(mr.isCurrent());   // has been modified even more, not current anymore
+    try {
+      mr.getVersion();
+      fail();
+    } catch (UnsupportedOperationException e) {
+      // expected exception
+    }
+    mr.close();
+  }
+
+  public void testMultiTermDocs() throws IOException {
+    RAMDirectory ramDir1=new RAMDirectory();
+    addDoc(ramDir1, "test foo", true);
+    RAMDirectory ramDir2=new RAMDirectory();
+    addDoc(ramDir2, "test blah", true);
+    RAMDirectory ramDir3=new RAMDirectory();
+    addDoc(ramDir3, "test wow", true);
+
+    IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir3)};
+    IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2), IndexReader.open(ramDir3)};
+    MultiReader mr2 = new MultiReader(readers1);
+    MultiReader mr3 = new MultiReader(readers2);
+
+    // test mixing up TermDocs and TermEnums from different readers.
+    TermDocs td2 = mr2.termDocs();
+    TermEnum te3 = mr3.terms(new Term("body","wow"));
+    td2.seek(te3);
+    int ret = 0;
+
+    // This should blow up if we forget to check that the TermEnum is from the same
+    // reader as the TermDocs.
+    while (td2.next()) ret += td2.doc();
+    td2.close();
+    te3.close();
+
+    // really a dummy assert to ensure that we got some docs and to ensure that
+    // nothing is optimized out.
+    assertTrue(ret > 0);
+  }
+
+  public void testAllTermDocs() throws IOException {
+    IndexReader reader = openReader();
+    int NUM_DOCS = 2;
+    TermDocs td = reader.termDocs(null);
+    for(int i=0;i<NUM_DOCS;i++) {
+      assertTrue(td.next());
+      assertEquals(i, td.doc());
+      assertEquals(1, td.freq());
+    }
+    td.close();
+    reader.close();
+  }
+
+  private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
+    IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(), create, IndexWriter.MaxFieldLength.LIMITED);
+    Document doc = new Document();
+    doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
+    iw.addDocument(doc);
+    iw.close();
+  }
+}

Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java
------------------------------------------------------------------------------
    svn:eol-style = native
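
testMultiTermDocs above deliberately seeks a TermDocs with a TermEnum
obtained from a different MultiReader. The seek(TermEnum) overload can
reuse the enum's already-located TermInfo as an optimization, but only
when the enum comes from the same reader; otherwise it must degrade to a
plain term seek. Schematically (readerA/readerB are assumed distinct
readers over the same field):

    TermEnum te = readerA.terms(new Term("body", "wow"));
    TermDocs td = readerB.termDocs();
    td.seek(te);   // must behave like td.seek(te.term()) here,
                   // since te does not belong to readerB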

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java Sun Jun  7 16:31:18 2009
@@ -299,7 +299,7 @@
 
     _TestUtil.checkIndex(dir);
 
-    SegmentReader reader = (SegmentReader) IndexReader.open(dir);
+    SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
     FieldInfos fi = reader.fieldInfos();
     // f1
     assertFalse("f1 should have no norms", reader.hasNorms("f1"));

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?rev=782406&r1=782405&r2=782406&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Sun Jun  7 16:31:18 2009
@@ -1675,8 +1675,7 @@
     writer.close();
 
     // Open reader
-    IndexReader r = IndexReader.open(dir);
-    assertTrue(r instanceof SegmentReader);
+    IndexReader r = SegmentReader.getOnlySegmentReader(dir);
     final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
     assertEquals(1, ints.length);
     assertEquals(17, ints[0]);
@@ -1685,7 +1684,6 @@
     IndexReader r2 = (IndexReader) r.clone();
     r.close();
     assertTrue(r2 != r);
-    assertTrue(r2 instanceof SegmentReader);
     final int[] ints2 = FieldCache.DEFAULT.getInts(r2, "number");
     r2.close();
 
@@ -1709,8 +1707,8 @@
 
     // Open reader1
     IndexReader r = IndexReader.open(dir);
-    assertTrue(r instanceof SegmentReader);
-    final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
     assertEquals(1, ints.length);
     assertEquals(17, ints[0]);
 
@@ -1719,11 +1717,9 @@
     writer.commit();
 
     // Reopen reader1 --> reader2
-    IndexReader r2 = (IndexReader) r.reopen();
+    IndexReader r2 = r.reopen();
     r.close();
-    assertTrue(r2 instanceof MultiSegmentReader);
     IndexReader sub0 = r2.getSequentialSubReaders()[0];
-    assertTrue(sub0 instanceof SegmentReader);
     final int[] ints2 = FieldCache.DEFAULT.getInts(sub0, "number");
     r2.close();
     assertTrue(ints == ints2);
@@ -1743,14 +1739,15 @@
 
     // Open reader1
     IndexReader r = IndexReader.open(dir);
-    assertTrue(r instanceof SegmentReader);
-    final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
+    assertTrue(r instanceof DirectoryReader);
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
     assertEquals(1, ints.length);
     assertEquals(17, ints[0]);
 
     // Reopen to readonly w/ no changes
-    IndexReader r3 = (IndexReader) r.reopen(true);
-    assertTrue(r3 instanceof ReadOnlySegmentReader);
+    IndexReader r3 = r.reopen(true);
+    assertTrue(r3 instanceof ReadOnlyDirectoryReader);
     r3.close();
 
     // Add new segment
@@ -1758,9 +1755,9 @@
     writer.commit();
 
     // Reopen reader1 --> reader2
-    IndexReader r2 = (IndexReader) r.reopen(true);
+    IndexReader r2 = r.reopen(true);
     r.close();
-    assertTrue(r2 instanceof MultiSegmentReader);
+    assertTrue(r2 instanceof ReadOnlyDirectoryReader);
     IndexReader[] subs = r2.getSequentialSubReaders();
     final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
     r2.close();
@@ -1784,7 +1781,8 @@
     writer.commit();
 
     IndexReader r = IndexReader.open(dir);
-    assertEquals(36, r.getUniqueTermCount());
+    IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
+    assertEquals(36, r1.getUniqueTermCount());
     writer.addDocument(doc);
     writer.commit();
     IndexReader r2 = r.reopen();


