jackrabbit-commits mailing list archives

From mreut...@apache.org
Subject svn commit: r289211 - in /incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene: IndexInfos.java IndexMerger.java MultiIndex.java Recovery.java RedoLog.java VolatileIndex.java
Date Thu, 15 Sep 2005 11:39:41 GMT
Author: mreutegg
Date: Thu Sep 15 04:39:35 2005
New Revision: 289211

URL: http://svn.apache.org/viewcvs?rev=289211&view=rev
Log:
JCR-204: Improve recoverability

Added:
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java   (with props)
Modified:
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexMerger.java
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java
    incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java
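
The commit moves the redo log out of the VolatileIndex and into the MultiIndex, and reworks every index update as a sequence of redo-logged actions (Start, AddNode, DeleteNode, AddIndex, CreateIndex, DeleteIndex, VolatileCommit, Commit). Each action is run through MultiIndex.executeAndLog(), which executes it and appends it to the redo log, flushing the log when a transaction commits; the new Recovery class replays the log on startup. The following is a minimal, self-contained sketch of that execute-and-log / replay pattern, with simplified stand-in names rather than the actual Jackrabbit classes.

import java.io.*;
import java.util.*;

// Minimal sketch of the execute-then-log pattern: each modification is applied
// and appended to a redo log; a COM line marks its transaction as committed,
// and recovery replays only actions that belong to committed transactions.
public class RedoLogSketch {

    private final File logFile;
    private final List index = new ArrayList(); // stands in for the real index

    public RedoLogSketch(File logFile) {
        this.logFile = logFile;
    }

    // counterpart of MultiIndex.executeAndLog(): apply the change, then log it
    public void addNode(long txId, String uuid) throws IOException {
        index.add(uuid);
        appendLine(txId + " ADD " + uuid);
    }

    public void commit(long txId) throws IOException {
        appendLine(txId + " COM");
    }

    // counterpart of Recovery.run(): replay ADD entries of committed transactions
    public void recover() throws IOException {
        if (!logFile.exists()) {
            return;
        }
        List lines = new ArrayList();
        BufferedReader in = new BufferedReader(new FileReader(logFile));
        try {
            for (String line = in.readLine(); line != null; line = in.readLine()) {
                lines.add(line);
            }
        } finally {
            in.close();
        }
        Set committed = new HashSet();
        for (Iterator it = lines.iterator(); it.hasNext();) {
            String[] parts = ((String) it.next()).split(" ");
            if (parts.length == 2 && "COM".equals(parts[1])) {
                committed.add(parts[0]);
            }
        }
        for (Iterator it = lines.iterator(); it.hasNext();) {
            String[] parts = ((String) it.next()).split(" ");
            if (parts.length == 3 && "ADD".equals(parts[1]) && committed.contains(parts[0])) {
                index.add(parts[2]);
            }
        }
    }

    private void appendLine(String line) throws IOException {
        Writer out = new BufferedWriter(new FileWriter(logFile, true));
        try {
            out.write(line);
            out.write('\n');
        } finally {
            out.close();
        }
    }
}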

Modified: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java?rev=289211&r1=289210&r2=289211&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java (original)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexInfos.java Thu Sep 15 04:39:35 2005
@@ -26,6 +26,8 @@
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
+import java.util.HashSet;
 
 /**
  * Stores a sequence of index names.
@@ -48,6 +50,11 @@
     private List indexes = new ArrayList();
 
     /**
+     * Set of names for quick lookup.
+     */
+    private Set names = new HashSet();
+
+    /**
      * Name of the file where the infos are stored.
      */
     private final String name;
@@ -93,7 +100,9 @@
             DataInputStream di = new DataInputStream(in);
             counter = di.readInt();
             for (int i = di.readInt(); i > 0; i--) {
-                indexes.add(di.readUTF());
+                String indexName = di.readUTF();
+                indexes.add(indexName);
+                names.add(indexName);
             }
         } finally {
             in.close();
@@ -157,7 +166,11 @@
      * @param name the name to add.
      */
     void addName(String name) {
+        if (names.contains(name)) {
+            throw new IllegalArgumentException("already contains: " + name);
+        }
         indexes.add(name);
+        names.add(name);
         dirty = true;
     }
 
@@ -167,6 +180,7 @@
      */
     void removeName(String name) {
         indexes.remove(name);
+        names.remove(name);
         dirty = true;
     }
 
@@ -175,8 +189,20 @@
      * @param i the position.
      */
     void removeName(int i) {
-        indexes.remove(i);
+        Object name = indexes.remove(i);
+        names.remove(name);
         dirty = true;
+    }
+
+    /**
+     * Returns <code>true</code> if <code>name</code> exists in this
+     * <code>IndexInfos</code>; <code>false</code> otherwise.
+     *
+     * @param name the name to test for existence.
+     * @return <code>true</code> if it exists in this <code>IndexInfos</code>.
+     */
+    boolean contains(String name) {
+        return names.contains(name);
     }
 
     /**
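
The IndexInfos change above keeps the ordered list of index names but mirrors it in a HashSet, so the new contains() check is a constant-time lookup and addName() can reject duplicate names early. A stripped-down illustration of that list-plus-set pattern (a hypothetical class, not the actual IndexInfos, which also persists the names to disk):

import java.util.*;

// Ordered list of names plus a mirrored set for fast membership tests.
class NameList {

    private final List names = new ArrayList(); // preserves insertion order
    private final Set lookup = new HashSet();   // constant-time contains()

    void addName(String name) {
        if (lookup.contains(name)) {
            throw new IllegalArgumentException("already contains: " + name);
        }
        names.add(name);
        lookup.add(name);
    }

    void removeName(String name) {
        names.remove(name);
        lookup.remove(name);
    }

    boolean contains(String name) {
        return lookup.contains(name);
    }

    String getName(int i) {
        return (String) names.get(i);
    }
}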

Modified: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexMerger.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexMerger.java?rev=289211&r1=289210&r2=289211&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexMerger.java (original)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/IndexMerger.java Thu Sep 15 04:39:35 2005
@@ -249,7 +249,7 @@
             }
             try {
                 log.debug("create new index");
-                PersistentIndex index = multiIndex.createIndex();
+                PersistentIndex index = multiIndex.getOrCreateIndex(null, true);
                 boolean success = false;
                 try {
 

Modified: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java?rev=289211&r1=289210&r2=289211&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (original)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java Thu Sep 15 04:39:35 2005
@@ -21,6 +21,7 @@
 import org.apache.jackrabbit.core.state.ItemStateManager;
 import org.apache.jackrabbit.core.state.NoSuchItemStateException;
 import org.apache.jackrabbit.core.state.NodeState;
+import org.apache.jackrabbit.uuid.Constants;
 import org.apache.log4j.Logger;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
@@ -57,8 +58,8 @@
  * <p/>
  * The persistent indexes are merged from time to time. The merge behaviour
  * is configurable using the methods: {@link SearchIndex#setMaxMergeDocs(int)},
- * {@link SearchIndex#setMergeFactor(int)} and {@link SearchIndex#setMinMergeDocs(int)}. For detailed
- * description of the configuration parameters see also the lucene
+ * {@link SearchIndex#setMergeFactor(int)} and {@link SearchIndex#setMinMergeDocs(int)}.
+ * For detailed description of the configuration parameters see also the lucene
  * <code>IndexWriter</code> class.
  * <p/>
  * This class is thread-safe.
@@ -96,7 +97,10 @@
     private final IndexInfos deletable = new IndexInfos("deletable");
 
     /**
-     * List of persistent indexes.
+     * List of open persistent indexes. This list may also contain an open
+     * PersistentIndex owned by the IndexMerger daemon. Such an index is not
+     * registered with indexNames and <b>must not</b> be used in regular index
+     * operations (delete node, etc.)!
      */
     private final List indexes = new ArrayList();
 
@@ -148,9 +152,9 @@
     private boolean redoLogApplied = false;
 
     /**
-     * The last time this index was modified. That is, a document was added.
+     * The time this index was last flushed or a transaction was committed.
      */
-    private long lastModificationTime;
+    private long lastFlushTime;
 
     /**
      * The <code>IndexMerger</code> for this <code>MultiIndex</code>.
@@ -158,9 +162,24 @@
     private final IndexMerger merger;
 
     /**
-     * Timer to schedule commits of the volatile index after some idle time.
+     * Timer to schedule flushes of this index after some idle time.
      */
-    private final Timer commitTimer = new Timer(true);
+    private final Timer flushTimer = new Timer(true);
+
+    /**
+     * The RedoLog of this <code>MultiIndex</code>.
+     */
+    private final RedoLog redoLog;
+
+    /**
+     * The next transaction id.
+     */
+    private long nextTransactionId = 0;
+
+    /**
+     * The current transaction id.
+     */
+    private long currentTransactionId = -1;
 
     /**
      * Creates a new MultiIndex.
@@ -179,16 +198,15 @@
         this.indexDir = indexDir;
         this.handler = handler;
         this.cache = new DocNumberCache(handler.getCacheSize());
+        this.redoLog = new RedoLog(new File(indexDir, REDO_LOG)); 
 
-        boolean doInitialIndex = false;
         if (indexNames.exists(indexDir)) {
             indexNames.read(indexDir);
-        } else {
-            doInitialIndex = true;
         }
         if (deletable.exists(indexDir)) {
             deletable.read(indexDir);
         }
+
         // try to remove deletable files if there are any
         attemptDelete();
 
@@ -206,8 +224,14 @@
             // open persistent indexes
             for (int i = 0; i < indexNames.size(); i++) {
                 File sub = new File(indexDir, indexNames.getName(i));
-                if (!sub.exists() && !sub.mkdir()) {
-                    throw new IOException("Unable to create directory: " + sub.getAbsolutePath());
+                // only open if it still exists
+                // it is possible that indexNames still contains a name for
+                // an index that has been deleted, but indexNames has not been
+                // written to disk.
+                if (!sub.exists()) {
+                    log.debug("index does not exist anymore: " + sub.getAbsolutePath());
+                    // move on to next index
+                    continue;
                 }
                 PersistentIndex index = new PersistentIndex(indexNames.getName(i),
                         sub, false, handler.getAnalyzer(), cache);
@@ -219,60 +243,36 @@
                 merger.indexAdded(index.getName(), index.getNumDocuments());
             }
 
-            // create volatile index and check / apply redo log
             // init volatile index
-            RedoLog redoLog = new RedoLog(new File(indexDir, REDO_LOG));
+            resetVolatileIndex();
 
-            if (redoLog.hasEntries()) {
-                // when we have entries in the redo log there is no need to reindex
-                doInitialIndex = false;
-
-                log.warn("Found uncommitted redo log. Applying changes now...");
-                // apply changes to persistent index
-                Iterator it = redoLog.getEntries().iterator();
-                while (it.hasNext()) {
-                    RedoLog.Entry entry = (RedoLog.Entry) it.next();
-                    if (entry.type == RedoLog.Entry.NODE_ADDED) {
-                        try {
-                            NodeState state = (NodeState) stateMgr.getItemState(new NodeId(entry.uuid));
-                            addNodePersistent(state);
-                        } catch (NoSuchItemStateException e) {
-                            // item does not exist anymore
-                        } catch (Exception e) {
-                            log.warn("Unable to add node to index: ", e);
-                        }
-                    } else {
-                        deleteNodePersistent(entry.uuid);
-                    }
-                }
-                log.warn("Redo changes applied.");
-                redoLog.clear();
-                redoLogApplied = true;
-            }
+            redoLogApplied = redoLog.hasEntries();
 
-            volatileIndex = new VolatileIndex(handler.getAnalyzer(), redoLog);
-            volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
-            volatileIndex.setBufferSize(handler.getBufferSize());
+            // run recovery
+            Recovery.run(this, redoLog);
 
             // now that we are ready, start index merger
             merger.start();
 
-            if (doInitialIndex) {
-                // index root node
+            // do an initial index if there are no indexes at all
+            if (indexNames.size() == 0) {
+                // traverse and index workspace
+                executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
                 NodeState rootState = (NodeState) stateMgr.getItemState(new NodeId(rootUUID));
                 createIndex(rootState, stateMgr);
+                executeAndLog(new Commit(getTransactionId()));
             }
         } catch (ItemStateException e) {
             throw new IOException("Error indexing root node: " + e.getMessage());
         } catch (RepositoryException e) {
             throw new IOException("Error indexing root node: " + e.getMessage());
         }
-        lastModificationTime = System.currentTimeMillis();
-        startCommitTimer();
+
+        startFlushTimer();
     }
 
     /**
-     * Update the index by removing some documents and adding others.
+     * Atomically updates the index by removing some documents and adding others.
      *
      * @param remove Iterator of <code>Term</code>s that identify documents to
      *               remove
@@ -285,23 +285,31 @@
         synchronized (updateMonitor) {
             updateInProgress = true;
         }
-        boolean hasAdditions = add.hasNext();
         try {
-            // todo block with remove & add is not atomic
+            long transactionId = nextTransactionId++;
+            executeAndLog(new Start(transactionId));
+
+            boolean flush = false;
             while (remove.hasNext()) {
-                internalRemoveDocument((Term) remove.next());
+                String uuid = ((Term) remove.next()).text();
+                executeAndLog(new DeleteNode(transactionId, uuid));
             }
             while (add.hasNext()) {
                 Document doc = (Document) add.next();
                 if (doc != null) {
-                    internalAddDocument(doc);
+                    executeAndLog(new AddNode(transactionId, doc));
+                    // commit volatile index if needed
+                    flush |= checkVolatileCommit();
                 }
             }
+            executeAndLog(new Commit(transactionId));
+
+            // flush whole index when volatile index has been committed.
+            if (flush) {
+                flush();
+            }
         } finally {
             synchronized (updateMonitor) {
-                if (hasAdditions) {
-                    lastModificationTime = System.currentTimeMillis();
-                }
                 updateInProgress = false;
                 updateMonitor.notifyAll();
                 if (multiReader != null) {
@@ -336,8 +344,7 @@
     }
 
     /**
-     * Deletes all documents that match the <code>idTerm</code> and immediately
-     * commits the changes to the persistent indexes.
+     * Deletes all documents that match the <code>idTerm</code>.
      *
      * @param idTerm documents that match this term will be deleted.
      * @return the number of deleted documents.
@@ -349,12 +356,23 @@
         }
         int num;
         try {
+            executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
             num = volatileIndex.removeDocument(idTerm);
+            if (num > 0) {
+                redoLog.append(new DeleteNode(getTransactionId(), idTerm.text()));
+            }
             for (int i = 0; i < indexes.size(); i++) {
                 PersistentIndex index = (PersistentIndex) indexes.get(i);
-                num += index.removeDocument(idTerm);
-                index.commit();
+                // only remove documents from registered indexes
+                if (indexNames.contains(index.getName())) {
+                    int removed = index.removeDocument(idTerm);
+                    if (removed > 0) {
+                        redoLog.append(new DeleteNode(getTransactionId(), idTerm.text()));
+                    }
+                    num += removed;
+                }
             }
+            executeAndLog(new Commit(getTransactionId()));
         } finally {
             synchronized (updateMonitor) {
                 updateInProgress = false;
@@ -416,18 +434,40 @@
      * Creates a new Persistent index. The new index is not registered with this
      * <code>MultiIndex</code>.
      *
+     * @param indexName the name of the index to open, or <code>null</code> if
+     *                  an index with a new name should be created.
+     * @param create    if the index that is opened should delete existing index
+     *                  data.
      * @return a new <code>PersistentIndex</code>.
      * @throws IOException if a new index cannot be created.
      */
-    synchronized PersistentIndex createIndex() throws IOException {
-        File sub = newIndexFolder();
-        String name = sub.getName();
-        PersistentIndex index = new PersistentIndex(name, sub, true,
+    synchronized PersistentIndex getOrCreateIndex(String indexName, boolean create)
+            throws IOException {
+        // check existing
+        for (Iterator it = indexes.iterator(); it.hasNext(); ) {
+            PersistentIndex idx = (PersistentIndex) it.next();
+            if (idx.getName().equals(indexName)) {
+                return idx;
+            }
+        }
+
+        // otherwise open / create it
+        File sub;
+        if (indexName == null) {
+            sub = newIndexFolder();
+            indexName = sub.getName();
+        } else {
+            sub = new File(indexDir, indexName);
+        }
+        PersistentIndex index = new PersistentIndex(indexName, sub, create,
                 handler.getAnalyzer(), cache);
         index.setMaxMergeDocs(handler.getMaxMergeDocs());
         index.setMergeFactor(handler.getMergeFactor());
         index.setMinMergeDocs(handler.getMinMergeDocs());
         index.setUseCompoundFile(handler.getUseCompoundFile());
+
+        // add to list of open indexes and return it
+        indexes.add(index);
         return index;
     }
 
@@ -447,34 +487,31 @@
                                      PersistentIndex index,
                                      Collection deleted)
             throws IOException {
-        Set names = new HashSet(Arrays.asList(obsoleteIndexes));
-        // delete documents in index
-        for (Iterator it = deleted.iterator(); it.hasNext(); ) {
-            Term id = (Term) it.next();
-            int del = index.removeDocument(id);
-            log.debug("deleted " + del + " document for id: " + id.text());
-        }
-        index.commit();
 
-        // now replace indexes
         synchronized (updateMonitor) {
             updateInProgress = true;
         }
         try {
-            for (Iterator it = indexes.iterator(); it.hasNext(); ) {
-                PersistentIndex idx = (PersistentIndex) it.next();
-                if (names.contains(idx.getName())) {
-                    it.remove();
-                    indexNames.removeName(idx.getName());
-                    idx.close();
-                    deleteIndex(idx);
+            executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+            // delete obsolete indexes
+            Set names = new HashSet(Arrays.asList(obsoleteIndexes));
+            for (Iterator it = names.iterator(); it.hasNext(); ) {
+                // do not try to delete indexes that are already gone
+                String indexName = (String) it.next();
+                if (indexNames.contains(indexName)) {
+                    executeAndLog(new DeleteIndex(getTransactionId(), indexName));
                 }
             }
-            // add new
-            indexes.add(index);
-            indexNames.addName(index.getName());
-            merger.indexAdded(index.getName(), index.getNumDocuments());
-            indexNames.write(indexDir);
+
+            executeAndLog(new AddIndex(getTransactionId(), index.getName()));
+
+            // delete documents in index
+            for (Iterator it = deleted.iterator(); it.hasNext(); ) {
+                Term id = (Term) it.next();
+                executeAndLog(new DeleteNode(getTransactionId(), id.text()));
+            }
+
+            executeAndLog(new Commit(getTransactionId()));
         } finally {
             synchronized (updateMonitor) {
                 updateInProgress = false;
@@ -512,11 +549,16 @@
             // some other read thread might have created the reader in the
             // meantime -> check again
             if (multiReader == null) {
-                ReadOnlyIndexReader[] readers = new ReadOnlyIndexReader[indexes.size() + 1];
+                List readerList = new ArrayList();
                 for (int i = 0; i < indexes.size(); i++) {
-                    readers[i] = ((PersistentIndex) indexes.get(i)).getReadOnlyIndexReader();
+                    PersistentIndex pIdx = (PersistentIndex) indexes.get(i);
+                    if (indexNames.contains(pIdx.getName())) {
+                        readerList.add(pIdx.getReadOnlyIndexReader());
+                    }
                 }
-                readers[readers.length - 1] = volatileIndex.getReadOnlyIndexReader();
+                readerList.add(volatileIndex.getReadOnlyIndexReader());
+                ReadOnlyIndexReader[] readers =
+                        (ReadOnlyIndexReader[]) readerList.toArray(new ReadOnlyIndexReader[readerList.size()]);
                 multiReader = new CachingMultiReader(readers, cache);
             }
             multiReader.incrementRefCount();
@@ -525,6 +567,15 @@
     }
 
     /**
+     * Returns the volatile index.
+     *
+     * @return the volatile index.
+     */
+    VolatileIndex getVolatileIndex() {
+        return volatileIndex;
+    }
+
+    /**
      * Closes this <code>MultiIndex</code>.
      */
     void close() {
@@ -536,7 +587,7 @@
 
         synchronized (this) {
             // stop timer
-            commitTimer.cancel();
+            flushTimer.cancel();
 
             // commit / close indexes
             if (multiReader != null) {
@@ -548,9 +599,7 @@
                 multiReader = null;
             }
             try {
-                if (volatileIndex.getRedoLog().hasEntries()) {
-                    commit();
-                }
+                flush();
             } catch (IOException e) {
                 log.error("Exception while closing search index.", e);
             }
@@ -571,10 +620,11 @@
 
     /**
      * Returns a lucene Document for the <code>node</code>.
+     *
      * @param node the node to index.
      * @return the index document.
      * @throws RepositoryException if an error occurs while reading from the
-     *   workspace.
+     *                             workspace.
      */
     Document createDocument(NodeState node) throws RepositoryException {
         return handler.createDocument(node, nsMappings);
@@ -590,129 +640,167 @@
     }
 
     /**
-     * Deletes the <code>index</code>. If the index directory cannot be removed
-     * because (windows) file handles are still open, the directory is marked
-     * for future deletion.
+     * Removes the <code>index</code> from the list of active sub indexes. The
+     * index is not actually deleted right away; deletion is postponed until the
+     * transaction commits.
      * <p/>
      * This method does not close the index, but rather expects that the index
      * has already been closed.
      *
      * @param index the index to delete.
      */
-    void deleteIndex(PersistentIndex index) {
-        File dir = new File(indexDir, index.getName());
-        if (!deleteIndex(dir)) {
-            // try again later
-            deletable.addName(index.getName());
+    synchronized void deleteIndex(PersistentIndex index) {
+        // remove it from the lists if index is registered
+        indexes.remove(index);
+        indexNames.removeName(index.getName());
+        deletable.addName(index.getName());
+    }
+
+    /**
+     * Flushes this <code>MultiIndex</code>. Persists all pending changes and
+     * resets the redo log.
+     *
+     * @throws IOException if the flush fails.
+     */
+    synchronized void flush() throws IOException {
+        // commit volatile index
+        executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+        commitVolatileIndex();
+
+        // commit persistent indexes
+        for (int i = indexes.size() - 1; i >= 0; i--) {
+            PersistentIndex index = (PersistentIndex) indexes.get(i);
+            // only commit indexes we own
+            // index merger also places PersistentIndex instances in indexes,
+            // but does not make them public by registering the name in indexNames
+            if (indexNames.contains(index.getName())) {
+                index.commit();
+                // check if index still contains documents
+                if (index.getNumDocuments() == 0) {
+                    executeAndLog(new DeleteIndex(getTransactionId(), index.getName()));
+                }
+            }
         }
+        executeAndLog(new Commit(getTransactionId()));
+
+        indexNames.write(indexDir);
+
+        // reset redo log
+        redoLog.clear();
+
+        lastFlushTime = System.currentTimeMillis();
+
+        // delete obsolete indexes
+        attemptDelete();
+    }
+
+    //-------------------------< internal >-------------------------------------
+
+    /**
+     * Resets the volatile index to a new instance.
+     */
+    private void resetVolatileIndex() throws IOException {
+        volatileIndex = new VolatileIndex(handler.getAnalyzer());
+        volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
+        volatileIndex.setBufferSize(handler.getBufferSize());
+    }
+
+    /**
+     * Returns a lucene Document for the Node with <code>id</code>.
+     *
+     * @param id the id of the node to index.
+     * @return the index document.
+     * @throws RepositoryException if an error occurs while reading from the
+     *                             workspace or if there is no node with
+     *                             <code>id</code>.
+     */
+    private Document createDocument(NodeId id) throws RepositoryException {
         try {
-            deletable.write(indexDir);
-        } catch (IOException e) {
-            log.warn("Exception while writing deletable indexes: " + e);
+            NodeState state = (NodeState) handler.getContext().getItemStateManager().getItemState(id);
+            return createDocument(state);
+        } catch (NoSuchItemStateException e) {
+            throw new RepositoryException("Node " + id + " does not exist", e);
+        } catch (ItemStateException e) {
+            throw new RepositoryException("Error retrieving node: " + id, e);
         }
     }
 
-    //-------------------------< internal >-------------------------------------
+    /**
+     * Returns the current transaction id.
+     *
+     * @return the current transaction id.
+     */
+    private long getTransactionId() {
+        return currentTransactionId;
+    }
 
     /**
-     * Unsynchronized implementation to remove a document from the index. Note:
-     * this method will at most remove 1 (one) document from the index. This
-     * method assumes <code>idTerm</code> is unique.
-     *
-     * @param idTerm term that identifies the document to remove.
-     * @return number of documents to remove.
-     * @throws IOException if an error occurs while updating the index.
-     */
-    private int internalRemoveDocument(Term idTerm) throws IOException {
-        // if the document cannot be deleted from the volatile index
-        // delete it from one of the persistent indexes.
-        int num = volatileIndex.removeDocument(idTerm);
-        if (num == 0) {
-            for (int i = indexes.size() - 1; i >= 0; i--) {
-                PersistentIndex index = (PersistentIndex) indexes.get(i);
-                num = index.removeDocument(idTerm);
-                if (num > 0) {
-                    return num;
-                }
-            }
-        } else {
-            return num;
+     * Executes action <code>a</code> and appends the action to the redo log if
+     * successful.
+     *
+     * @param a the <code>Action</code> to execute.
+     * @return the executed action.
+     * @throws IOException         if an error occurs while executing the action
+     *                             or appending the action to the redo log.
+     */
+    private Action executeAndLog(Action a)
+            throws IOException {
+        a.execute(this);
+        redoLog.append(a);
+        if (a.getType() == Action.TYPE_COMMIT) {
+            redoLog.flush();
         }
-        return 0;
+        return a;
     }
 
     /**
-     * Unsynchronized implementation to add a document to the index.
+     * Checks whether the volatile index needs to be committed according to
+     * {@link SearchIndex#getMinMergeDocs()}.
      *
-     * @param doc the document to add.
-     * @throws IOException if an error occurs while adding the document to the
+     * @return <code>true</code> if the volatile index has been committed,
+     *         <code>false</code> otherwise.
+     * @throws IOException if an error occurs while committing the volatile
      *                     index.
      */
-    private void internalAddDocument(Document doc) throws IOException {
-        volatileIndex.addDocument(doc);
-        if (volatileIndex.getRedoLog().getSize() >= handler.getMinMergeDocs()) {
-            long time = System.currentTimeMillis();
-            commit();
-            time = System.currentTimeMillis() - time;
-            log.info("Committed in-memory index in " + time + "ms.");
+    private boolean checkVolatileCommit() throws IOException {
+        if (volatileIndex.getNumDocuments() >= handler.getMinMergeDocs()) {
+            commitVolatileIndex();
+            return true;
         }
+        return false;
     }
 
     /**
-     * Commits the volatile index to a persistent index, commits persistent
-     * indexes (persist deletions) and finally merges indexes if necessary.
+     * Commits the volatile index to a persistent index. The new persistent
+     * index is added to the list of indexes but not written to disk. When this
+     * method returns, a new volatile index has been created.
      *
-     * @throws IOException if an error occurs.
+     * @throws IOException if an error occurs while writing the volatile index
+     *                     to disk.
      */
-    private void commit() throws IOException {
+    private void commitVolatileIndex() throws IOException {
 
         // check if volatile index contains documents at all
-        if (volatileIndex.getIndexReader().numDocs() > 0) {
-
-            File sub = newIndexFolder();
-            String name = sub.getName();
-            PersistentIndex index = new PersistentIndex(name, sub, true,
-                    handler.getAnalyzer(), cache);
-            index.setMaxMergeDocs(handler.getMaxMergeDocs());
-            index.setMergeFactor(handler.getMergeFactor());
-            index.setMinMergeDocs(handler.getMinMergeDocs());
-            index.setUseCompoundFile(handler.getUseCompoundFile());
-            index.copyIndex(volatileIndex);
-
-            // if merge has been successful add index
-            indexes.add(index);
-            indexNames.addName(name);
-            indexNames.write(indexDir);
-
-            merger.indexAdded(index.getName(), index.getNumDocuments());
-
-            // check if obsolete indexes can be deleted
-            // todo move to other place?
-            attemptDelete();
-        }
+        if (volatileIndex.getNumDocuments() > 0) {
 
-        // commit persistent indexes
-        for (int i = indexes.size() - 1; i >= 0; i--) {
-            PersistentIndex index = (PersistentIndex) indexes.get(i);
-            index.commit();
-            // check if index still contains documents
-            if (index.getNumDocuments() == 0) {
-                indexes.remove(i);
-                indexNames.removeName(index.getName());
-                indexNames.write(indexDir);
-                index.close();
-                deleteIndex(index);
-            }
-        }
+            long time = System.currentTimeMillis();
+            // create index
+            CreateIndex create = new CreateIndex(getTransactionId(), null, true);
+            executeAndLog(create);
+
+            // commit volatile index
+            executeAndLog(new VolatileCommit(getTransactionId(), create.getIndexName()));
+
+            // add new index
+            AddIndex add = new AddIndex(getTransactionId(), create.getIndexName());
+            executeAndLog(add);
 
-        // reset redo log
-        volatileIndex.getRedoLog().clear();
-
-        // create new volatile index
-        volatileIndex = new VolatileIndex(handler.getAnalyzer(), volatileIndex.getRedoLog());
-        volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
-        volatileIndex.setBufferSize(handler.getBufferSize());
+            // create new volatile index
+            resetVolatileIndex();
 
+            time = System.currentTimeMillis() - time;
+            log.info("Committed in-memory index in " + time + "ms.");
+        }
     }
 
     /**
@@ -728,7 +816,9 @@
      */
     private void createIndex(NodeState node, ItemStateManager stateMgr)
             throws IOException, ItemStateException, RepositoryException {
-        addDocument(createDocument(node));
+        String uuid = node.getId().toString();
+        executeAndLog(new AddNode(getTransactionId(), uuid));
+        checkVolatileCommit();
         List children = node.getChildNodeEntries();
         for (Iterator it = children.iterator(); it.hasNext();) {
             NodeState.ChildNodeEntry child = (NodeState.ChildNodeEntry) it.next();
@@ -738,65 +828,6 @@
     }
 
     /**
-     * Adds a node to the persistent index. This method will <b>not</b> aquire a
-     * write lock while writing!
-     * <p/>
-     * If an error occurs when reading from the ItemStateManager an error log
-     * message is written and the node is ignored.
-     *
-     * @param node the node to add.
-     * @throws IOException         if an error occurs while writing to the
-     *                             index.
-     */
-    private void addNodePersistent(NodeState node)
-            throws IOException {
-        Document doc;
-        try {
-            doc = createDocument(node);
-        } catch (RepositoryException e) {
-            log.warn("RepositoryException: " + e.getMessage());
-            return;
-        }
-        // make sure at least one persistent index exists
-        if (indexes.size() == 0) {
-            File sub = newIndexFolder();
-            String name = sub.getName();
-            PersistentIndex index = new PersistentIndex(name, sub, true,
-                    handler.getAnalyzer(), cache);
-            index.setMaxMergeDocs(handler.getMaxMergeDocs());
-            index.setMergeFactor(handler.getMergeFactor());
-            index.setMinMergeDocs(handler.getMinMergeDocs());
-            index.setUseCompoundFile(handler.getUseCompoundFile());
-            indexes.add(index);
-            indexNames.addName(name);
-            indexNames.write(indexDir);
-        }
-        // add node to last index
-        PersistentIndex last = (PersistentIndex) indexes.get(indexes.size() - 1);
-        last.addDocument(doc);
-    }
-
-    /**
-     * Removes a node from the persistent index. This method will <b>not</b>
-     * aquire a write lock while writing!
-     *
-     * @param uuid the uuid of the node to remove.
-     * @throws IOException if an error occurs while writing to the index.
-     */
-    private void deleteNodePersistent(String uuid) throws IOException {
-        Term idTerm = new Term(FieldNames.UUID, uuid);
-        // try to remove node from index until successful
-        // use reverse order; nodes that survived for a long time
-        // will probably never be deleted.
-        for (int i = indexes.size() - 1; i >= 0; i--) {
-            PersistentIndex index = (PersistentIndex) indexes.get(i);
-            if (index.removeDocument(idTerm) > 0) {
-                break;
-            }
-        }
-    }
-
-    /**
      * Attempts to delete all files recorded in {@link #deletable}.
      */
     private void attemptDelete() {
@@ -859,40 +890,40 @@
     }
 
     /**
-     * Starts the commit timer that periodically checks if the volatile index
-     * should be committed. The timer task will call {@link #checkCommit()}.
+     * Starts the flush timer that periodically checks if the index
+     * should be flushed. The timer task will call {@link #checkFlush()}.
      */
-    private void startCommitTimer() {
-        commitTimer.schedule(new TimerTask() {
+    private void startFlushTimer() {
+        lastFlushTime = System.currentTimeMillis();
+        flushTimer.schedule(new TimerTask() {
             public void run() {
-                checkCommit();
+                checkFlush();
             }
         }, 0, 1000);
     }
 
     /**
-     * Checks the duration between the last modification to this index and the
-     * current time and commits the volatile index (if there are changes at all)
+     * Checks the duration between the last commit to this index and the
+     * current time and flushes the index (if there are changes at all)
      * if the duration (idle time) is more than {@link SearchIndex#getVolatileIdleTime()}
      * seconds.
      */
-    private synchronized void checkCommit() {
-        long idleTime = System.currentTimeMillis() - lastModificationTime;
-        // do not commit if volatileIdleTime is zero or negative
+    private synchronized void checkFlush() {
+        long idleTime = System.currentTimeMillis() - lastFlushTime;
+        // do not flush if volatileIdleTime is zero or negative
         if (handler.getVolatileIdleTime() > 0
                 && idleTime > handler.getVolatileIdleTime() * 1000) {
             try {
-                if (volatileIndex.getRedoLog().hasEntries()) {
-                    log.info("Committing in-memory index after being idle for " +
+                if (redoLog.hasEntries()) {
+                    log.info("Flushing index after being idle for " +
                             idleTime + " ms.");
                     synchronized (updateMonitor) {
                         updateInProgress = true;
                     }
                     try {
-                        commit();
+                        flush();
                     } finally {
                         synchronized (updateMonitor) {
-                            lastModificationTime = System.currentTimeMillis();
                             updateInProgress = false;
                             updateMonitor.notifyAll();
                             if (multiReader != null) {
@@ -905,6 +936,766 @@
             } catch (IOException e) {
                 log.error("Unable to commit volatile index", e);
             }
+        }
+    }
+
+    //------------------------< Actions >---------------------------------------
+
+    /**
+     * Defines an action on a <code>MultiIndex</code>.
+     */
+    public abstract static class Action {
+
+        /**
+         * Action identifier in redo log for transaction start action.
+         */
+        static final String START = "STR";
+
+        /**
+         * Action type for start action.
+         */
+        public static final int TYPE_START = 0;
+
+        /**
+         * Action identifier in redo log for add node action.
+         */
+        static final String ADD_NODE = "ADD";
+
+        /**
+         * Action type for add node action.
+         */
+        public static final int TYPE_ADD_NODE = 1;
+
+        /**
+         * Action identifier in redo log for node delete action.
+         */
+        static final String DELETE_NODE = "DEL";
+
+        /**
+         * Action type for delete node action.
+         */
+        public static final int TYPE_DELETE_NODE = 2;
+
+        /**
+         * Action identifier in redo log for transaction commit action.
+         */
+        static final String COMMIT = "COM";
+
+        /**
+         * Action type for commit action.
+         */
+        public static final int TYPE_COMMIT = 3;
+
+        /**
+         * Action identifier in redo log for volatile index commit action.
+         */
+        static final String VOLATILE_COMMIT = "VOL_COM";
+
+        /**
+         * Action type for volatile index commit action.
+         */
+        public static final int TYPE_VOLATILE_COMMIT = 4;
+
+        /**
+         * Action identifier in redo log for index create action.
+         */
+        static final String CREATE_INDEX = "CRE_IDX";
+
+        /**
+         * Action type for create index action.
+         */
+        public static final int TYPE_CREATE_INDEX = 5;
+
+        /**
+         * Action identifier in redo log for index add action.
+         */
+        static final String ADD_INDEX = "ADD_IDX";
+
+        /**
+         * Action type for add index action.
+         */
+        public static final int TYPE_ADD_INDEX = 6;
+
+        /**
+         * Action identifier in redo log for delete index action.
+         */
+        static final String DELETE_INDEX = "DEL_IDX";
+
+        /**
+         * Action type for delete index action.
+         */
+        public static final int TYPE_DELETE_INDEX = 7;
+
+        /**
+         * Transaction identifier for internal actions like index replace or
+         * volatile index commit triggered by timer thread.
+         */
+        static final long INTERNAL_TRANSACTION = -1;
+
+        /**
+         * The id of the transaction that executed this action.
+         */
+        private final long transactionId;
+
+        /**
+         * The action type.
+         */
+        private final int type;
+
+        /**
+         * Creates a new <code>Action</code>.
+         *
+         * @param transactionId the id of the transaction that executed this
+         *                      action.
+         * @param type          the action type.
+         */
+        Action(long transactionId, int type) {
+            this.transactionId = transactionId;
+            this.type = type;
+        }
+
+        /**
+         * Returns the transaction id for this <code>Action</code>.
+         *
+         * @return the transaction id for this <code>Action</code>.
+         */
+        long getTransactionId() {
+            return transactionId;
+        }
+
+        /**
+         * Returns the action type.
+         *
+         * @return the action type.
+         */
+        int getType() {
+            return type;
+        }
+
+        /**
+         * Executes this action on the <code>index</code>.
+         *
+         * @param index the index where to execute the action.
+         * @throws IOException         if the action fails due to some I/O error in
+         *                             the index or some other error.
+         */
+        public abstract void execute(MultiIndex index) throws IOException;
+
+        /**
+         * Executes the inverse operation of this action. That is, does an undo
+         * of this action. This default implementation does nothing, but returns
+         * silently.
+         *
+         * @param index the index where to undo the action.
+         * @throws IOException if the action cannot be undone.
+         */
+        public void undo(MultiIndex index) throws IOException {
+        }
+
+        /**
+         * Returns a <code>String</code> representation of this action that can be
+         * written to the {@link RedoLog}.
+         *
+         * @return a <code>String</code> representation of this action.
+         */
+        public abstract String toString();
+
+        /**
+         * Parses a line in the redo log and creates an {@link Action}.
+         *
+         * @param line the line from the redo log.
+         * @return an <code>Action</code>.
+         * @throws IllegalArgumentException if the line is malformed.
+         */
+        static Action fromString(String line) throws IllegalArgumentException {
+            int endTransIdx = line.indexOf(' ');
+            if (endTransIdx == -1) {
+                throw new IllegalArgumentException(line);
+            }
+            long transactionId;
+            try {
+                transactionId = Long.parseLong(line.substring(0, endTransIdx));
+            } catch (NumberFormatException e) {
+                throw new IllegalArgumentException(line);
+            }
+            int endActionIdx = line.indexOf(' ', endTransIdx + 1);
+            if (endActionIdx == -1) {
+                // action does not have arguments
+                endActionIdx = line.length();
+            }
+            String actionLabel = line.substring(endTransIdx + 1, endActionIdx);
+            String arguments = "";
+            if (endActionIdx + 1 <= line.length()) {
+                arguments = line.substring(endActionIdx + 1);
+            }
+            Action a;
+            if (actionLabel.equals(Action.ADD_NODE)) {
+                a = AddNode.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.ADD_INDEX)) {
+                a = AddIndex.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.COMMIT)) {
+                a = Commit.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.CREATE_INDEX)) {
+                a = CreateIndex.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.DELETE_INDEX)) {
+                a = DeleteIndex.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.DELETE_NODE)) {
+                a = DeleteNode.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.START)) {
+                a = Start.fromString(transactionId, arguments);
+            } else if (actionLabel.equals(Action.VOLATILE_COMMIT)) {
+                a = VolatileCommit.fromString(transactionId, arguments);
+            } else {
+                throw new IllegalArgumentException(line);
+            }
+            return a;
+        }
+    }
+
+    /**
+     * Adds an index to the MultiIndex's active persistent index list.
+     */
+    private static class AddIndex extends Action {
+
+        /**
+         * The name of the index to add.
+         */
+        private String indexName;
+
+        /**
+         * Creates a new AddIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param indexName     the name of the index to add, or <code>null</code>
+         *                      if an index with a new name should be created.
+         */
+        AddIndex(long transactionId, String indexName) {
+            super(transactionId, Action.TYPE_ADD_INDEX);
+            this.indexName = indexName;
+        }
+
+        /**
+         * Creates a new AddIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the name of the index to add.
+         * @return the AddIndex action.
+         * @throws IllegalArgumentException if the arguments are malformed.
+         */
+        static AddIndex fromString(long transactionId, String arguments) {
+            return new AddIndex(transactionId, arguments);
+        }
+
+        /**
+         * Adds a sub index to <code>index</code>.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            PersistentIndex idx = index.getOrCreateIndex(indexName, false);
+            if (!index.indexNames.contains(indexName)) {
+                index.indexNames.addName(indexName);
+                // now that the index is in the active list let the merger know about it
+                index.merger.indexAdded(indexName, idx.getNumDocuments());
+            }
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer();
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.ADD_INDEX);
+            logLine.append(' ');
+            logLine.append(indexName);
+            return logLine.toString();
+        }
+    }
+
+    /**
+     * Adds a node to the index.
+     */
+    private static class AddNode extends Action {
+
+        /**
+         * The maximum length of an AddNode String.
+         */
+        private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE).length()
+                + Action.ADD_NODE.length()
+                + Constants.UUID_FORMATTED_LENGTH
+                + 2;
+
+        /**
+         * The uuid of the node to add.
+         */
+        private final String uuid;
+
+        /**
+         * The document to add to the index, or <code>null</code> if not available.
+         */
+        private Document doc;
+
+        /**
+         * Creates a new AddNode action.
+         *
+         * @param transactionId the id of the transaction that executes this action.
+         * @param uuid the uuid of the node to add.
+         */
+        AddNode(long transactionId, String uuid) {
+            super(transactionId, Action.TYPE_ADD_NODE);
+            this.uuid = uuid;
+        }
+
+        /**
+         * Creates a new AddNode action.
+         *
+         * @param transactionId the id of the transaction that executes this action.
+         * @param doc the document to add.
+         */
+        AddNode(long transactionId, Document doc) {
+            this(transactionId, doc.get(FieldNames.UUID));
+            this.doc = doc;
+        }
+
+        /**
+         * Creates a new AddNode action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the arguments to this action. The uuid of the node
+         *                      to add
+         * @return the AddNode action.
+         * @throws IllegalArgumentException if the arguments are malformed. Not a
+         *                                  UUID.
+         */
+        static AddNode fromString(long transactionId, String arguments)
+                throws IllegalArgumentException {
+            // simple length check
+            if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
+                throw new IllegalArgumentException("arguments is not a uuid");
+            }
+            return new AddNode(transactionId, arguments);
+        }
+
+        /**
+         * Adds a node to the index.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            if (doc == null) {
+                try {
+                    doc = index.createDocument(new NodeId(uuid));
+                } catch (RepositoryException e) {
+                    // node does not exist anymore
+                    log.debug(e.getMessage());
+                }
+            }
+            if (doc != null) {
+                index.volatileIndex.addDocument(doc);
+            }
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.ADD_NODE);
+            logLine.append(' ');
+            logLine.append(uuid);
+            return logLine.toString();
+        }
+    }
+
+    /**
+     * Commits a transaction.
+     */
+    private static class Commit extends Action {
+
+        /**
+         * Creates a new Commit action.
+         *
+         * @param transactionId the id of the transaction that is committed.
+         */
+        Commit(long transactionId) {
+            super(transactionId, Action.TYPE_COMMIT);
+        }
+
+        /**
+         * Creates a new Commit action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     ignored by this method.
+         * @return the Commit action.
+         */
+        static Commit fromString(long transactionId, String arguments) {
+            return new Commit(transactionId);
+        }
+
+        /**
+         * Touches the last flush time (sets it to the current time).
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            index.lastFlushTime = System.currentTimeMillis();
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            return Long.toString(getTransactionId()) + ' ' + Action.COMMIT;
+        }
+    }
+
+    /**
+     * Creates a new sub index but does not add it to the active persistent index
+     * list.
+     */
+    private static class CreateIndex extends Action {
+
+        /**
+         * The name of the index to create.
+         */
+        private String indexName;
+
+        /**
+         * Indicates if the index is forced to be created. That is, existing
+         * index data is deleted.
+         */
+        private final boolean create;
+
+        /**
+         * Creates a new CreateIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param indexName     the name of the index to add, or <code>null</code>
+         *                      if an index with a new name should be created.
+         * @param create        if <code>true</code> existing index data is
+         *                      overwritten.
+         */
+        CreateIndex(long transactionId, String indexName, boolean create) {
+            super(transactionId, Action.TYPE_CREATE_INDEX);
+            this.indexName = indexName;
+            this.create = create;
+        }
+
+        /**
+         * Creates a new CreateIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the name of the index to create.
+         * @return the AddIndex action.
+         * @throws IllegalArgumentException if the arguments are malformed.
+         */
+        static CreateIndex fromString(long transactionId, String arguments) {
+            // when created from String, this action is executed as redo action
+            // -> don't create index, simply open it.
+            return new CreateIndex(transactionId, arguments, false);
+        }
+
+        /**
+         * Creates a new index.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            PersistentIndex idx = index.getOrCreateIndex(indexName, create);
+            indexName = idx.getName();
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public void undo(MultiIndex index) throws IOException {
+            PersistentIndex idx = index.getOrCreateIndex(indexName, false);
+            index.deleteIndex(idx);
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer();
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.CREATE_INDEX);
+            logLine.append(' ');
+            logLine.append(indexName);
+            return logLine.toString();
+        }
+
+        /**
+         * Returns the index name that has been created. If this method is called
+         * before {@link #execute(MultiIndex)} it will return <code>null</code>.
+         *
+         * @return the name of the index that has been created.
+         */
+        String getIndexName() {
+            return indexName;
+        }
+    }
+
+    /**
+     * Closes and deletes an index that is no longer in use.
+     */
+    private static class DeleteIndex extends Action {
+
+        /**
+         * The name of the index to delete.
+         */
+        private String indexName;
+
+        /**
+         * Creates a new DeleteIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param indexName     the name of the index to delete.
+         */
+        DeleteIndex(long transactionId, String indexName) {
+            super(transactionId, Action.TYPE_DELETE_INDEX);
+            this.indexName = indexName;
+        }
+
+        /**
+         * Creates a new DeleteIndex action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the name of the index to delete.
+         * @return the DeleteIndex action.
+         * @throws IllegalArgumentException if the arguments are malformed.
+         */
+        static DeleteIndex fromString(long transactionId, String arguments) {
+            return new DeleteIndex(transactionId, arguments);
+        }
+
+        /**
+         * Removes a sub index from <code>index</code>.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            // get index if it exists
+            for (Iterator it = index.indexes.iterator(); it.hasNext(); ) {
+                PersistentIndex idx = (PersistentIndex) it.next();
+                if (idx.getName().equals(indexName)) {
+                    idx.close();
+                    index.deleteIndex(idx);
+                    break;
+                }
+            }
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer();
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.DELETE_INDEX);
+            logLine.append(' ');
+            logLine.append(indexName);
+            return logLine.toString();
+        }
+    }
+
+    /**
+     * Deletes a node from the index.
+     */
+    private static class DeleteNode extends Action {
+
+        /**
+         * The maximum length of a DeleteNode String.
+         */
+        private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE).length()
+                + Action.DELETE_NODE.length()
+                + Constants.UUID_FORMATTED_LENGTH
+                + 2;
+
+        /**
+         * The uuid of the node to remove.
+         */
+        private final String uuid;
+
+        /**
+         * Creates a new DeleteNode action.
+         *
+         * @param transactionId the id of the transaction that executes this action.
+         * @param uuid the uuid of the node to delete.
+         */
+        DeleteNode(long transactionId, String uuid) {
+            super(transactionId, Action.TYPE_DELETE_NODE);
+            this.uuid = uuid;
+        }
+
+        /**
+         * Creates a new DeleteNode action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the uuid of the node to delete.
+         * @return the DeleteNode action.
+         * @throws IllegalArgumentException if the argument is malformed, i.e. not
+         *                                  a UUID.
+         */
+        static DeleteNode fromString(long transactionId, String arguments) {
+            // simple length check
+            if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
+                throw new IllegalArgumentException("arguments is not a uuid");
+            }
+            return new DeleteNode(transactionId, arguments);
+        }
+
+        /**
+         * Deletes a node from the index.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            Term idTerm = new Term(FieldNames.UUID, uuid);
+            // if the document cannot be deleted from the volatile index
+            // delete it from one of the persistent indexes.
+            int num = index.volatileIndex.removeDocument(idTerm);
+            if (num == 0) {
+                for (int i = index.indexes.size() - 1; i >= 0; i--) {
+                    // only look in registered indexes
+                    PersistentIndex idx = (PersistentIndex) index.indexes.get(i);
+                    if (index.indexNames.contains(idx.getName())) {
+                        num = idx.removeDocument(idTerm);
+                        if (num > 0) {
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.DELETE_NODE);
+            logLine.append(' ');
+            logLine.append(uuid);
+            return logLine.toString();
+        }
+    }
+
+    /**
+     * Starts a transaction.
+     */
+    private static class Start extends Action {
+
+        /**
+         * Creates a new Start transaction action.
+         *
+         * @param transactionId the id of the transaction that started.
+         */
+        Start(long transactionId) {
+            super(transactionId, Action.TYPE_START);
+        }
+
+        /**
+         * Creates a new Start action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     ignored by this method.
+         * @return the Start action.
+         */
+        static Start fromString(long transactionId, String arguments) {
+            return new Start(transactionId);
+        }
+
+        /**
+         * Sets the current transaction id on <code>index</code>.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            index.currentTransactionId = getTransactionId();
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            return Long.toString(getTransactionId()) + ' ' + Action.START;
+        }
+    }
+
+    /**
+     * Commits the volatile index to disk.
+     */
+    private static class VolatileCommit extends Action {
+
+        /**
+         * The name of the target index to commit to.
+         */
+        private final String targetIndex;
+
+        /**
+         * Creates a new VolatileCommit action.
+         *
+         * @param transactionId the id of the transaction that executes this action.
+         * @param targetIndex   the name of the index the volatile index is committed to.
+         */
+        VolatileCommit(long transactionId, String targetIndex) {
+            super(transactionId, Action.TYPE_VOLATILE_COMMIT);
+            this.targetIndex = targetIndex;
+        }
+
+        /**
+         * Creates a new VolatileCommit action.
+         *
+         * @param transactionId the id of the transaction that executes this
+         *                      action.
+         * @param arguments     the name of the target index.
+         * @return the VolatileCommit action.
+         */
+        static VolatileCommit fromString(long transactionId, String arguments) {
+            return new VolatileCommit(transactionId, arguments);
+        }
+
+        /**
+         * Commits the volatile index to disk.
+         *
+         * @inheritDoc
+         */
+        public void execute(MultiIndex index) throws IOException {
+            VolatileIndex volatileIndex = index.getVolatileIndex();
+            PersistentIndex persistentIndex = index.getOrCreateIndex(targetIndex, true);
+            persistentIndex.copyIndex(volatileIndex);
+            index.resetVolatileIndex();
+        }
+
+        /**
+         * @inheritDoc
+         */
+        public String toString() {
+            StringBuffer logLine = new StringBuffer();
+            logLine.append(Long.toString(getTransactionId()));
+            logLine.append(' ');
+            logLine.append(Action.VOLATILE_COMMIT);
+            logLine.append(' ');
+            logLine.append(targetIndex);
+            return logLine.toString();
         }
     }
 }
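
Each of the actions above serializes to one line in the redo log through its
toString() method, in the general shape "<transactionId> <action token> [<argument>]"
(the literal token strings are the Action.* constants defined earlier in
MultiIndex.java). As a rough sketch of how an action might be recorded and then
applied from inside MultiIndex -- the exact call site and the redoLog field are
assumptions, not part of the hunks quoted above:

    // sketch only: runs inside MultiIndex, where the package-private Action
    // classes, currentTransactionId and an assumed redoLog field are visible
    Action a = new DeleteNode(currentTransactionId, uuid);
    redoLog.append(a);   // writes one line, e.g. "42 <DELETE_NODE token> <uuid>"
    a.execute(this);     // removes the document from the volatile or a persistent index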

Added: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java?rev=289211&view=auto
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java (added)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java Thu Sep 15 04:39:35 2005
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2004-2005 The Apache Software Foundation or its licensors,
+ *                     as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.apache.log4j.Logger;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.io.IOException;
+
+/**
+ * Implements the recovery process.
+ */
+class Recovery {
+
+    /**
+     * The logger instance for this class.
+     */
+    private static final Logger log = Logger.getLogger(Recovery.class);
+
+    /**
+     * The MultiIndex to run the recovery on.
+     */
+    private final MultiIndex index;
+
+    /**
+     * The redo log.
+     */
+    private final RedoLog redoLog;
+
+    /**
+     * The ids of the uncommitted transactions. Set of Long objects.
+     */
+    private final Set losers = new HashSet();
+
+    /**
+     * Creates a new Recovery instance.
+     *
+     * @param index the MultiIndex to recover.
+     * @param redoLog the redo log.
+     */
+    private Recovery(MultiIndex index, RedoLog redoLog) {
+        this.index = index;
+        this.redoLog = redoLog;
+    }
+
+    /**
+     * Runs a recovery on <code>index</code> if <code>redoLog</code> contains
+     * log entries.
+     * <p/>
+     * If recovery succeeds the <code>index</code> is flushed and the redo log
+     * is cleared. That is, the <code>index</code> is stable.<br/>
+     * If recovery fails an IOException is thrown, and the redo log will not
+     * be modified. The recovery process can then be executed again, after
+     * fixing the cause of the IOException (e.g. disk full).
+     *
+     * @param index the index to recover.
+     * @param redoLog the redo log.
+     * @throws IOException if the recovery fails.
+     */
+    static void run(MultiIndex index, RedoLog redoLog) throws IOException {
+        if (!redoLog.hasEntries()) {
+            log.debug("RedoLog is empty, no recovery needed.");
+            return;
+        }
+        log.info("Found uncommitted redo log. Applying changes now...");
+        Recovery r = new Recovery(index, redoLog);
+        r.run();
+        log.info("Redo changes applied.");
+    }
+
+    /**
+     * Runs the recovery process.
+     *
+     * @throws IOException if the recovery fails.
+     */
+    private void run() throws IOException {
+        List actions = redoLog.getActions();
+
+        // find loser transactions
+        for (Iterator it = actions.iterator(); it.hasNext(); ) {
+            MultiIndex.Action a = (MultiIndex.Action) it.next();
+            if (a.getType() == MultiIndex.Action.TYPE_START) {
+                losers.add(new Long(a.getTransactionId()));
+            } else if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
+                losers.remove(new Long(a.getTransactionId()));
+            }
+        }
+
+        // find last volatile commit without changes from a loser
+        int lastSafeVolatileCommit = -1;
+        Set transactionIds = new HashSet();
+        for (int i = 0; i < actions.size(); i++) {
+            MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+            if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
+                transactionIds.clear();
+            } else if (a.getType() == MultiIndex.Action.TYPE_VOLATILE_COMMIT) {
+                transactionIds.retainAll(losers);
+                // check if transactionIds contains losers
+                if (transactionIds.size() > 0) {
+                    // found dirty volatile commit
+                    break;
+                } else {
+                    lastSafeVolatileCommit = i;
+                }
+            } else {
+                transactionIds.add(new Long(a.getTransactionId()));
+            }
+        }
+
+        // delete dirty indexes
+        for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
+            MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+            if (a.getType() == MultiIndex.Action.TYPE_CREATE_INDEX) {
+                a.undo(index);
+            }
+        }
+
+        // replay actions up to last safe volatile commit
+        // ignore add node actions, they are included in volatile commits
+        for (int i = 0; i < actions.size() && i <= lastSafeVolatileCommit; i++) {
+            MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+            switch (a.getType()) {
+                case MultiIndex.Action.TYPE_ADD_INDEX:
+                case MultiIndex.Action.TYPE_CREATE_INDEX:
+                case MultiIndex.Action.TYPE_DELETE_INDEX:
+                case MultiIndex.Action.TYPE_DELETE_NODE:
+                    a.execute(index);
+            }
+        }
+
+        // now replay the rest until we encounter a loser transaction
+        for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
+            MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+            if (losers.contains(new Long(a.getTransactionId()))) {
+                break;
+            } else {
+                a.execute(index);
+            }
+        }
+
+        // now we are consistent again -> flush
+        index.flush();
+    }
+}

Propchange: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/Recovery.java
------------------------------------------------------------------------------
    svn:eol-style = native
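
A minimal sketch of how this recovery is meant to be triggered at startup, before
the index accepts new changes. The wiring inside MultiIndex is not part of the
hunks quoted in this mail, so the constructor argument and the log file name below
are assumptions; only Recovery.run(MultiIndex, RedoLog) is taken from the code above:

    // sketch of the startup path (names other than Recovery.run and RedoLog
    // are assumptions)
    RedoLog redoLog = new RedoLog(new File(indexDir, "redo.log"));
    // replays the actions of committed transactions, undoes indexes created
    // by loser transactions, then flushes the index so it is stable again
    Recovery.run(multiIndex, redoLog);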

Modified: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java?rev=289211&r1=289210&r2=289211&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java (original)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/RedoLog.java Thu Sep 15 04:39:35 2005
@@ -16,7 +16,6 @@
  */
 package org.apache.jackrabbit.core.query.lucene;
 
-import org.apache.jackrabbit.uuid.Constants;
 import org.apache.log4j.Logger;
 
 import java.io.BufferedReader;
@@ -31,39 +30,47 @@
 import java.io.OutputStream;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 
 /**
- * Implements a redo log for the {@link VolatileIndex}. While nodes are added to
- * and removed from the volatile index (held in memory) a redo log is written to
- * keep track of the changes. In case the Jackrabbit process terminates
- * unexpected the redo log is applied when Jackrabbit is restarted the next
- * time.<br/>
- * When the {@link VolatileIndex} is merged with the peristent index the, redo
- * log is cleared.
+ * Implements a redo log for changes that have not been committed to disk. While
+ * nodes are added to and removed from the volatile index (held in memory) a
+ * redo log is written to keep track of the changes. In case the Jackrabbit
+ * process terminates unexpectedly, the redo log is applied when Jackrabbit is
+ * restarted the next time.
  * <p/>
  * This class is not thread-safe.
  */
 class RedoLog {
 
-    /** Logger instance for this class */
+    /**
+     * Logger instance for this class
+     */
     private static final Logger log = Logger.getLogger(RedoLog.class);
 
-    /** Implements a {@link EntryCollector} with an empty collect method */
-    private static final EntryCollector DUMMY_COLLECTOR = new EntryCollector() {
-        public void collect(Entry entry) {
-            // do nothing
+    /**
+     * Implements an {@link ActionCollector} that counts all entries and
+     * increments {@link #entryCount}.
+     */
+    private final ActionCollector ENTRY_COUNTER = new ActionCollector() {
+        public void collect(MultiIndex.Action a) {
+            entryCount++;
         }
     };
 
-    /** The log file */
+    /**
+     * The log file
+     */
     private final File logFile;
 
-    /** The number of log enties in the log file */
+    /**
+     * The number of log entries in the log file
+     */
     private int entryCount = 0;
 
-    /** Writer to the log file */
+    /**
+     * Writer to the log file
+     */
     private Writer out;
 
     /**
@@ -78,7 +85,7 @@
             log.getParentFile().mkdirs();
             log.createNewFile();
         }
-        read(DUMMY_COLLECTOR);
+        read(ENTRY_COUNTER);
     }
 
     /**
@@ -100,42 +107,33 @@
     }
 
     /**
-     * Returns a collection with all {@link Entry} instances in the redo log.
-     * @return an collection with all {@link Entry} instances in the redo log.
-     * @throws IOException if an error occurs while reading from the
+     * Returns a List with all {@link MultiIndex.Action} instances in the
      * redo log.
-     */
-    Collection getEntries() throws IOException {
-        final List entries = new ArrayList();
-        read(new EntryCollector() {
-            public void collect(Entry entry) {
-                entries.add(entry);
+     *
+     * @return a List with all {@link MultiIndex.Action} instances in the
+     *         redo log.
+     * @throws IOException if an error occurs while reading from the redo log.
+     */
+    List getActions() throws IOException {
+        final List actions = new ArrayList();
+        read(new ActionCollector() {
+            public void collect(MultiIndex.Action a) {
+                actions.add(a);
             }
         });
-        return entries;
-    }
-
-    /**
-     * Informs this redo log that a node has been added.
-     * @param uuid the uuid of the node.
-     * @throws IOException if the node cannot be written to the redo
-     * log.
-     */
-    void nodeAdded(String uuid) throws IOException {
-        initOut();
-        out.write(new Entry(uuid, Entry.NODE_ADDED).toString() + "\n");
-        entryCount++;
+        return actions;
     }
 
     /**
-     * Informs this redo log that a node has been removed.
-     * @param uuid the uuid of the node.
+     * Appends an action to the log.
+     *
+     * @param action the action to append.
      * @throws IOException if the node cannot be written to the redo
      * log.
      */
-    void nodeRemoved(String uuid) throws IOException {
+    void append(MultiIndex.Action action) throws IOException {
         initOut();
-        out.write(new Entry(uuid, Entry.NODE_REMOVED).toString() + "\n");
+        out.write(action.toString() + "\n");
         entryCount++;
     }
 
@@ -176,22 +174,20 @@
     }
 
     /**
-     * Reads the log file and sets the {@link #entryCount} with the number
-     * of entries read.
-     * @param collector called back for each {@link Entry} read.
+     * Reads the log file and calls back {@link RedoLog.ActionCollector}.
+     *
+     * @param collector called back for each {@link MultiIndex.Action} read.
      * @throws IOException if an error occurs while reading from the
      * log file.
      */
-    private void read(EntryCollector collector) throws IOException {
+    private void read(ActionCollector collector) throws IOException {
         InputStream in = new FileInputStream(logFile);
         try {
             BufferedReader reader = new BufferedReader(new InputStreamReader(in));
             String line;
             while ((line = reader.readLine()) != null) {
                 try {
-                    Entry e = Entry.fromString(line);
-                    collector.collect(e);
-                    entryCount++;
+                    collector.collect(MultiIndex.Action.fromString(line));
                 } catch (IllegalArgumentException e) {
                     log.warn("Malformed redo entry: " + e.getMessage());
                 }
@@ -207,96 +203,14 @@
         }
     }
 
-    /**
-     * Helper class that represents an entry in the redo log.
-     */
-    public static class Entry {
-
-        /** The length of a log entry: UUID + &lt;space> + (ADD | REM) */
-        private static final int ENTRY_LENGTH = Constants.UUID_FORMATTED_LENGTH + 4;
-
-        /** Type constant for node added entry */
-        static final int NODE_ADDED = 1;
-
-        /** Type constant for node removed entry */
-        static final int NODE_REMOVED = 2;
-
-        /** Type string for node added */
-        private static final String ADD = "ADD";
-
-        /** Type string for node removed */
-        private static final String REM = "REM";
-
-        /** The uuid of the node */
-        public final String uuid;
-
-        /** The type of event */
-        public final int type;
-
-        /**
-         * Creates a new log entry.
-         * @param uuid the uuid of the node
-         * @param type the event type.
-         */
-        private Entry(String uuid, int type) {
-            this.uuid = uuid;
-            this.type = type;
-        }
-
-        /**
-         * Parses an line in the redo log and created a {@link Entry}.
-         * @param logLine the line from the redo log.
-         * @return a log <code>Entry</code>.
-         * @throws IllegalArgumentException if the line is malformed.
-         */
-        static Entry fromString(String logLine) throws IllegalArgumentException {
-            if (logLine.length() != ENTRY_LENGTH) {
-                throw new IllegalArgumentException("Malformed log entry: " + logLine);
-            }
-            String uuid = logLine.substring(0, Constants.UUID_FORMATTED_LENGTH);
-            String typeString = logLine.substring(Constants.UUID_FORMATTED_LENGTH + 1);
-            if (ADD.equals(typeString)) {
-                return new Entry(uuid, NODE_ADDED);
-            } else if (REM.equals(typeString)) {
-                return new Entry(uuid, NODE_REMOVED);
-            } else {
-                throw new IllegalArgumentException("Unrecognized type string in log entry: " + logLine);
-            }
-        }
-
-        /**
-         * Returns the string representation of this <code>Entry</code>:<br/>
-         * UUID &lt;space> (ADD | REM)
-         * @return the string representation of this <code>Entry</code>.
-         */
-        public String toString() {
-            return uuid + " " + getStringForType(type);
-        }
-
-        /**
-         * Returns the string representation for an entry <code>type</code>. If
-         * <code>type</code> is {@link #NODE_ADDED}, <code>ADD</code> is
-         * returned, otherwise <code>REM</code> is returned.
-         * @param type the entry type.
-         * @return the string representation for an entry <code>type</code>.
-         */
-        private static String getStringForType(int type) {
-            if (type == NODE_ADDED) {
-                return ADD;
-            } else {
-                return REM;
-            }
-        }
-    }
-
     //-----------------------< internal >---------------------------------------
 
     /**
-     * Helper interface to collect Entries read from the redo log.
+     * Helper interface to collect Actions read from the redo log.
      */
-    interface EntryCollector {
+    interface ActionCollector {
 
-        /** Called when an entry is created */
-        void collect(Entry entry);
+        /** Called for each action read from the redo log */
+        void collect(MultiIndex.Action action);
     }
 }
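
For completeness, a short usage sketch of the reworked RedoLog API. Since both
classes are package-private this code would have to live in the same package;
the redoLog instance is a placeholder:

    // read back all actions recorded so far, in log order; each line was
    // parsed via MultiIndex.Action.fromString(line) in read() above
    List actions = redoLog.getActions();
    for (Iterator it = actions.iterator(); it.hasNext(); ) {
        MultiIndex.Action a = (MultiIndex.Action) it.next();
        // inspect a.getTransactionId() / a.getType(), or re-execute the action
    }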

Modified: incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java?rev=289211&r1=289210&r2=289211&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java (original)
+++ incubator/jackrabbit/trunk/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java Thu Sep 15 04:39:35 2005
@@ -31,7 +31,7 @@
 import java.util.Enumeration;
 
 /**
- * Implements an in-memory index with a redo log.
+ * Implements an in-memory index with a pending buffer.
  */
 class VolatileIndex extends AbstractIndex {
 
@@ -45,10 +45,9 @@
      */
     private static final int DEFAULT_BUFFER_SIZE = 10;
 
-    /** The redo log */
-    private final RedoLog redoLog;
-
-    /** Map of pending documents to add to the index */
+    /**
+     * Map of pending documents to add to the index
+     */
     private final Map pending = new LinkedMap();
 
     /**
@@ -57,36 +56,28 @@
     private int bufferSize = DEFAULT_BUFFER_SIZE;
 
     /**
-     * Creates a new <code>VolatileIndex</code> using an <code>analyzer</code>
-     * and a redo <code>log</code>.
-     * @param analyzer the analyzer to use.
-     * @param log the redo log.
-     * @throws IOException if an error occurs while opening the index.
+     * The number of documents in this index.
      */
-    VolatileIndex(Analyzer analyzer, RedoLog log) throws IOException {
-        super(analyzer, new RAMDirectory(), null);
-        redoLog = log;
-    }
+    private int numDocs = 0;
 
     /**
-     * Returns the redo log of this volatile index.
-     * @return the redo log of this volatile index.
+     * Creates a new <code>VolatileIndex</code> using an <code>analyzer</code>.
+     *
+     * @param analyzer the analyzer to use.
+     * @throws IOException if an error occurs while opening the index.
      */
-    RedoLog getRedoLog() {
-        return redoLog;
+    VolatileIndex(Analyzer analyzer) throws IOException {
+        super(analyzer, new RAMDirectory(), null);
     }
 
     /**
-     * Overwrites the default implementation by writing an entry to the
-     * redo log and then adds it to the pending list.
+     * Overwrites the default implementation by adding the document to a pending
+     * list and committing the pending list if needed.
+     *
      * @param doc the document to add to the index.
-     * @throws IOException if an error occurs while writing to the redo log
-     * or the index.
+     * @throws IOException if an error occurs while writing to the index.
      */
     void addDocument(Document doc) throws IOException {
-        redoLog.nodeAdded(doc.get(FieldNames.UUID));
-        redoLog.flush();
-
         Document old = (Document) pending.put(doc.get(FieldNames.UUID), doc);
         if (old != null) {
             disposeDocument(old);
@@ -95,32 +86,41 @@
             commitPending();
         }
         invalidateSharedReader();
+        numDocs++;
     }
 
     /**
-     * Overwrites the default implementation by writing an entry to the redo
-     * log and then calling the <code>super.removeDocument()</code> method or
-     * if the document is in the pending list, removes it from there.
+     * Overwrites the default implementation to remove the document from the
+     * pending list if it is present, or otherwise to call <code>super.removeDocument()</code>.
      *
      * @param idTerm the uuid term of the document to remove.
-     * @throws IOException if an error occurs while writing to the redo log
-     * or the index.
      * @return the number of deleted documents
+     * @throws IOException if an error occurs while removing the document from
+     *                     the index.
      */
     int removeDocument(Term idTerm) throws IOException {
-        redoLog.nodeRemoved(idTerm.text());
-        redoLog.flush();
-
         Document doc = (Document) pending.remove(idTerm.text());
+        int num;
         if (doc != null) {
             disposeDocument(doc);
             // pending document has been removed
-            return 1;
+            num = 1;
         } else {
             // remove document from index
-            return super.getIndexReader().delete(idTerm);
-            }
+            num = super.getIndexReader().delete(idTerm);
         }
+        numDocs -= num;
+        return num;
+    }
+
+    /**
+     * Returns the number of valid documents in this index.
+     *
+     * @return the number of valid documents in this index.
+     */
+    int getNumDocuments() throws IOException {
+        return numDocs;
+    }
 
     /**
      * Overwrites the implementation in {@link AbstractIndex} to trigger

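A rough usage sketch of the reworked VolatileIndex (also package-private; the
analyzer, doc and uuid values are placeholders, not part of this commit):

    VolatileIndex volatileIndex = new VolatileIndex(analyzer);
    volatileIndex.addDocument(doc);        // buffered in the pending map and
                                           // committed once the buffer is full
    int removed = volatileIndex.removeDocument(
            new Term(FieldNames.UUID, uuid));   // number of deleted documents
    int count = volatileIndex.getNumDocuments();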

