jackrabbit-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mreut...@apache.org
Subject svn commit: r234492 - /incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/
Date Mon, 22 Aug 2005 14:20:20 GMT
Author: mreutegg
Date: Mon Aug 22 07:20:02 2005
New Revision: 234492

URL: http://svn.apache.org/viewcvs?rev=234492&view=rev
Log:
JCR-190: Caching in QueryHandler does not scale well

Added:
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java   (with props)
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java   (with props)
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java   (with props)
Modified:
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingIndexReader.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingMultiReader.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SharedIndexReader.java
    incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java Mon Aug 22 07:20:02 2005
@@ -75,19 +75,30 @@
     /** mergeFactor config parameter */
     private int mergeFactor = 10;
 
+    /**
+     * The document number cache if this index may use one.
+     */
+    private DocNumberCache cache;
+
     /** The shared IndexReader for all read-only IndexReaders */
     private SharedIndexReader sharedReader;
 
     /**
      * Constructs an index with an <code>analyzer</code> and a
      * <code>directory</code>.
-     * @param analyzer the analyzer for text tokenizing.
+     *
+     * @param analyzer  the analyzer for text tokenizing.
      * @param directory the underlying directory.
+     * @param cache     the document number cache if this index should use one;
+     *                  otherwise <code>cache</code> is <code>null</code>.
      * @throws IOException if the index cannot be initialized.
      */
-    AbstractIndex(Analyzer analyzer, Directory directory) throws IOException {
+    AbstractIndex(Analyzer analyzer,
+                  Directory directory,
+                  DocNumberCache cache) throws IOException {
         this.analyzer = analyzer;
         this.directory = directory;
+        this.cache = cache;
 
         if (!IndexReader.indexExists(directory)) {
             indexWriter = new IndexWriter(directory, analyzer, true);
@@ -173,7 +184,8 @@
         }
         if (sharedReader == null) {
             // create new shared reader
-            sharedReader = new SharedIndexReader(new CachingIndexReader(IndexReader.open(getDirectory())));
+            CachingIndexReader cr = new CachingIndexReader(IndexReader.open(getDirectory()), cache);
+            sharedReader = new SharedIndexReader(cr);
         }
         return new ReadOnlyIndexReader(sharedReader, deleted);
     }

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingIndexReader.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingIndexReader.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingIndexReader.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingIndexReader.java Mon Aug 22 07:20:02 2005
@@ -25,23 +25,12 @@
 import org.apache.lucene.index.TermEnum;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
+import java.util.BitSet;
 
 /**
  * Implements an <code>IndexReader</code> that maintains caches to resolve
- * {@link IndexReader#termDocs(Term)} calls efficiently.
+ * {@link #getParent(int, BitSet)} calls efficiently.
  * <p/>
- * The caches are:
- * <ul>
- * <li>idCache: maps UUID to document number</li>
- * <li>documentCache: maps document number to {@link Document} instance</li>
- * <li>parentCache: maps parentUUID to List of document numbers</li>
- * </ul>
  */
 class CachingIndexReader extends FilterIndexReader {
 
@@ -51,181 +40,180 @@
     private static final Logger log = Logger.getLogger(CachingIndexReader.class);
 
     /**
-     * The document idCache. Maps UUIDs to document number.
+     * The current value of the global creation tick counter.
      */
-    private Map idCache;
+    private static long currentTick;
 
     /**
-     * The document cache. Maps document number to Document instance.
+     * Cache of each node's parent relation. If an entry in the array is not
+     * null, the node whose document number equals the array index has the
+     * node identified by that <code>DocId</code> as its parent.
      */
-    private Map documentCache;
+    private final DocId[] parents;
 
     /**
-     * The parent id cache. Maps parent UUID to List of document numbers.
+     * Tick when this index reader was created.
      */
-    private Map parentCache;
+    private final long creationTick = getNextCreationTick();
+
+    /**
+     * Document number cache if available. May be <code>null</code>.
+     */
+    private final DocNumberCache cache;
 
     /**
      * Creates a new <code>CachingIndexReader</code> based on
      * <code>delegatee</code>
+     *
      * @param delegatee the base <code>IndexReader</code>.
+     * @param cache     a document number cache, or <code>null</code> if not
+     *                  available to this reader.
      */
-    CachingIndexReader(IndexReader delegatee) {
+    CachingIndexReader(IndexReader delegatee, DocNumberCache cache) {
         super(delegatee);
+        this.cache = cache;
+        parents = new DocId[delegatee.maxDoc()];
     }
 
     /**
-     * If the field of <code>term</code> is {@link FieldNames#UUID} this
-     * <code>CachingIndexReader</code> returns a <code>TermDocs</code> instance
-     * with a cached document id. If <code>term</code> has any other field
-     * the call is delegated to the base <code>IndexReader</code>.<br/>
-     * If <code>term</code> is for a {@link FieldNames#UUID} field and this
-     * <code>CachingIndexReader</code> does not have such a document,
-     * {@link #EMPTY} is returned.
+     * Returns the <code>DocId</code> of the parent of <code>n</code> or
+     * {@link DocId#NULL} if <code>n</code> does not have a parent
+     * (<code>n</code> is the root node).
      *
-     * @param term the term to start the <code>TermDocs</code> enumeration.
-     * @return a TermDocs instance.
+     * @param n the document number.
+     * @param deleted the documents that should be regarded as deleted.
+     * @return the <code>DocId</code> of <code>n</code>'s parent.
      * @throws IOException if an error occurs while reading from the index.
      */
-    public TermDocs termDocs(Term term) throws IOException {
-        if (term.field() == FieldNames.UUID) {
-            synchronized (this) {
-                cacheInit();
-                Integer docNo = (Integer) idCache.get(term.text());
-                if (docNo == null) {
-                    return EMPTY;
-                } else {
-                    return new CachingTermDocs(docNo);
+    DocId getParent(int n, BitSet deleted) throws IOException {
+        DocId parent;
+        boolean existing = false;
+        synchronized (parents) {
+            parent = parents[n];
+        }
+
+        if (parent != null) {
+            existing = true;
+
+            // check if valid and reset if necessary
+            if (!parent.isValid(deleted)) {
+                if (log.isDebugEnabled()) {
+                    log.debug(parent + " not valid anymore.");
                 }
+                parent = null;
             }
-        } else if (term.field() == FieldNames.PARENT) {
-            synchronized (this) {
-                cacheInit();
-                List idList = (List) parentCache.get(term.text());
-                if (idList == null) {
-                    return EMPTY;
-                } else {
-                    return new CachingTermDocs(idList.iterator());
+        }
+
+        if (parent == null) {
+            Document doc = document(n);
+            String parentUUID = doc.get(FieldNames.PARENT);
+            if (parentUUID == null || parentUUID.length() == 0) {
+                parent = DocId.NULL;
+            } else {
+                // only create a DocId from document number if there is no
+                // existing DocId
+                if (!existing) {
+                    Term id = new Term(FieldNames.UUID, parentUUID);
+                    TermDocs docs = termDocs(id);
+                    try {
+                        while (docs.next()) {
+                            if (!deleted.get(docs.doc())) {
+                                parent = DocId.create(docs.doc());
+                                break;
+                            }
+                        }
+                    } finally {
+                        docs.close();
+                    }
+                }
+
+                // if still null, then parent is not in this index, or existing
+                // DocId was invalid. thus, only allowed to create DocId from uuid
+                if (parent == null) {
+                    parent = DocId.create(parentUUID);
                 }
             }
-        } else {
-            return super.termDocs(term);
-        }
-    }
 
-    /**
-     * Returns the stored fields of the <code>n</code><sup>th</sup>
-     * <code>Document</code> in this index. This implementation returns cached
-     * versions of <code>Document</code> instance. Thus, the returned document
-     * must not be modified!
-     *
-     * @param n the document number.
-     * @return the <code>n</code><sup>th</sup> <code>Document</code> in this
-     *         index
-     * @throws IOException              if an error occurs while reading from
-     *                                  the index.
-     * @throws IllegalArgumentException if the document with number
-     *                                  <code>n</code> is deleted.
-     */
-    public Document document(int n) throws IOException, IllegalArgumentException {
-        if (isDeleted(n)) {
-            throw new IllegalArgumentException("attempt to access a deleted document");
-        }
-        synchronized (this) {
-            cacheInit();
-            return (Document) documentCache.get(new Integer(n));
+            // finally put to cache
+            synchronized (parents) {
+                parents[n] = parent;
+            }
         }
+        return parent;
     }
 
     /**
-     * Commits pending changes to disc.
-     * @throws IOException if an error occurs while writing changes.
+     * Returns the tick value when this reader was created.
+     *
+     * @return the creation tick for this reader.
      */
-    public void commitDeleted() throws IOException {
-        commit();
+    public long getCreationTick() {
+        return creationTick;
     }
 
+    //--------------------< FilterIndexReader overrides >-----------------------
+
     /**
-     * Provides an efficient lookup of document frequency for terms with field
-     * {@link FieldNames#UUID} and {@link FieldNames#PARENT}. All other calles
-     * are handled by the base class.
+     * If the field of <code>term</code> is {@link FieldNames#UUID} this
+     * <code>CachingIndexReader</code> returns a <code>TermDocs</code> instance
+     * with a cached document id. If <code>term</code> has any other field
+     * the call is delegated to the base <code>IndexReader</code>.<br/>
+     * If <code>term</code> is for a {@link FieldNames#UUID} field and this
+     * <code>CachingIndexReader</code> does not have such a document,
+     * {@link #EMPTY} is returned.
      *
-     * @param t the term to look up the document frequency.
-     * @return the document frequency of term <code>t</code>.
+     * @param term the term to start the <code>TermDocs</code> enumeration.
+     * @return a TermDocs instance.
      * @throws IOException if an error occurs while reading from the index.
      */
-    public int docFreq(Term t) throws IOException {
-        synchronized (this) {
-            cacheInit();
-            if (t.field() == FieldNames.UUID) {
-                return idCache.containsKey(t.text()) ? 1 : 0;
-            } else if (t.field() == FieldNames.PARENT) {
-                List children = (List) parentCache.get(t.text());
-                return children == null ? 0 : children.size();
-            }
-        }
-        return super.docFreq(t);
-    }
+    public TermDocs termDocs(Term term) throws IOException {
+        if (term.field() == FieldNames.UUID) {
+            // check cache if we have one
+            if (cache != null) {
+                DocNumberCache.Entry e = cache.get(term.text());
+                if (e != null) {
+                    // check if valid
+                    // the cache may contain entries from a different reader
+                    // with the same uuid. that happens when a node is updated
+                    // and is reindexed. the node 'travels' from an older index
+                    // to a newer one. the cache will still contain a cache
+                    // entry from the old until it is overwritten by the
+                    // newer index.
+                    if (e.reader == this && !isDeleted(e.doc)) {
+                        return new SingleTermDocs(e.doc);
+                    }
+                }
 
-    /**
-     * Removes the <code>TermEnum</code> from the idCache and calls the base
-     * <code>IndexReader</code>.
-     * @param n the number of the document to delete.
-     * @throws IOException if an error occurs while deleting the document.
-     */
-    protected synchronized void doDelete(int n) throws IOException {
-        if (idCache != null) {
-            Document d = (Document) documentCache.remove(new Integer(n));
-            if (d != null) {
-                idCache.remove(d.get(FieldNames.UUID));
-                String parentUUID = d.get(FieldNames.PARENT);
-                List parents = (List) parentCache.get(parentUUID);
-                if (parents.size() == 1) {
-                    parentCache.remove(parentUUID);
-                } else {
-                    // replace existing list, other threads might use iterator
-                    // on existing list
-                    List repl = new ArrayList(parents);
-                    repl.remove(new Integer(n));
-                    parentCache.put(parentUUID, repl);
+                // not in cache or invalid
+                TermDocs docs = in.termDocs(term);
+                try {
+                    if (docs.next()) {
+                        // put to cache
+                        cache.put(term.text(), this, docs.doc());
+                        // and return
+                        return new SingleTermDocs(docs.doc());
+                    } else {
+                        return EMPTY;
+                    }
+                } finally {
+                    docs.close();
                 }
             }
         }
-        super.doDelete(n);
+        return super.termDocs(term);
     }
 
+
+    //----------------------< internal >----------------------------------------
+
     /**
-     * Initially fills the caches: idCache, documentCache, parentCache.
-     * @throws IOException if an error occurs while reading from the index.
+     * Returns the next creation tick value.
+     *
+     * @return the next creation tick value.
      */
-    private void cacheInit() throws IOException {
-        if (idCache == null) {
-            long time = System.currentTimeMillis();
-            Map ids = new HashMap(in.numDocs());
-            Map documents = new HashMap(in.numDocs());
-            Map parents = new HashMap(in.numDocs());
-            for (int i = 0; i < in.maxDoc(); i++) {
-                if (!in.isDeleted(i)) {
-                    Document d = in.document(i);
-                    Integer docId = new Integer(i);
-                    if (ids.put(d.get(FieldNames.UUID), docId) != null) {
-                        log.warn("Duplicate index entry for node: " + d.get(FieldNames.UUID));
-                    }
-                    documents.put(docId, d);
-                    String parentUUID = d.get(FieldNames.PARENT);
-                    List docIds = (List) parents.get(parentUUID);
-                    if (docIds == null) {
-                        docIds = new ArrayList();
-                        parents.put(parentUUID, docIds);
-                    }
-                    docIds.add(docId);
-                }
-            }
-            idCache = ids;
-            documentCache = documents;
-            parentCache = parents;
-            time = System.currentTimeMillis() - time;
-            log.debug("IndexReader cache populated in: " + time + " ms.");
+    private static long getNextCreationTick() {
+        synchronized (CachingIndexReader.class) {
+            return currentTick++;
         }
     }
 
@@ -263,99 +251,4 @@
         public void close() {
         }
     };
-
-    /**
-     * Implements a <code>TermDocs</code> that takes a list of document
-     * ids.
-     */
-    private static final class CachingTermDocs implements TermDocs {
-
-        /**
-         * The current document number.
-         */
-        private int current = -1;
-
-        /**
-         * Iterator over document numbers as <code>Integer</code> values.
-         */
-        private final Iterator docIds;
-
-        /**
-         * Creates a new <code>CachingTermDocs</code> instance with a single
-         * document id.
-         * @param docId the single document id.
-         */
-        CachingTermDocs(Integer docId) {
-            this(Arrays.asList(new Integer[]{docId}).iterator());
-        }
-
-        /**
-         * Creates a new <code>CachingTermDocs</code> instance that iterates
-         * over the <code>docIds</code>.
-         * @param docIds the actual document numbers / ids.
-         */
-        CachingTermDocs(Iterator docIds) {
-            this.docIds = docIds;
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(Term term) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(TermEnum termEnum) {
-            throw new UnsupportedOperationException();
-        }
-
-
-        /**
-         * {@inheritDoc}
-         */
-        public int doc() {
-            return current;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public int freq() {
-            return 1;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public boolean next() {
-            boolean next = docIds.hasNext();
-            if (next) {
-                current = ((Integer) docIds.next()).intValue();
-            }
-            return next;
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public int read(int[] docs, int[] freqs) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public boolean skipTo(int target) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public void close() {
-        }
-    }
 }

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingMultiReader.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingMultiReader.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingMultiReader.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/CachingMultiReader.java Mon Aug 22 07:20:02 2005
@@ -16,24 +16,34 @@
  */
 package org.apache.jackrabbit.core.query.lucene;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
 
 import java.io.IOException;
+import java.util.Map;
+import java.util.IdentityHashMap;
 
 /**
  * Extends a <code>MultiReader</code> with support for cached <code>TermDocs</code>
  * on {@link FieldNames#UUID} field.
  */
-class CachingMultiReader extends MultiReader {
+final class CachingMultiReader extends MultiReader {
 
     /**
      * The sub readers.
      */
-    private IndexReader[] subReaders;
+    private ReadOnlyIndexReader[] subReaders;
+
+    /**
+     * Map of OffsetReaders, identified by caching reader they are based on.
+     */
+    private final Map readersByBase = new IdentityHashMap();
+
+    /**
+     * Document number cache if available. May be <code>null</code>.
+     */
+    private final DocNumberCache cache;
 
     /**
      * Doc number starts for each sub reader
@@ -48,57 +58,75 @@
 
     /**
      * Creates a new <code>CachingMultiReader</code> based on sub readers.
-     * <p/>
-     * This <code>CachingMultiReader</code> poses type requirements on the
-     * <code>subReaders</code>: all but one sub readers must be a
-     * {@link ReadOnlyIndexReader}. The single allowed sub reader not of type
-     * {@link ReadOnlyIndexReader} must be the last reader in
-     * <code>subReaders</code>! Otherwise this constructor will throw an
-     * {@link IllegalArgumentException}.
      *
      * @param subReaders the sub readers.
+     * @param cache the document number cache.
      * @throws IOException if an error occurs while reading from the indexes.
-     * @exception IllegalArgumentException if <code>subReaders</code> does
-     * not comply to the above type requirements.
      */
-    public CachingMultiReader(IndexReader[] subReaders)
-            throws IOException, IllegalArgumentException {
+    public CachingMultiReader(ReadOnlyIndexReader[] subReaders,
+                              DocNumberCache cache)
+            throws IOException {
         super(subReaders);
-        // check readers, all but last must be a ReadOnlyIndexReader
-        for (int i = 0; i < subReaders.length - 1; i++) {
-            if (!(subReaders[i] instanceof ReadOnlyIndexReader)) {
-                throw new IllegalArgumentException("subReader " + i + " must be of type ReadOnlyIndexReader");
-            }
-        }
+        this.cache = cache;
         this.subReaders = subReaders;
         starts = new int[subReaders.length + 1];
         int maxDoc = 0;
         for (int i = 0; i < subReaders.length; i++) {
             starts[i] = maxDoc;
             maxDoc += subReaders[i].maxDoc();
+            OffsetReader offsetReader = new OffsetReader(subReaders[i], starts[i]);
+            readersByBase.put(subReaders[i].getBase().getBase(), offsetReader);
         }
         starts[subReaders.length] = maxDoc;
     }
 
     /**
+     * Returns the document number of the parent of <code>n</code> or
+     * <code>-1</code> if <code>n</code> does not have a parent (<code>n</code>
+     * is the root node).
+     *
+     * @param n the document number.
+     * @return the document number of <code>n</code>'s parent.
+     * @throws IOException if an error occurs while reading from the index.
+     */
+    final public int getParent(int n) throws IOException {
+        int i = readerIndex(n);
+        DocId id = subReaders[i].getParent(n - starts[i]);
+        id = id.applyOffset(starts[i]);
+        return id.getDocumentNumber(this);
+    }
+
+    /**
      * {@inheritDoc}
      */
     public TermDocs termDocs(Term term) throws IOException {
         if (term.field() == FieldNames.UUID) {
-            for (int i = 0; i < subReaders.length; i++) {
-                TermDocs docs = subReaders[i].termDocs(term);
-                if (docs != CachingIndexReader.EMPTY) {
-                    // apply offset
-                    return new OffsetTermDocs(docs, starts[i]);
+            // check cache
+            DocNumberCache.Entry e = cache.get(term.text());
+            if (e != null) {
+                // check if valid:
+                // 1) reader must be in the set of readers
+                // 2) doc must not be deleted
+                OffsetReader offsetReader = (OffsetReader) readersByBase.get(e.reader);
+                if (offsetReader != null && !offsetReader.reader.isDeleted(e.doc)) {
+                    return new SingleTermDocs(e.doc + offsetReader.offset);
                 }
             }
-        } else if (term.field() == FieldNames.PARENT) {
-            TermDocs[] termDocs = new TermDocs[subReaders.length];
+
+            // if we get here, entry is either invalid or did not exist
+            // search through readers
             for (int i = 0; i < subReaders.length; i++) {
-                termDocs[i] = subReaders[i].termDocs(term);
+                TermDocs docs = subReaders[i].termDocs(term);
+                try {
+                    if (docs.next()) {
+                        return new SingleTermDocs(docs.doc() + starts[i]);
+                    }
+                } finally {
+                    docs.close();
+                }
             }
-            return new MultiTermDocs(termDocs, starts);
         }
+
         return super.termDocs(term);
     }
 
@@ -121,203 +149,62 @@
         }
     }
 
+    //------------------------< internal >--------------------------------------
+
     /**
-     * Partial <code>TermDocs</code> implementation that applies an offset
-     * to a base <code>TermDocs</code> instance.
+     * Returns the reader index for document <code>n</code>.
+     * Implementation copied from lucene MultiReader class.
+     *
+     * @param n document number.
+     * @return the reader index.
      */
-    private static final class OffsetTermDocs implements TermDocs {
-
-        /**
-         * The base <code>TermDocs</code> instance.
-         */
-        private final TermDocs base;
-
-        /**
-         * The offset to apply
-         */
-        private final int offset;
-
-        /**
-         * Creates a new <code>OffsetTermDocs</code> instance.
-         * @param base the base <code>TermDocs</code>.
-         * @param offset the offset to apply.
-         */
-        OffsetTermDocs(TermDocs base, int offset) {
-            this.base = base;
-            this.offset = offset;
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(Term term) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(TermEnum termEnum) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public int doc() {
-            return base.doc() + offset;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public int freq() {
-            return base.freq();
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public boolean next() throws IOException {
-            return base.next();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public int read(int[] docs, int[] freqs) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public boolean skipTo(int target) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public void close() throws IOException {
-            base.close();
+    final private int readerIndex(int n) {
+        int lo = 0;                                      // search starts array
+        int hi = subReaders.length - 1;                  // for first element less
+
+        while (hi >= lo) {
+            int mid = (lo + hi) >> 1;
+            int midValue = starts[mid];
+            if (n < midValue) {
+                hi = mid - 1;
+            } else if (n > midValue) {
+                lo = mid + 1;
+            } else {                                      // found a match
+                while (mid + 1 < subReaders.length && starts[mid + 1] == midValue) {
+                    mid++;                                  // scan to last match
+                }
+                return mid;
+            }
         }
+        return hi;
     }
 
+    //-----------------------< OffsetReader >-----------------------------------
+
     /**
-     * Implements a <code>TermDocs</code> which spans multiple other
-     * <code>TermDocs</code>.
+     * Simple helper struct that associates an offset with an IndexReader.
      */
-    private static final class MultiTermDocs implements TermDocs {
-
-        /**
-         * The actual <code>TermDocs</code>.
-         */
-        private final TermDocs[] termDocs;
-
-        /**
-         * The document number offsets for each <code>TermDocs</code>.
-         */
-        private final int[] starts;
-
-        /**
-         * The current <code>TermDocs</code> instance. If <code>null</code>
-         * there are no more documents.
-         */
-        private TermDocs current;
-
-        /**
-         * The current index into {@link #termDocs} and {@link #starts}.
-         */
-        private int idx = 0;
-
-        /**
-         * Creates a new <code>MultiTermDocs</code> instance.
-         * @param termDocs the actual <code>TermDocs</code>.
-         * @param starts the document number offsets for each
-         *  <code>TermDocs</code>
-         */
-        MultiTermDocs(TermDocs[] termDocs, int[] starts) {
-            this.termDocs = termDocs;
-            this.starts = starts;
-            current = termDocs[idx];
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(Term term) {
-            throw new UnsupportedOperationException();
-        }
-
-        /**
-         * @throws UnsupportedOperationException always
-         */
-        public void seek(TermEnum termEnum) {
-            throw new UnsupportedOperationException();
-        }
+    private static final class OffsetReader {
 
         /**
-         * {@inheritDoc}
+         * The index reader.
          */
-        public int doc() {
-            return starts[idx] + current.doc();
-        }
+        final ReadOnlyIndexReader reader;
 
         /**
-         * {@inheritDoc}
+         * The reader offset in this multi reader instance.
          */
-        public int freq() {
-            return current.freq();
-        }
+        final int offset;
 
         /**
-         * {@inheritDoc}
+         * Creates a new <code>OffsetReader</code>.
+         *
+         * @param reader the index reader.
+         * @param offset the reader offset in a multi reader.
          */
-        public boolean next() throws IOException {
-            while (current != null && !current.next()) {
-                if (++idx >= termDocs.length) {
-                    // no more TermDocs
-                    current = null;
-                } else {
-                    // move to next TermDocs
-                    current = termDocs[idx];
-                }
-            }
-            return current != null;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public int read(int[] docs, int[] freqs) throws IOException {
-            int count = 0;
-            for (int i = 0; i < docs.length && next(); i++, count++) {
-                docs[i] = doc();
-                freqs[i] = freq();
-            }
-            return count;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public boolean skipTo(int target) throws IOException {
-            do {
-                if (!next()) {
-                    return false;
-                }
-            } while (target > doc());
-            return true;
-        }
-
-        /**
-         * {@inheritDoc}
-         */
-        public void close() throws IOException {
-            for (int i = 0; i < termDocs.length; i++) {
-                termDocs[i].close();
-            }
+        OffsetReader(ReadOnlyIndexReader reader, int offset) {
+            this.reader = reader;
+            this.offset = offset;
         }
     }
 }

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java Mon Aug 22 07:20:02 2005
@@ -17,8 +17,6 @@
 package org.apache.jackrabbit.core.query.lucene;
 
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.HitCollector;
 import org.apache.lucene.search.Query;
@@ -26,12 +24,9 @@
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.document.Document;
 
 import java.io.IOException;
 import java.util.BitSet;
-import java.util.HashSet;
-import java.util.Set;
 
 /**
  * Implements a lucene <code>Query</code> which filters a sub query by checking
@@ -175,7 +170,8 @@
         public Scorer scorer(IndexReader reader) throws IOException {
             contextScorer = contextQuery.weight(searcher).scorer(reader);
             subScorer = subQuery.weight(searcher).scorer(reader);
-            return new DescendantSelfAxisScorer(searcher.getSimilarity(), reader);
+            CachingMultiReader index = (CachingMultiReader) reader;
+            return new DescendantSelfAxisScorer(searcher.getSimilarity(), index);
         }
 
         /**
@@ -196,12 +192,12 @@
         /**
          * An <code>IndexReader</code> to access the index.
          */
-        private final IndexReader reader;
+        private final CachingMultiReader reader;
 
         /**
          * BitSet storing the id's of selected documents
          */
-        private final BitSet hits;
+        private final BitSet contextHits;
 
         /**
          * BitSet storing the id's of selected documents from the sub query
@@ -209,14 +205,14 @@
         private final BitSet subHits;
 
         /**
-         * List of UUIDs of selected nodes by the context query
+         * The next document id to return
          */
-        private Set contextUUIDs = null;
+        private int nextDoc = -1;
 
         /**
-         * The next document id to return
+         * Set to <code>true</code> once the sub hits have been calculated.
          */
-        private int nextDoc = -1;
+        private boolean subHitsCalculated = false;
 
         /**
          * Creates a new <code>DescendantSelfAxisScorer</code>.
@@ -224,10 +220,11 @@
          * @param similarity the <code>Similarity</code> instance to use.
          * @param reader     for index access.
          */
-        protected DescendantSelfAxisScorer(Similarity similarity, IndexReader reader) {
+        protected DescendantSelfAxisScorer(Similarity similarity, CachingMultiReader reader) {
             super(similarity);
             this.reader = reader;
-            this.hits = new BitSet(reader.maxDoc());
+            // todo reuse BitSets?
+            this.contextHits = new BitSet(reader.maxDoc());
             this.subHits = new BitSet(reader.maxDoc());
         }
 
@@ -242,38 +239,25 @@
 
                 // check self if necessary
                 if (includeSelf) {
-                    String uuid = reader.document(nextDoc).get(FieldNames.UUID);
-                    if (contextUUIDs.contains(uuid)) {
+                    if (contextHits.get(nextDoc)) {
                         return true;
                     }
                 }
 
                 // check if nextDoc is a descendant of one of the context nodes
-                Document d = reader.document(nextDoc);
-                String parentUUID = d.get(FieldNames.PARENT);
-                while (parentUUID != null && !contextUUIDs.contains(parentUUID)) {
+                int parentDoc = reader.getParent(nextDoc);
+                while (parentDoc != -1 && !contextHits.get(parentDoc)) {
                     // traverse
-                    TermDocs ancestor = reader.termDocs(new Term(FieldNames.UUID, parentUUID));
-                    try {
-                        if (ancestor.next()) {
-                            d = reader.document(ancestor.doc());
-                            parentUUID = d.get(FieldNames.PARENT);
-                            if (parentUUID.length() == 0) {
-                                parentUUID = null;
-                            }
-                        } else {
-                            parentUUID = null;
-                        }
-                    } finally {
-                        ancestor.close();
-                    }
+                    parentDoc = reader.getParent(parentDoc);
                 }
-                if (parentUUID != null) {
-                    // since current doc is a descendant of one of the context
-                    // docs we can promote uuid of doc to the context uuids
-                    contextUUIDs.add(d.get(FieldNames.UUID));
+
+                if (parentDoc != -1) {
+                    // since the current doc is a descendant of one of the context
+                    // docs we can promote parentDoc to the context hits
+                    contextHits.set(parentDoc);
                     return true;
                 }
+
                 // try next
                 nextDoc = subHits.nextSetBit(nextDoc + 1);
             }
@@ -303,26 +287,25 @@
         }
 
         private void calculateSubHits() throws IOException {
-            if (contextUUIDs == null) {
-                contextUUIDs = new HashSet();
+            if (!subHitsCalculated) {
+
                 contextScorer.score(new HitCollector() {
                     public void collect(int doc, float score) {
-                        // @todo maintain cache of doc id hierarchy
-                        hits.set(doc);
+                        contextHits.set(doc);
                     }
                 }); // find all
-                for (int i = hits.nextSetBit(0); i >= 0; i = hits.nextSetBit(i + 1)) {
-                    contextUUIDs.add(reader.document(i).get(FieldNames.UUID));
-                }
 
-                // reuse for final hits
-                hits.clear();
+                if (contextHits.isEmpty()) {
+                    // no need to execute sub scorer, context is empty
+                } else {
+                    subScorer.score(new HitCollector() {
+                        public void collect(int doc, float score) {
+                            subHits.set(doc);
+                        }
+                    });
+                }
 
-                subScorer.score(new HitCollector() {
-                    public void collect(int doc, float score) {
-                        subHits.set(doc);
-                    }
-                });
+                subHitsCalculated = true;
             }
         }
 

Added: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java?rev=234492&view=auto
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java (added)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java Mon Aug 22 07:20:02 2005
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2004-2005 The Apache Software Foundation or its licensors,
+ *                     as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+
+import java.io.IOException;
+import java.util.BitSet;
+
+/**
+ * Implements a document id which can be based on a Node uuid or a lucene
+ * document number.
+ */
+abstract class DocId {
+
+    /**
+     * Indicates a null DocId. Will be returned if the root node is asked for
+     * its parent.
+     */
+    static final DocId NULL = new DocId() {
+
+        /**
+         * Always returns <code>-1</code>.
+         * @param reader the index reader.
+         * @return always <code>-1</code>.
+         */
+        final int getDocumentNumber(IndexReader reader) {
+            return -1;
+        }
+
+        /**
+         * Always returns <code>this</code>.
+         * @param offset the offset to apply.
+         * @return always <code>this</code>.
+         */
+        final DocId applyOffset(int offset) {
+            return this;
+        }
+
+        /**
+         * Always returns <code>true</code>.
+         * @param deleted the deleted documents.
+         * @return always <code>true</code>.
+         */
+        final boolean isValid(BitSet deleted) {
+            return true;
+        }
+    };
+
+    /**
+     * Returns the document number of this <code>DocId</code>. If this id is
+     * invalid <code>-1</code> is returned.
+     *
+     * @param reader the IndexReader to resolve this <code>DocId</code>.
+     * @return the document number of this <code>DocId</code> or <code>-1</code>
+     *         if it is invalid (e.g. does not exist).
+     * @throws IOException if an error occurs while reading from the index.
+     */
+    abstract int getDocumentNumber(IndexReader reader) throws IOException;
+
+    /**
+     * Applies an offset to this <code>DocId</code>. The returned <code>DocId</code>
+     * may be the same as <code>this</code> if this <code>DocId</code> does
+     * not need to know about an offset.
+     *
+     * @param offset the offset to apply to.
+     * @return <code>DocId</code> with <code>offset</code> applied.
+     */
+    abstract DocId applyOffset(int offset);
+
+    /**
+     * Returns <code>true</code> if this <code>DocId</code> is valid against the
+     * set of <code>deleted</code> documents; otherwise <code>false</code>.
+     *
+     * @param deleted the deleted documents.
+     * @return <code>true</code> if this <code>DocId</code> is not deleted;
+     *         otherwise <code>false</code>.
+     */
+    abstract boolean isValid(BitSet deleted);
+
+    /**
+     * Creates a <code>DocId</code> based on a document number.
+     *
+     * @param docNumber the document number.
+     * @return a <code>DocId</code> based on a document number.
+     */
+    static DocId create(int docNumber) {
+        return new PlainDocId(docNumber);
+    }
+
+    /**
+     * Creates a <code>DocId</code> based on a node UUID.
+     *
+     * @param uuid the node uuid.
+     * @return a <code>DocId</code> based on a node UUID.
+     */
+    static DocId create(String uuid) {
+        return new UUIDDocId(uuid);
+    }
+
+    //--------------------------< internal >------------------------------------
+
+    /**
+     * <code>DocId</code> based on a document number.
+     */
+    private static final class PlainDocId extends DocId {
+
+        /**
+         * The document number.
+         */
+        private final int docNumber;
+
+        /**
+         * Creates a <code>DocId</code> based on a document number.
+         *
+         * @param docNumber the lucene document number.
+         */
+        PlainDocId(int docNumber) {
+            this.docNumber = docNumber;
+        }
+
+        /**
+         * @inheritDoc
+         */
+        final int getDocumentNumber(IndexReader reader) {
+            return docNumber;
+        }
+
+        /**
+         * @inheritDoc
+         */
+        final DocId applyOffset(int offset) {
+            return new PlainDocId(docNumber + offset);
+        }
+
+        /**
+         * @inheritDoc
+         */
+        final boolean isValid(BitSet deleted) {
+            return !deleted.get(docNumber);
+        }
+
+        /**
+         * Returns a String representation for this <code>DocId</code>.
+         *
+         * @return a String representation for this <code>DocId</code>.
+         */
+        final public String toString() {
+            return "PlainDocId(" + docNumber + ")";
+        }
+    }
+
+    /**
+     * <code>DocId</code> based on a UUID.
+     */
+    private static final class UUIDDocId extends DocId {
+
+        /**
+         * The node uuid or <code>null</code> if not set.
+         */
+        private final String uuid;
+
+        /**
+         * Creates a <code>DocId</code> based on a Node uuid.
+         *
+         * @param uuid the Node uuid.
+         */
+        UUIDDocId(String uuid) {
+            this.uuid = uuid;
+        }
+
+        /**
+         * @inheritDoc
+         */
+        final int getDocumentNumber(IndexReader reader) throws IOException {
+            Term id = new Term(FieldNames.UUID, uuid);
+            TermDocs docs = reader.termDocs(id);
+            int doc = -1;
+            try {
+                if (docs.next()) {
+                    doc = docs.doc();
+                }
+            } finally {
+                docs.close();
+            }
+            return doc;
+        }
+
+        /**
+         * This implementation will return <code>this</code>. Document number is
+         * not known until resolved in {@link #getDocumentNumber(IndexReader)}.
+         *
+         * @inheritDoc
+         */
+        final DocId applyOffset(int offset) {
+            return this;
+        }
+
+        /**
+         * Always returns <code>true</code>.
+         *
+         * @param deleted the deleted documents.
+         * @return always <code>true</code>.
+         */
+        final boolean isValid(BitSet deleted) {
+            return true;
+        }
+
+        /**
+         * Returns a String representation for this <code>DocId</code>.
+         *
+         * @return a String representation for this <code>DocId</code>.
+         */
+        final public String toString() {
+            return "UUIDDocId(" + uuid + ")";
+        }
+    }
+}

Propchange: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocId.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java?rev=234492&view=auto
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java (added)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java Mon Aug 22 07:20:02 2005
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2004-2005 The Apache Software Foundation or its licensors,
+ *                     as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.apache.commons.collections.map.LRUMap;
+import org.apache.log4j.Logger;
+
+/**
+ * Implements a Document number cache with a fixed size and a LRU strategy.
+ */
+final class DocNumberCache {
+
+    /**
+     * Logger instance for this class.
+     */
+    private static final Logger log = Logger.getLogger(DocNumberCache.class);
+
+    /**
+     * Log cache statistics at most every 10 seconds.
+     */
+    private static final long LOG_INTERVAL = 1000 * 10;
+
+    /**
+     * LRU Map where key=uuid value=reader;docNumber
+     */
+    private final LRUMap docNumbers;
+
+    /**
+     * Timestamp of the last cache statistics log.
+     */
+    private long lastLog;
+
+    /**
+     * Cache misses.
+     */
+    private long misses;
+
+    /**
+     * Cache accesses.
+     */
+    private long accesses;
+
+    /**
+     * Creates a new <code>DocNumberCache</code> with a limiting
+     * <code>size</code>.
+     *
+     * @param size the cache limit.
+     */
+    DocNumberCache(int size) {
+        docNumbers = new LRUMap(size);
+    }
+
+    /**
+     * Puts a document number into the cache using a uuid as key. An entry is
+     * only overwritten if the according reader is younger than the reader
+     * associated with the existing entry.
+     *
+     * @param uuid the key.
+     * @param reader the index reader from where the document number was read.
+     * @param n the document number.
+     */
+    synchronized void put(String uuid, CachingIndexReader reader, int n) {
+        Entry e = (Entry) docNumbers.get(uuid);
+        if (e != null) {
+            // existing entry
+            // ignore if reader is older than the one in entry
+            if (reader.getCreationTick() <= e.reader.getCreationTick()) {
+                if (log.isDebugEnabled()) {
+                    log.debug("Ignoring put(). New entry is not from a newer reader. " +
+                            "existing: " + e.reader.getCreationTick() +
+                            ", new: " + reader.getCreationTick());
+                }
+                e = null;
+            }
+        } else {
+            // entry did not exist
+            e = new Entry(reader, n);
+        }
+
+        if (e != null) {
+            docNumbers.put(uuid, e);
+        }
+    }
+
+    /**
+     * Returns the cache entry for <code>uuid</code>, or <code>null</code> if
+     * no entry exists for <code>uuid</code>.
+     *
+     * @param uuid the key.
+     * @return cache entry or <code>null</code>.
+     */
+    synchronized Entry get(String uuid) {
+        Entry entry = (Entry) docNumbers.get(uuid);
+        if (log.isInfoEnabled()) {
+            accesses++;
+            if (entry == null) {
+                misses++;
+            }
+            // log at most after 1000 accesses and every 10 seconds
+            if (accesses > 1000 && System.currentTimeMillis() - lastLog > LOG_INTERVAL) {
+                long ratio = 100;
+                if (misses != 0) {
+                    ratio -= misses * 100L / accesses;
+                }
+                StringBuffer statistics = new StringBuffer();
+                statistics.append("size=").append(docNumbers.size());
+                statistics.append("/").append(docNumbers.maxSize());
+                statistics.append(", #accesses=").append(accesses);
+                statistics.append(", #hits=").append((accesses - misses));
+                statistics.append(", #misses=").append(misses);
+                statistics.append(", cacheRatio=").append(ratio).append("%");
+                log.info(statistics);
+                accesses = 0;
+                misses = 0;
+                lastLog = System.currentTimeMillis();
+            }
+        }
+        return entry;
+    }
+
+    public static final class Entry {
+
+        /**
+         * The IndexReader.
+         */
+        final CachingIndexReader reader;
+
+        /**
+         * The document number.
+         */
+        final int doc;
+
+        Entry(CachingIndexReader reader, int doc) {
+            this.reader = reader;
+            this.doc = doc;
+        }
+    }
+}

Propchange: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/DocNumberCache.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java Mon Aug 22 07:20:02 2005
@@ -120,6 +120,11 @@
     private CachingMultiReader multiReader;
 
     /**
+     * Shared document number cache across all persistent indexes.
+     */
+    private final DocNumberCache cache;
+
+    /**
      * Monitor to use to synchronize access to {@link #multiReader} and
      * {@link #updateInProgress}.
      */
@@ -157,6 +162,8 @@
 
         this.indexDir = indexDir;
         this.handler = handler;
+        this.cache = new DocNumberCache(handler.getCacheSize());
+
         boolean doInitialIndex = false;
         if (indexNames.exists(indexDir)) {
             indexNames.read(indexDir);
@@ -180,7 +187,8 @@
                 if (!sub.exists() && !sub.mkdir()) {
                     throw new IOException("Unable to create directory: " + sub.getAbsolutePath());
                 }
-                PersistentIndex index = new PersistentIndex(indexNames.getName(i), sub, false, handler.getAnalyzer());
+                PersistentIndex index = new PersistentIndex(indexNames.getName(i),
+                        sub, false, handler.getAnalyzer(), cache);
                 index.setMaxMergeDocs(handler.getMaxMergeDocs());
                 index.setMergeFactor(handler.getMergeFactor());
                 index.setMinMergeDocs(handler.getMinMergeDocs());
@@ -360,12 +368,12 @@
             // some other read thread might have created the reader in the
             // meantime -> check again
             if (multiReader == null) {
-                IndexReader[] readers = new IndexReader[indexes.size() + 1];
+                ReadOnlyIndexReader[] readers = new ReadOnlyIndexReader[indexes.size() + 1];
                 for (int i = 0; i < indexes.size(); i++) {
                     readers[i] = ((PersistentIndex) indexes.get(i)).getReadOnlyIndexReader();
                 }
                 readers[readers.length - 1] = volatileIndex.getReadOnlyIndexReader();
-                multiReader = new CachingMultiReader(readers);
+                multiReader = new CachingMultiReader(readers, cache);
             }
             multiReader.incrementRefCount();
             return multiReader;
@@ -468,8 +476,10 @@
     private void internalAddDocument(Document doc) throws IOException {
         volatileIndex.addDocument(doc);
         if (volatileIndex.getRedoLog().getSize() >= handler.getMinMergeDocs()) {
-            log.info("Committing in-memory index");
+            long time = System.currentTimeMillis();
             commit();
+            time = System.currentTimeMillis() - time;
+            log.info("Committed in-memory index in " + time + "ms.");
         }
     }
 
@@ -486,7 +496,8 @@
 
             File sub = newIndexFolder();
             String name = sub.getName();
-            PersistentIndex index = new PersistentIndex(name, sub, true, handler.getAnalyzer());
+            PersistentIndex index = new PersistentIndex(name, sub, true,
+                    handler.getAnalyzer(), cache);
             index.setMaxMergeDocs(handler.getMaxMergeDocs());
             index.setMergeFactor(handler.getMergeFactor());
             index.setMinMergeDocs(handler.getMinMergeDocs());
@@ -565,7 +576,8 @@
         if (indexes.size() == 0) {
             File sub = newIndexFolder();
             String name = sub.getName();
-            PersistentIndex index = new PersistentIndex(name, sub, true, handler.getAnalyzer());
+            PersistentIndex index = new PersistentIndex(name, sub, true,
+                    handler.getAnalyzer(), cache);
             index.setMaxMergeDocs(handler.getMaxMergeDocs());
             index.setMergeFactor(handler.getMergeFactor());
             index.setMinMergeDocs(handler.getMinMergeDocs());
@@ -664,7 +676,8 @@
         // create new index
         File sub = newIndexFolder();
         String name = sub.getName();
-        PersistentIndex index = new PersistentIndex(name, sub, true, handler.getAnalyzer());
+        PersistentIndex index = new PersistentIndex(name, sub, true,
+                handler.getAnalyzer(), cache);
         index.setMaxMergeDocs(handler.getMaxMergeDocs());
         index.setMergeFactor(handler.getMergeFactor());
         index.setMinMergeDocs(handler.getMinMergeDocs());

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/PersistentIndex.java Mon Aug 22 07:20:02 2005
@@ -53,14 +53,16 @@
      * @param indexDir the directory to store the index.
      * @param create if <code>true</code> an existing index is deleted.
      * @param analyzer the analyzer for text tokenizing.
+     * @param cache the document number cache
      * @throws IOException if an error occurs while opening / creating the
      *  index.
      * @throws IOException if an error occurs while opening / creating
      *  the index.
      */
-    PersistentIndex(String name, File indexDir, boolean create, Analyzer analyzer)
+    PersistentIndex(String name, File indexDir, boolean create,
+                    Analyzer analyzer, DocNumberCache cache)
             throws IOException {
-        super(analyzer, FSDirectory.getDirectory(indexDir, create));
+        super(analyzer, FSDirectory.getDirectory(indexDir, create), cache);
         this.name = name;
 
         // check if index is locked, probably from an unclean repository

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java Mon Aug 22 07:20:02 2005
@@ -58,6 +58,28 @@
         reader.addClient(this);
     }
 
+    /**
+     * Returns the <code>DocId</code> of the parent of <code>n</code> or
+     * {@link DocId#NULL} if <code>n</code> does not have a parent
+     * (<code>n</code> is the root node).
+     *
+     * @param n the document number.
+     * @return the <code>DocId</code> of <code>n</code>'s parent.
+     * @throws IOException if an error occurs while reading from the index.
+     */
+    public DocId getParent(int n) throws IOException {
+        return getBase().getParent(n, deleted);
+    }
+
+    /**
+     * Returns the {@link SharedIndexReader} this reader is based on.
+     *
+     * @return the {@link SharedIndexReader} this reader is based on.
+     */
+    public SharedIndexReader getBase() {
+        return (SharedIndexReader) in;
+    }
+
     //---------------------< IndexReader overwrites >---------------------------
 
     /**

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java Mon Aug 22 07:20:02 2005
@@ -124,6 +124,13 @@
     private boolean autoRepair = true;
 
     /**
+     * The uuid resolver cache size.
+     * <p/>
+     * Default value is: <code>1000</code>.
+     */
+    private int cacheSize = 1000;
+
+    /**
      * Default constructor.
      */
     public SearchIndex() {
@@ -476,5 +483,13 @@
 
     public boolean getAutoRepair() {
         return autoRepair;
+    }
+
+    public void setCacheSize(int size) {
+        cacheSize = size;
+    }
+
+    public int getCacheSize() {
+        return cacheSize;
     }
 }

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SharedIndexReader.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SharedIndexReader.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SharedIndexReader.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SharedIndexReader.java Mon Aug 22 07:20:02 2005
@@ -22,6 +22,7 @@
 
 import java.util.IdentityHashMap;
 import java.util.Map;
+import java.util.BitSet;
 import java.io.IOException;
 
 /**
@@ -54,6 +55,20 @@
     }
 
     /**
+     * Returns the <code>DocId</code> of the parent of <code>n</code> or
+     * {@link DocId#NULL} if <code>n</code> does not have a parent
+     * (<code>n</code> is the root node).
+     *
+     * @param n the document number.
+     * @param deleted the documents that should be regarded as deleted.
+     * @return the <code>DocId</code> of <code>n</code>'s parent.
+     * @throws IOException if an error occurs while reading from the index.
+     */
+    public DocId getParent(int n, BitSet deleted) throws IOException {
+        return getBase().getParent(n, deleted);
+    }
+
+    /**
      * Registeres <code>client</code> with this reader. As long as clients are
      * registered, this shared reader will not release resources on {@link
      * #close()} and will not actually close but only marks itself to close when
@@ -106,4 +121,14 @@
     public TermDocs termDocs(Term term) throws IOException {
         return in.termDocs(term);
     }
+
+    /**
+     * Returns the {@link CachingIndexReader} this reader is based on.
+     *
+     * @return the {@link CachingIndexReader} this reader is based on.
+     */
+    public CachingIndexReader getBase() {
+        return (CachingIndexReader) in;
+    }
+
 }

Added: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java?rev=234492&view=auto
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java (added)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java Mon Aug 22 07:20:02 2005
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2004-2005 The Apache Software Foundation or its licensors,
+ *                     as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.query.lucene;
+
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/**
+ * Implements a TermDocs with a single document.
+ */
+class SingleTermDocs implements TermDocs {
+
+    /**
+     * Single document number.
+     */
+    private final int doc;
+
+    /**
+     * Flag to return the document number once.
+     */
+    private boolean next = true;
+
+    /**
+     * Creates a <code>SingleTermDocs</code> that returns <code>doc</code> as
+     * its single document.
+     *
+     * @param doc the document number.
+     */
+    SingleTermDocs(int doc) {
+        this.doc = doc;
+    }
+
+    /**
+     * @throws UnsupportedOperationException always
+     */
+    public void seek(Term term) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @throws UnsupportedOperationException always
+     */
+    public void seek(TermEnum termEnum) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public int doc() {
+        return doc;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public int freq() {
+        return 1;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public boolean next() throws IOException {
+        boolean hasNext = next;
+        next = false;
+        return hasNext;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public int read(int[] docs, int[] freqs) throws IOException {
+        if (next && docs.length > 0) {
+            docs[0] = doc;
+            freqs[0] = 1;
+            next = false;
+            return 1;
+        }
+        return 0;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public boolean skipTo(int target) throws IOException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void close() throws IOException {
+    }
+}

Propchange: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/SingleTermDocs.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java
URL: http://svn.apache.org/viewcvs/incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java?rev=234492&r1=234491&r2=234492&view=diff
==============================================================================
--- incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java (original)
+++ incubator/jackrabbit/trunk/core/src/java/org/apache/jackrabbit/core/query/lucene/VolatileIndex.java Mon Aug 22 07:20:02 2005
@@ -64,7 +64,7 @@
      * @throws IOException if an error occurs while opening the index.
      */
     VolatileIndex(Analyzer analyzer, RedoLog log) throws IOException {
-        super(analyzer, new RAMDirectory());
+        super(analyzer, new RAMDirectory(), null);
         redoLog = log;
     }
 



Mime
View raw message