jackrabbit-oak-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From thom...@apache.org
Subject svn commit: r1498912 - in /jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak: cache/ plugins/mongomk/ plugins/mongomk/util/
Date Tue, 02 Jul 2013 12:54:49 GMT
Author: thomasm
Date: Tue Jul  2 12:54:48 2013
New Revision: 1498912

URL: http://svn.apache.org/r1498912
Log:
OAK-863 - Refactor caching in MongoMK; add a LIRS cache to understand why we have a low cache hit ratio

Added:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheValue.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigher.java
      - copied, changed from r1498811, jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/EmpericalWeigher.java
Removed:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/EmpericalWeigher.java
Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheStats.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoDocumentStore.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMK.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMicroKernelService.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/Node.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/util/Utils.java

Added: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java?rev=1498912&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java (added)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java Tue Jul  2 12:54:48 2013
@@ -0,0 +1,1152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.cache;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ExecutionException;
+
+import javax.annotation.Nullable;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheStats;
+import com.google.common.cache.Weigher;
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * A scan resistant cache. It is meant to cache objects that are relatively
+ * costly to acquire, for example file content.
+ * <p>
+ * This implementation is multi-threading safe and supports concurrent access.
+ * Null keys or null values are not allowed. The map fill factor is at most 75%.
+ * <p>
+ * Each entry is assigned a distinct memory size, and the cache will try to use
+ * at most the specified amount of memory. The memory unit is not relevant,
+ * however it is suggested to use bytes as the unit.
+ * <p>
+ * This class implements an approximation of the LIRS replacement algorithm
+ * invented by Xiaodong Zhang and Song Jiang as described in
+ * http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few
+ * smaller changes: An additional queue for non-resident entries is used, to
+ * prevent unbound memory usage. The maximum size of this queue is at most the
+ * size of the rest of the stack. About 6.25% of the mapped entries are cold.
+ * <p>
+ * Internally, the cache is split into a number of segments, and each segment is
+ * an individual LIRS cache.
+ * <p>
+ * Accessed entries are only moved to the top of the stack if at least a number
+ * of other entries have been moved to the front (1% by default). Write access
+ * and moving entries to the top of the stack is synchronized per segment.
+ *
+ * @author Thomas Mueller
+ * @param <K> the key type
+ * @param <V> the value type
+ */
+public class CacheLIRS<K, V> implements Cache<K, V> {
+
+    /**
+     * The maximum memory this cache should use.
+     */
+    private long maxMemory;
+
+    /**
+     * The average memory used by one entry.
+     */
+    private int averageMemory;
+
+    private final Segment<K, V>[] segments;
+
+    private final int segmentCount;
+    private final int segmentShift;
+    private final int segmentMask;
+    private final int stackMoveDistance;
+    
+    private final Weigher<K, V> weigher;
+    
+    /**
+     * Create a new cache with the given number of entries, and the default
+     * settings (an average size of 1 per entry, 16 segments, and stack move
+     * distance equals to the maximum number of entries divided by 100).
+     *
+     * @param maxEntries the maximum number of entries
+     */
+    public CacheLIRS(int maxEntries) {
+        // without a weigher each entry has a size of 1; for fewer than 100
+        // entries the stack move distance is 0, so accessed entries are
+        // moved to the top of the stack on every access
+        this(null, maxEntries, 1, 16, maxEntries / 100);
+    }
+
+    /**
+     * Create a new cache with the given memory size.
+     *
+     * @param weigher the weigher used to compute the memory size of an
+     *        entry, or null to use the average memory for each entry
+     * @param maxMemory the maximum memory to use (1 or larger)
+     * @param averageMemory the average memory (1 or larger)
+     * @param segmentCount the number of cache segments (must be a power of 2)
+     * @param stackMoveDistance how many other items are to be moved to the top
+     *        of the stack before the current item is moved
+     */
+    @SuppressWarnings("unchecked")
+    CacheLIRS(Weigher<K, V> weigher, long maxMemory, int averageMemory, int segmentCount, int stackMoveDistance) {
+        this.weigher = weigher;
+        setMaxMemory(maxMemory);
+        setAverageMemory(averageMemory);
+        if (Integer.bitCount(segmentCount) != 1) {
+            throw new IllegalArgumentException("The segment count must be a power of 2, is " + segmentCount);
+        }
+        this.segmentCount = segmentCount;
+        this.segmentMask = segmentCount - 1;
+        this.stackMoveDistance = stackMoveDistance;
+        segments = new Segment[segmentCount];
+        // creates the segment objects (and thereby their entry arrays)
+        invalidateAll();
+        // the bits below segmentShift index within a segment's entry array;
+        // the bits above select the segment (see getSegment)
+        this.segmentShift = Integer.numberOfTrailingZeros(segments[0].entries.length);
+    }
+
+    /**
+     * Remove all entries, by creating new (empty) segments. As a side
+     * effect, the per-segment statistics (hits, misses, loads, evictions)
+     * are reset as well.
+     */
+    @Override
+    public void invalidateAll() {
+        // each segment is limited to an equal share of the total memory
+        long max = Math.max(1, maxMemory / segmentCount);
+        for (int i = 0; i < segmentCount; i++) {
+            segments[i] = new Segment<K, V>(this,
+                    max, averageMemory, stackMoveDistance);
+        }
+    }
+
+    /**
+     * Find the entry (resident or non-resident) for the given key, without
+     * modifying the internal state.
+     *
+     * @param key the key (may not be null)
+     * @return the entry, or null if the key is not mapped
+     */
+    private Entry<K, V> find(Object key) {
+        int hash = getHash(key);
+        return getSegment(hash).find(key, hash);
+    }
+
+    /**
+     * Check whether there is a resident entry for the given key. This
+     * method does not adjust the internal state of the cache.
+     *
+     * @param key the key (may not be null)
+     * @return true if there is a resident entry
+     */
+    public boolean containsKey(Object key) {
+        int hash = getHash(key);
+        return getSegment(hash).containsKey(key, hash);
+    }
+
+    /**
+     * Get the value for the given key if the entry is cached. This method does
+     * not modify the internal state.
+     *
+     * @param key the key (may not be null)
+     * @return the value, or null if there is no resident entry
+     */
+    public V peek(K key) {
+        Entry<K, V> e = find(key);
+        return e == null ? null : e.value;
+    }
+
+    /**
+     * Add an entry to the cache. The entry may or may not exist in the
+     * cache yet. This method will usually mark unknown entries as cold and
+     * known entries as hot.
+     *
+     * @param key the key (may not be null)
+     * @param value the value (may not be null)
+     * @param memory the memory used for the given entry
+     * @return the old value, or null if there was no resident entry
+     */
+    public V put(K key, V value, int memory) {
+        int hash = getHash(key);
+        return getSegment(hash).put(key, hash, value, memory);
+    }
+
+    /**
+     * Add an entry to the cache. The memory size is computed using the
+     * weigher if one was set, and the configured average memory size
+     * otherwise (see sizeOf).
+     *
+     * @param key the key (may not be null)
+     * @param value the value (may not be null)
+     */
+    @Override
+    public void put(K key, V value) {
+        put(key, value, sizeOf(key, value));
+    }
+    
+    /**
+     * Get the value for the given key, loading it using the given loader
+     * if there is no resident entry yet. Loading and caching is delegated
+     * to the segment for this key.
+     *
+     * @param key the key (may not be null)
+     * @param valueLoader the callable used to load a missing value
+     * @return the cached or loaded value
+     * @throws ExecutionException if the value loader threw an exception
+     */
+    @Override
+    public V get(K key, Callable<? extends V> valueLoader)
+            throws ExecutionException {
+        int hash = getHash(key);
+        return getSegment(hash).get(key, hash, valueLoader);
+    }
+
+    /**
+     * Get the size of the given entry. If a weigher was set, it is used to
+     * compute the weight; otherwise the configured average memory per entry
+     * is returned.
+     *
+     * @param key the key
+     * @param value the value
+     * @return the size
+     */
+    protected int sizeOf(K key, V value) {
+        if (weigher == null) {
+            return averageMemory;
+        }
+        return weigher.weigh(key, value);
+    }
+    
+    /**
+     * Remove an entry. Both resident and non-resident entries can be
+     * removed.
+     * <p>
+     * NOTE(review): this method synchronizes on the whole cache, while the
+     * other write paths only synchronize on the affected segment - verify
+     * whether the cache-level lock is really needed here.
+     *
+     * @param key the key (may not be null)
+     */
+    @Override
+    public synchronized void invalidate(Object key) {
+        int hash = getHash(key);
+        getSegment(hash).invalidate(key, hash);
+    }
+    
+    @SuppressWarnings("unchecked")
+    @Override
+    public void invalidateAll(Iterable<?> keys) {
+        for (K k : (Iterable<K>) keys) {
+            invalidate(k);
+        }
+    }
+
+    /**
+     * Get the memory used for the given key.
+     *
+     * @param key the key (may not be null)
+     * @return the memory, or 0 if there is no resident entry
+     */
+    public int getMemory(K key) {
+        int hash = getHash(key);
+        return getSegment(hash).getMemory(key, hash);
+    }
+
+    /**
+     * Get the value for the given key if the entry is cached. This method
+     * adjusts the internal state of the cache sometimes, to ensure commonly
+     * used entries stay in the cache.
+     *
+     * @param key the key (may not be null)
+     * @return the value, or null if there is no resident entry
+     */
+    public V get(Object key) {
+        int hash = getHash(key);
+        return getSegment(hash).get(key, hash);
+    }
+
+    /**
+     * Get the segment responsible for entries with the given hash code.
+     *
+     * @param hash the (spread) hash code, see getHash
+     * @return the segment
+     */
+    private Segment<K, V> getSegment(int hash) {
+        int segmentIndex = (hash >>> segmentShift) & segmentMask;
+        return segments[segmentIndex];
+    }
+
+    /**
+     * Get the hash code for the given key. The hash code is
+     * further enhanced to spread the values more evenly.
+     *
+     * @param key the key
+     * @return the hash code
+     */
+    static int getHash(Object key) {
+        int hash = key.hashCode();
+        // a supplemental secondary hash function
+        // to protect against hash codes that don't differ much
+        hash = ((hash >>> 16) ^ hash) * 0x45d9f3b;
+        hash = ((hash >>> 16) ^ hash) * 0x45d9f3b;
+        hash = (hash >>> 16) ^ hash;
+        return hash;
+    }
+
+    /**
+     * Get the currently used memory.
+     *
+     * @return the used memory
+     */
+    public long getUsedMemory() {
+        long x = 0;
+        for (Segment<K, V> s : segments) {
+            x += s.usedMemory;
+        }
+        return x;
+    }
+
+    /**
+     * Set the maximum memory this cache should use. This will not
+     * immediately cause entries to get removed however; it will only change
+     * the limit. To resize the internal array, call invalidateAll().
+     * <p>
+     * NOTE(review): the per-segment limit used here
+     * (1 + maxMemory / segments.length) differs slightly from the one used
+     * in invalidateAll() (max(1, maxMemory / segmentCount)) - verify
+     * whether that is intended.
+     *
+     * @param maxMemory the maximum size (1 or larger)
+     */
+    public void setMaxMemory(long maxMemory) {
+        if (maxMemory <= 0) {
+            throw new IllegalArgumentException("Max memory must be larger than 0");
+        }
+        this.maxMemory = maxMemory;
+        if (segments != null) {
+            long max = 1 + maxMemory / segments.length;
+            for (Segment<K, V> s : segments) {
+                s.setMaxMemory(max);
+            }
+        }
+    }
+
+    /**
+     * Set the average memory used per entry. It is used to calculate the
+     * length of the internal array.
+     *
+     * @param averageMemory the average memory used (1 or larger)
+     */
+    public void setAverageMemory(int averageMemory) {
+        if (averageMemory <= 0) {
+            throw new IllegalArgumentException("Average memory must be larger than 0");
+        }
+        this.averageMemory = averageMemory;
+        if (segments != null) {
+            for (Segment<K, V> s : segments) {
+                s.setAverageMemory(averageMemory);
+            }
+        }
+    }
+
+    /**
+     * Get the average memory used per entry.
+     *
+     * @return the average memory
+     */
+    public int getAverageMemory() {
+        return averageMemory;
+    }
+
+    /**
+     * Get the maximum memory to use.
+     *
+     * @return the maximum memory
+     */
+    public long getMaxMemory() {
+        return maxMemory;
+    }
+
+    /**
+     * Get the entry set for all resident entries.
+     * <p>
+     * NOTE(review): between collecting the keys and looking up each value,
+     * a concurrent invalidate on a segment could make find(k) return null,
+     * which would cause a NullPointerException here - verify.
+     *
+     * @return the entry set
+     */
+    public synchronized Set<Map.Entry<K, V>> entrySet() {
+        HashMap<K, V> map = new HashMap<K, V>();
+        for (K k : keySet()) {
+            map.put(k,  find(k).value);
+        }
+        return map.entrySet();
+    }
+
+    /**
+     * Get the set of keys for resident entries.
+     *
+     * @return the set of keys
+     */
+    public synchronized Set<K> keySet() {
+        HashSet<K> set = new HashSet<K>();
+        for (Segment<K, V> s : segments) {
+            set.addAll(s.keySet());
+        }
+        return set;
+    }
+
+    /**
+     * Get the number of non-resident entries in the cache.
+     *
+     * @return the number of non-resident entries
+     */
+    public int sizeNonResident() {
+        int x = 0;
+        for (Segment<K, V> s : segments) {
+            x += s.queue2Size;
+        }
+        return x;
+    }
+
+    /**
+     * Get the length of the internal map array.
+     *
+     * @return the size of the array
+     */
+    public int sizeMapArray() {
+        int x = 0;
+        for (Segment<K, V> s : segments) {
+            x += s.entries.length;
+        }
+        return x;
+    }
+
+    /**
+     * Get the number of hot entries in the cache.
+     *
+     * @return the number of hot entries
+     */
+    public int sizeHot() {
+        int x = 0;
+        for (Segment<K, V> s : segments) {
+            x += s.mapSize - s.queueSize - s.queue2Size;
+        }
+        return x;
+    }
+
+    /**
+     * Get the number of resident entries.
+     *
+     * @return the number of entries
+     */
+    @Override
+    public long size() {
+        int x = 0;
+        for (Segment<K, V> s : segments) {
+            x += s.mapSize - s.queue2Size;
+        }
+        return x;
+    }
+
+    /**
+     * Get the list of keys. This method allows to read the internal state of
+     * the cache.
+     *
+     * @param cold if true, only keys for the cold entries are returned
+     * @param nonResident true for non-resident entries
+     * @return the key list
+     */
+    public synchronized List<K> keys(boolean cold, boolean nonResident) {
+        ArrayList<K> keys = new ArrayList<K>();
+        for (Segment<K, V> s : segments) {
+            keys.addAll(s.keys(cold, nonResident));
+        }
+        return keys;
+    }
+    
+    @Override
+    public CacheStats stats() {
+        long hitCount = 0;
+        long missCount = 0; 
+        long loadSuccessCount = 0;
+        long loadExceptionCount = 0;
+        long totalLoadTime = 0;
+        long evictionCount = 0;
+        for (Segment<K, V> s : segments) {
+            hitCount += s.hitCount;
+            missCount += s.missCount;
+            loadSuccessCount += s.loadSuccessCount;
+            loadExceptionCount += s.loadExceptionCount;
+            totalLoadTime += s.totalLoadTime;
+            evictionCount += s.evictionCount;
+        }
+        return new CacheStats(hitCount, missCount, loadSuccessCount, 
+                loadExceptionCount, totalLoadTime, evictionCount);
+    }
+
+    /**
+     * A cache segment
+     *
+     * @param <K> the key type
+     * @param <V> the value type
+     */
+    static class Segment<K, V> {
+
+        /**
+         * The number of (hot, cold, and non-resident) entries in the map.
+         */
+        int mapSize;
+
+        /**
+         * The size of the LIRS queue for resident cold entries.
+         */
+        int queueSize;
+
+        /**
+         * The size of the LIRS queue for non-resident cold entries.
+         */
+        int queue2Size;
+
+        /**
+         * The map array. The size is always a power of 2.
+         */
+        Entry<K, V>[] entries;
+
+        /**
+         * The currently used memory.
+         */
+        long usedMemory;
+        
+        long hitCount;
+        long missCount;
+        long loadSuccessCount;
+        long loadExceptionCount;
+        long totalLoadTime;
+        long evictionCount;
+
+        /**
+         * The cache.
+         */
+        private final CacheLIRS<K, V> cache;
+
+        /**
+         * How many other items are to be moved to the top of the stack before
+         * the current item is moved.
+         */
+        private final int stackMoveDistance;
+
+        /**
+         * The maximum memory this cache should use.
+         */
+        private long maxMemory;
+
+        /**
+         * The average memory used by one entry.
+         */
+        private int averageMemory;
+
+        /**
+         * The bit mask that is applied to the key hash code to get the index in the
+         * map array. The mask is the length of the array minus one.
+         */
+        private int mask;
+
+        /**
+         * The LIRS stack size.
+         */
+        private int stackSize;
+
+        /**
+         * The stack of recently referenced elements. This includes all hot entries,
+         * the recently referenced cold entries, and all non-resident cold entries.
+         */
+        private Entry<K, V> stack;
+
+        /**
+         * The queue of resident cold entries.
+         */
+        private Entry<K, V> queue;
+
+        /**
+         * The queue of non-resident cold entries.
+         */
+        private Entry<K, V> queue2;
+
+        /**
+         * The number of times any item was moved to the top of the stack.
+         */
+        private int stackMoveCounter;
+        
+        /**
+         * Create a new cache segment.
+         *
+         * @param cache the enclosing cache (used to compute entry sizes
+         *        when loading values)
+         * @param maxMemory the maximum memory to use
+         * @param averageMemory the average memory usage of an object
+         * @param stackMoveDistance the number of other entries to be moved to
+         *        the top of the stack before moving an entry to the top
+         */
+        Segment(CacheLIRS<K, V> cache, long maxMemory, int averageMemory, int stackMoveDistance) {
+            this.cache = cache;
+            setMaxMemory(maxMemory);
+            setAverageMemory(averageMemory);
+            this.stackMoveDistance = stackMoveDistance;
+            // sizes the entry array and initializes stack and queues
+            clear();
+        }
+
+        /**
+         * Re-initialize the segment: size the entry array from the memory
+         * settings and reset the stack, the queues, and all sizes.
+         */
+        private void clear() {
+
+            // calculate the size of the map array
+            // assume a fill factor of at most 75% (matching the 0.75 below
+            // and the class comment)
+            long maxLen = (long) (maxMemory / averageMemory / 0.75);
+            // the size needs to be a power of 2
+            long l = 8;
+            while (l < maxLen) {
+                l += l;
+            }
+            // the array size is at most 2^31 elements
+            // NOTE(review): (int) (1L << 31) narrows to Integer.MIN_VALUE;
+            // for an extremely large maxMemory / averageMemory ratio this
+            // would produce a negative array length - verify, possibly cap
+            // at 1 << 30 instead
+            int len = (int) Math.min(1L << 31, l);
+            // the bit mask has all bits set
+            mask = len - 1;
+
+            // initialize the stack and queue heads
+            stack = new Entry<K, V>();
+            stack.stackPrev = stack.stackNext = stack;
+            queue = new Entry<K, V>();
+            queue.queuePrev = queue.queueNext = queue;
+            queue2 = new Entry<K, V>();
+            queue2.queuePrev = queue2.queueNext = queue2;
+
+            // first set to null - avoiding out of memory
+            entries = null;
+            @SuppressWarnings("unchecked")
+            Entry<K, V>[] e = new Entry[len];
+            entries = e;
+
+            mapSize = 0;
+            usedMemory = 0;
+            stackSize = queueSize = queue2Size = 0;
+        }
+
+        /**
+         * Get the memory used for the given key.
+         *
+         * @param key the key (may not be null)
+         * @param hash the hash
+         * @return the memory, or 0 if there is no resident entry
+         */
+        int getMemory(K key, int hash) {
+            Entry<K, V> e = find(key, hash);
+            return e == null ? 0 : e.memory;
+        }
+
+        /**
+         * Get the value for the given key if the entry is cached. This method
+         * adjusts the internal state of the cache sometimes, to ensure commonly
+         * used entries stay in the cache.
+         * <p>
+         * NOTE(review): this method is not synchronized; the hit / miss
+         * counters are incremented without holding the segment lock, so the
+         * statistics may be slightly off under concurrent access - verify
+         * this is acceptable.
+         *
+         * @param key the key (may not be null)
+         * @param hash the hash
+         * @return the value, or null if there is no resident entry
+         */
+        V get(Object key, int hash) {
+            Entry<K, V> e = find(key, hash);
+            if (e == null) {
+                // the entry was not found
+                missCount++;
+                return null;
+            }
+            V value = e.value;
+            if (value == null) {
+                // it was a non-resident entry
+                missCount++;
+                return null;
+            }
+            if (e.isHot()) {
+                if (e != stack.stackNext) {
+                    if (stackMoveDistance == 0 || stackMoveCounter - e.topMove > stackMoveDistance) {
+                        // the same condition is re-checked in access(),
+                        // this time while holding the segment lock
+                        access(key, hash);
+                    }
+                }
+            } else {
+                access(key, hash);
+            }
+            hitCount++;
+            return value;
+        }
+
+        /**
+         * Access an item, moving the entry to the top of the stack or front of the
+         * queue if found.
+         *
+         * @param key the key
+         */
+        private synchronized void access(Object key, int hash) {
+            Entry<K, V> e = find(key, hash);
+            if (e == null || e.value == null) {
+                return;
+            }
+            if (e.isHot()) {
+                if (e != stack.stackNext) {
+                    if (stackMoveDistance == 0 || stackMoveCounter - e.topMove > stackMoveDistance) {
+                        // move a hot entry to the top of the stack
+                        // unless it is already there
+                        boolean wasEnd = e == stack.stackPrev;
+                        removeFromStack(e);
+                        if (wasEnd) {
+                            // if moving the last entry, the last entry
+                            // could not be cold, which is not allowed
+                            pruneStack();
+                        }
+                        addToStack(e);
+                    }
+                }
+            } else {
+                removeFromQueue(e);
+                if (e.stackNext != null) {
+                    // resident cold entries become hot
+                    // if they are on the stack
+                    removeFromStack(e);
+                    // which means a hot entry needs to become cold
+                    convertOldestHotToCold();
+                } else {
+                    // cold entries that are not on the stack
+                    // move to the front of the queue
+                    addToQueue(queue, e);
+                }
+                // in any case, the cold entry is moved to the top of the stack
+                addToStack(e);
+            }
+        }
+        
+        /**
+         * Get the value, loading it using the given loader if there is no
+         * resident entry. The loader is invoked while holding the segment
+         * lock (this method is synchronized).
+         *
+         * @param key the key (may not be null)
+         * @param hash the hash
+         * @param valueLoader the callable used to load a missing value
+         * @return the cached or loaded value
+         * @throws ExecutionException if the loader threw an exception
+         */
+        synchronized V get(K key, int hash, Callable<? extends V> valueLoader) throws ExecutionException {
+            V value = get(key, hash);
+            if (value == null) {
+                long start = System.nanoTime();
+                try {
+                    value = valueLoader.call();
+                    loadSuccessCount++;
+                } catch (Exception e) {
+                    loadExceptionCount++;
+                    throw new ExecutionException(e);
+                } finally {
+                    // the load time is recorded for both successful and
+                    // failed loads
+                    long time = System.nanoTime() - start;
+                    totalLoadTime += time;
+                }
+                // NOTE(review): a loader that returns null causes put to
+                // throw a NullPointerException, after loadSuccessCount has
+                // already been incremented - verify intent
+                put(key, hash, value, cache.sizeOf(key, value));
+            }
+            return value;
+        }
+
+        /**
+         * Add an entry to the cache. The entry may or may not exist in the
+         * cache yet. This method will usually mark unknown entries as cold and
+         * known entries as hot.
+         *
+         * @param key the key (may not be null)
+         * @param hash the hash
+         * @param value the value (may not be null)
+         * @param memory the memory used for the given entry
+         * @return the old value, or null if there was no resident entry
+         */
+        synchronized V put(K key, int hash, V value, int memory) {
+            if (value == null) {
+                throw new NullPointerException("The value may not be null");
+            }
+            V old;
+            Entry<K, V> e = find(key, hash);
+            if (e == null) {
+                old = null;
+            } else {
+                // an existing mapping (resident or not) is fully removed
+                // first, and then re-added as a new entry
+                old = e.value;
+                invalidate(key, hash);
+            }
+            e = new Entry<K, V>();
+            e.key = key;
+            e.value = value;
+            e.memory = memory;
+            // insert at the head of the hash chain
+            int index = hash & mask;
+            e.mapNext = entries[index];
+            entries[index] = e;
+            usedMemory += memory;
+            if (usedMemory > maxMemory && mapSize > 0) {
+                // an old entry needs to be removed
+                // (the new entry is already in the map array here, but
+                // mapSize is only incremented below)
+                evict(e);
+            }
+            mapSize++;
+            // added entries are always added to the stack
+            addToStack(e);
+            return old;
+        }
+
+        /**
+         * Remove an entry. Both resident and non-resident entries can be
+         * removed.
+         *
+         * @param key the key (may not be null)
+         * @param hash the hash
+         */
+        synchronized void invalidate(Object key, int hash) {
+            // unlink the entry from the hash chain
+            int index = hash & mask;
+            Entry<K, V> e = entries[index];
+            if (e == null) {
+                return;
+            }
+            if (e.key.equals(key)) {
+                // the entry is the head of the chain
+                entries[index] = e.mapNext;
+            } else {
+                Entry<K, V> last;
+                do {
+                    last = e;
+                    e = e.mapNext;
+                    if (e == null) {
+                        // the key is not mapped
+                        return;
+                    }
+                } while (!e.key.equals(key));
+                last.mapNext = e.mapNext;
+            }
+            mapSize--;
+            usedMemory -= e.memory;
+            // an entry is on the stack if and only if stackNext is set
+            if (e.stackNext != null) {
+                removeFromStack(e);
+            }
+            if (e.isHot()) {
+                // when removing a hot entry, the newest cold entry gets hot,
+                // so the number of hot entries does not change
+                e = queue.queueNext;
+                if (e != queue) {
+                    removeFromQueue(e);
+                    if (e.stackNext == null) {
+                        addToStackBottom(e);
+                    }
+                }
+            } else {
+                removeFromQueue(e);
+            }
+            // ensure the bottom of the stack is a hot entry again
+            pruneStack();
+        }
+
+        /**
+         * Evict cold entries (resident and non-resident) until the memory limit is
+         * reached. The new entry is added as a cold entry, except if it is the only
+         * entry.
+         *
+         * @param newCold a new cold entry
+         */
+        private void evict(Entry<K, V> newCold) {
+            // ensure there are not too many hot entries:
+            // left shift of 5 is multiplication by 32, that means if there are less
+            // than 1/32 (3.125%) cold entries, a new hot entry needs to become cold
+            while ((queueSize << 5) < mapSize) {
+                convertOldestHotToCold();
+            }
+            if (stackSize > 0) {
+                // the new cold entry is at the top of the queue
+                addToQueue(queue, newCold);
+            }
+            // the oldest resident cold entries become non-resident
+            // but at least one cold entry (the new one) must stay
+            while (usedMemory > maxMemory && queueSize > 1) {
+                Entry<K, V> e = queue.queuePrev;
+                usedMemory -= e.memory;
+                removeFromQueue(e);
+                // dropping the value and memory makes the entry non-resident
+                e.value = null;
+                e.memory = 0;
+                addToQueue(queue2, e);
+                // the size of the non-resident-cold entries needs to be limited
+                // NOTE(review): this limits queue2 to half the stack size,
+                // while the class comment says "at most the size of the rest
+                // of the stack" - verify which is intended
+                while (queue2Size + queue2Size > stackSize) {
+                    e = queue2.queuePrev;
+                    int hash = getHash(e.key);
+                    invalidate(e.key, hash);
+                }
+            }
+        }
+
+        private void convertOldestHotToCold() {
+            // the last entry of the stack is known to be hot
+            Entry<K, V> last = stack.stackPrev;
+            // remove from stack - which is done anyway in the stack pruning, but we
+            // can do it here as well
+            removeFromStack(last);
+            // adding an entry to the queue will make it cold
+            addToQueue(queue, last);
+            pruneStack();
+        }
+
+        /**
+         * Ensure the last entry of the stack is cold.
+         */
+        private void pruneStack() {
+            while (true) {
+                Entry<K, V> last = stack.stackPrev;
+                if (last == stack || last.isHot()) {
+                    break;
+                }
+                // the cold entry is still in the queue
+                removeFromStack(last);
+            }
+        }
+
+        /**
+         * Try to find an entry in the map.
+         *
+         * @param key the key
+         * @param hash the hash
+         * @return the entry (might be a non-resident)
+         */
+        Entry<K, V> find(Object key, int hash) {
+            int index = hash & mask;
+            Entry<K, V> e = entries[index];
+            while (e != null && !e.key.equals(key)) {
+                e = e.mapNext;
+            }
+            return e;
+        }
+
+        private void addToStack(Entry<K, V> e) {
+            e.stackPrev = stack;
+            e.stackNext = stack.stackNext;
+            e.stackNext.stackPrev = e;
+            stack.stackNext = e;
+            stackSize++;
+            e.topMove = stackMoveCounter++;
+        }
+
+        /**
+         * Insert the entry at the bottom of the stack (right before the
+         * circular list's head node). Unlike addToStack, this does not
+         * update the entry's stack-move counter.
+         *
+         * @param e the entry to add (must not currently be on the stack)
+         */
+        private void addToStackBottom(Entry<K, V> e) {
+            e.stackNext = stack;
+            e.stackPrev = stack.stackPrev;
+            e.stackPrev.stackNext = e;
+            stack.stackPrev = e;
+            stackSize++;
+        }
+
+        /**
+         * Unlink the entry from the stack and null out its stack pointers,
+         * marking it as off-stack.
+         *
+         * @param e the entry to remove (must currently be on the stack)
+         */
+        private void removeFromStack(Entry<K, V> e) {
+            e.stackPrev.stackNext = e.stackNext;
+            e.stackNext.stackPrev = e.stackPrev;
+            e.stackPrev = e.stackNext = null;
+            stackSize--;
+        }
+
+        /**
+         * Insert the entry at the head of the given queue. Entries with a
+         * value count towards the resident queue size; entries whose value
+         * is null (non-resident) count towards the non-resident queue size.
+         *
+         * @param q the queue head node (resident queue or non-resident queue)
+         * @param e the entry to add (must not currently be on a queue)
+         */
+        private void addToQueue(Entry<K, V> q, Entry<K, V> e) {
+            e.queuePrev = q;
+            e.queueNext = q.queueNext;
+            e.queueNext.queuePrev = e;
+            q.queueNext = e;
+            if (e.value != null) {
+                queueSize++;
+            } else {
+                queue2Size++;
+            }
+        }
+
+        /**
+         * Unlink the entry from its queue and null out its queue pointers;
+         * a null queueNext is what isHot() tests, so this also marks the
+         * entry as hot. Decrements the size counter matching the entry's
+         * residency (non-null value = resident).
+         *
+         * @param e the entry to remove (must currently be on a queue)
+         */
+        private void removeFromQueue(Entry<K, V> e) {
+            e.queuePrev.queueNext = e.queueNext;
+            e.queueNext.queuePrev = e.queuePrev;
+            e.queuePrev = e.queueNext = null;
+            if (e.value != null) {
+                queueSize--;
+            } else {
+                queue2Size--;
+            }
+        }
+
+        /**
+         * Return the keys in this segment, reading the internal state
+         * without modifying it.
+         *
+         * @param cold whether to list entries from one of the queues
+         *            instead of the stack
+         * @param nonResident when listing queue entries, whether to use the
+         *            non-resident queue instead of the resident one
+         * @return the list of keys
+         */
+        synchronized List<K> keys(boolean cold, boolean nonResident) {
+            ArrayList<K> result = new ArrayList<K>();
+            if (!cold) {
+                for (Entry<K, V> e = stack.stackNext; e != stack; e = e.stackNext) {
+                    result.add(e.key);
+                }
+                return result;
+            }
+            Entry<K, V> head = nonResident ? queue2 : queue;
+            for (Entry<K, V> e = head.queueNext; e != head; e = e.queueNext) {
+                result.add(e.key);
+            }
+            return result;
+        }
+
+        /**
+         * Whether a resident entry (one whose value is loaded) exists for
+         * the given key. Does not adjust the internal LIRS state.
+         *
+         * @param key the key (may not be null)
+         * @param hash the key's hash
+         * @return true if a resident entry exists
+         */
+        boolean containsKey(Object key, int hash) {
+            Entry<K, V> e = find(key, hash);
+            if (e == null) {
+                return false;
+            }
+            return e.value != null;
+        }
+
+        /**
+         * Get the set of keys reachable through the stack or the resident
+         * queue.
+         * NOTE(review): the stack can also hold non-resident entries (value
+         * == null), whose keys end up in this set as well - confirm that
+         * this matches the intended "resident entries" contract.
+         *
+         * @return the set of keys
+         */
+        synchronized Set<K> keySet() {
+            Set<K> result = new HashSet<K>();
+            Entry<K, V> e;
+            for (e = stack.stackNext; e != stack; e = e.stackNext) {
+                result.add(e.key);
+            }
+            for (e = queue.queueNext; e != queue; e = e.queueNext) {
+                result.add(e.key);
+            }
+            return result;
+        }
+
+        /**
+         * Set the maximum memory this segment may use. Existing entries are
+         * not evicted immediately; only the limit changes. To resize the
+         * internal array, call the clear method.
+         *
+         * @param maxMemory the maximum size (1 or larger)
+         * @throws IllegalArgumentException if the value is not positive
+         */
+        void setMaxMemory(long maxMemory) {
+            if (maxMemory < 1) {
+                throw new IllegalArgumentException("Max memory must be larger than 0");
+            }
+            this.maxMemory = maxMemory;
+        }
+
+        /**
+         * Set the average memory used per entry, which is used to size the
+         * internal array.
+         *
+         * @param averageMemory the average memory used (1 or larger)
+         * @throws IllegalArgumentException if the value is not positive
+         */
+        void setAverageMemory(int averageMemory) {
+            if (averageMemory < 1) {
+                throw new IllegalArgumentException("Average memory must be larger than 0");
+            }
+            this.averageMemory = averageMemory;
+        }
+
+    }
+
+    /**
+     * A cache entry. Each entry is either hot (low inter-reference recency;
+     * LIR), cold (high inter-reference recency; HIR), or non-resident-cold. Hot
+     * entries are in the stack only. Cold entries are in the queue, and may be
+     * in the stack. Non-resident-cold entries have their value set to null and
+     * are in the stack and in the non-resident queue.
+     *
+     * @param <K> the key type
+     * @param <V> the value type
+     */
+    static class Entry<K, V> {
+
+        /**
+         * The key.
+         */
+        K key;
+
+        /**
+         * The value. Set to null for non-resident-cold entries.
+         */
+        V value;
+
+        /**
+         * The estimated memory used by this entry (presumably as computed by
+         * the weigher when the entry was stored - TODO confirm).
+         */
+        int memory;
+
+        /**
+         * The value of the stack-move counter when the entry was last moved
+         * to the top of the stack.
+         */
+        int topMove;
+
+        /**
+         * The next entry in the stack.
+         */
+        Entry<K, V> stackNext;
+
+        /**
+         * The previous entry in the stack.
+         */
+        Entry<K, V> stackPrev;
+
+        /**
+         * The next entry in the queue (either the resident queue or the
+         * non-resident queue). Null if the entry is not on a queue, which is
+         * what marks it as hot.
+         */
+        Entry<K, V> queueNext;
+
+        /**
+         * The previous entry in the queue.
+         */
+        Entry<K, V> queuePrev;
+
+        /**
+         * The next entry in the map (the hash table bucket's collision
+         * chain).
+         */
+        Entry<K, V> mapNext;
+
+        /**
+         * Whether this entry is hot. Cold entries are in one of the two
+         * queues, so an entry with no queue link is hot.
+         *
+         * @return whether the entry is hot
+         */
+        boolean isHot() {
+            return queueNext == null;
+        }
+
+    }
+    
+    /**
+     * A builder for the cache, mirroring the subset of Guava's
+     * {@code CacheBuilder} API used by this project.
+     */
+    public static class Builder {
+        
+        private Weigher<?, ?> weigher;
+        private long maxWeight;
+
+        /**
+         * Kept for API compatibility with Guava's CacheBuilder; this
+         * implementation does not change any state, so calling it has no
+         * effect.
+         *
+         * @return this builder
+         */
+        public Builder recordStats() {
+            return this;
+        }
+
+        /**
+         * Set the weigher used to compute entry weights.
+         *
+         * @param weigher the weigher
+         * @return this builder
+         */
+        public <K, V> Builder weigher(Weigher<K, V> weigher) {
+            this.weigher = weigher;
+            return this;
+        }
+
+        /**
+         * Set the maximum total weight of the cache.
+         *
+         * @param maxWeight the maximum weight
+         * @return this builder
+         */
+        public Builder maximumWeight(long maxWeight) {
+            this.maxWeight = maxWeight;
+            return this;
+        }
+
+        /**
+         * Build the cache.
+         * NOTE(review): the literal arguments 100, 16, 16 look like the
+         * average memory per entry plus segment/stack tuning parameters -
+         * confirm against the CacheLIRS constructor.
+         *
+         * @return the new cache
+         */
+        public <K, V> Cache<K, V> build() {
+            @SuppressWarnings("unchecked")
+            Weigher<K, V> w = (Weigher<K, V>) weigher;
+            return new CacheLIRS<K, V>(w, maxWeight, 100, 16, 16);
+        }
+        
+    }
+
+    /**
+     * Create a builder, mirroring Guava's
+     * {@code CacheBuilder.newBuilder()} entry point.
+     * 
+     * @return the builder
+     */
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    /**
+     * Get the cached value for the given key, or null if not present.
+     * Delegates to {@code peek} (presumably without adjusting the LIRS
+     * state - TODO confirm against the peek implementation).
+     *
+     * @param key the key
+     * @return the cached value, or null
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    @Nullable
+    public V getIfPresent(Object key) {
+        return peek((K) key);
+    }
+
+    @Override
+    public ImmutableMap<K, V> getAllPresent(Iterable<?> keys) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Get a snapshot of the cached entries as a map. Entries are copied, so
+     * later changes to the returned map are not reflected in the cache.
+     * NOTE(review): Guava's asMap() returns a live view; this copy-based
+     * behavior is a deviation - confirm callers do not rely on write-through.
+     *
+     * @return a new map with the cached key/value pairs
+     */
+    @Override
+    public ConcurrentMap<K, V> asMap() {
+        // ConcurrentHashMap instead of ConcurrentSkipListMap: the skip list
+        // requires keys to be Comparable (or a supplied Comparator), so the
+        // old code threw ClassCastException for non-comparable key types
+        ConcurrentMap<K, V> map = new java.util.concurrent.ConcurrentHashMap<K, V>();
+        for (K key : keySet()) {
+            V value = peek(key);
+            if (value != null) {
+                map.put(key, value);
+            }
+        }
+        return map;
+    }
+
+    /**
+     * Clean up the cache. This implementation has no deferred maintenance
+     * work, so the method does nothing; it exists to satisfy the Guava
+     * {@code Cache} interface.
+     */
+    @Override
+    public void cleanUp() {
+        // nothing to do
+    }
+
+    /**
+     * Copy all mappings from the given map into the cache, one put at a
+     * time.
+     *
+     * @param m the source map
+     */
+    @Override
+    public void putAll(Map<? extends K, ? extends V> m) {
+        for (Map.Entry<? extends K, ? extends V> entry : m.entrySet()) {
+            put(entry.getKey(), entry.getValue());
+        }
+    }
+
+}

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheStats.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheStats.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheStats.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheStats.java Tue Jul  2 12:54:48 2013
@@ -16,7 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
 package org.apache.jackrabbit.oak.cache;
 
 import java.util.Map;
@@ -26,18 +25,24 @@ import com.google.common.cache.Cache;
 import com.google.common.cache.Weigher;
 import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean;
 
-public class CacheStats implements CacheStatsMBean{
-    private final Cache<Object,Object> cache;
-    private final Weigher weigher;
+/**
+ * Cache statistics.
+ */
+public class CacheStats implements CacheStatsMBean {
+    private final Cache<Object, Object> cache;
+    private final Weigher<Object, Object> weigher;
     private final long maxWeight;
     private final String name;
-    private com.google.common.cache.CacheStats lastSnapshot =
-            new com.google.common.cache.CacheStats(0,0,0,0,0,0);
-
-    public CacheStats(Cache cache, String name, Weigher weigher, long maxWeight) {
-        this.cache = cache;
+    private com.google.common.cache.CacheStats lastSnapshot = 
+            new com.google.common.cache.CacheStats(
+            0, 0, 0, 0, 0, 0);
+
+    @SuppressWarnings("unchecked")
+    public CacheStats(Cache<?, ?> cache, String name, 
+            Weigher<?, ?> weigher, long maxWeight) {
+        this.cache = (Cache<Object, Object>) cache;
         this.name = name;
-        this.weigher = weigher;
+        this.weigher = (Weigher<Object, Object>) weigher;
         this.maxWeight = maxWeight;
     }
 
@@ -108,12 +113,14 @@ public class CacheStats implements Cache
 
     @Override
     public long estimateCurrentWeight() {
-        if(weigher == null){
+        if (weigher == null) {
             return -1;
         }
         long size = 0;
-        for(Map.Entry e : cache.asMap().entrySet()){
-            size += weigher.weigh(e.getKey(),e.getValue());
+        for (Map.Entry<?, ?> e : cache.asMap().entrySet()) {
+            Object k = e.getKey();
+            Object v = e.getValue();
+            size += weigher.weigh(k, v);
         }
         return size;
     }
@@ -124,7 +131,7 @@ public class CacheStats implements Cache
     }
 
     @Override
-    public synchronized void resetStats(){
+    public synchronized void resetStats() {
         //Cache stats cannot be reset at Guava level. Instead we
         //take a snapshot and then subtract it from future stats calls
         lastSnapshot = cache.stats();
@@ -134,9 +141,9 @@ public class CacheStats implements Cache
     public String cacheInfoAsString() {
         return Objects.toStringHelper("CacheStats")
                 .add("hitCount", getHitCount())
-                .add("hitRate", String.format("%1.2f",getHitRate()))
+                .add("hitRate", String.format("%1.2f", getHitRate()))
                 .add("missCount", getMissCount())
-                .add("missRate", String.format("%1.2f",getMissRate()))
+                .add("missRate", String.format("%1.2f", getMissRate()))
                 .add("requestCount", getRequestCount())
                 .add("loadCount", getLoadCount())
                 .add("loadSuccessCount", getLoadSuccessCount())
@@ -162,13 +169,15 @@ public class CacheStats implements Cache
      * Based on http://stackoverflow.com/a/3758880/1035417
      */
     private static String humanReadableByteCount(long bytes, boolean si) {
-        if(bytes < 0){
+        if (bytes < 0) {
             return "0";
         }
         int unit = si ? 1000 : 1024;
-        if (bytes < unit) return bytes + " B";
+        if (bytes < unit) {
+            return bytes + " B";
+        }
         int exp = (int) (Math.log(bytes) / Math.log(unit));
-        String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp-1) + (si ? "" : "i");
+        String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
         return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
     }
 }

Added: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheValue.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheValue.java?rev=1498912&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheValue.java (added)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheValue.java Tue Jul  2 12:54:48 2013
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.cache;
+
+/**
+ * A cache value.
+ */
+public interface CacheValue {
+
+    /**
+     * The estimated amount of memory used by this object, in bytes.
+     * 
+     * @return the estimated number of bytes
+     */
+    int getMemory();
+    
+}

Copied: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigher.java (from r1498811, jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/EmpericalWeigher.java)
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigher.java?p2=jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigher.java&p1=jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/EmpericalWeigher.java&r1=1498811&r2=1498912&rev=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/EmpericalWeigher.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/EmpiricalWeigher.java Tue Jul  2 12:54:48 2013
@@ -17,32 +17,20 @@
  * under the License.
  */
 
-package org.apache.jackrabbit.oak.plugins.mongomk;
+package org.apache.jackrabbit.oak.cache;
+
 
 import com.google.common.cache.Weigher;
-import org.apache.jackrabbit.oak.plugins.mongomk.util.Utils;
 
 /**
- * Determines the weight of object based on the memory taken by them. The memory esimates
- * are based on emperical data and not exact
+ * Determines the weight of object based on the memory taken by them. The memory estimates
+ * are based on empirical data and not exact
  */
-public class EmpericalWeigher implements Weigher<String, Object> {
+public class EmpiricalWeigher implements Weigher<String, CacheValue> {
 
     @Override
-    public int weigh(String key, Object value) {
-        int size = key.length() * 2;
-
-        if (value instanceof Node) {
-            size += ((Node) value).getMemory();
-        } else if (value instanceof Node.Children) {
-            size += ((Node.Children) value).getMemory();
-        } else if (value instanceof MongoDocumentStore.CachedDocument) {
-            size += Utils.estimateMemoryUsage(((MongoDocumentStore.CachedDocument) value).value);
-        } else if (value instanceof String) {
-            size += ((String) value).length() * 2;
-        } else if (value != null) {
-            throw new IllegalArgumentException("Cannot determine weight for object of type " + value.getClass());
-        }
-        return size;
+    public int weigh(String key, CacheValue value) {
+        return key.length() * 2 + value.getMemory();
     }
+    
 }

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoDocumentStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoDocumentStore.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoDocumentStore.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoDocumentStore.java Tue Jul  2 12:54:48 2013
@@ -31,11 +31,11 @@ import org.apache.jackrabbit.mk.api.Micr
 import org.apache.jackrabbit.oak.plugins.mongomk.UpdateOp.Operation;
 import org.apache.jackrabbit.oak.plugins.mongomk.util.Utils;
 import org.apache.jackrabbit.oak.cache.CacheStats;
+import org.apache.jackrabbit.oak.cache.CacheValue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
 import com.mongodb.BasicDBObject;
 import com.mongodb.DB;
 import com.mongodb.DBCollection;
@@ -64,7 +64,6 @@ public class MongoDocumentStore implemen
     private long timeSum;
     
     private final Cache<String, CachedDocument> nodesCache;
-    
     private final CacheStats cacheStats;
 
     public MongoDocumentStore(DB db, MongoMK.Builder builder) {
@@ -83,12 +82,7 @@ public class MongoDocumentStore implemen
         nodes.ensureIndex(index, options);
 
         // TODO expire entries if the parent was changed
-        nodesCache = CacheBuilder.newBuilder()
-                .weigher(builder.getWeigher())
-                .recordStats() 
-                .maximumWeight(builder.getDocumentCacheSize())
-                .build();
-
+        nodesCache = builder.buildCache(builder.getDocumentCacheSize());
         cacheStats = new CacheStats(nodesCache, "MongoMk-Documents", builder.getWeigher(),
                 builder.getDocumentCacheSize());
     }
@@ -469,12 +463,20 @@ public class MongoDocumentStore implemen
     /**
      * A cache entry.
      */
-    static class CachedDocument {
+    static class CachedDocument implements CacheValue {
+        
         final long time = System.currentTimeMillis();
         final Map<String, Object> value;
+        
         CachedDocument(Map<String, Object> value) {
             this.value = value;
         }
+        
+        @Override
+        public int getMemory() {
+            return Utils.estimateMemoryUsage(value);
+        }
+        
     }
 
     @Override

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMK.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMK.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMK.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMK.java Tue Jul  2 12:54:48 2013
@@ -38,9 +38,6 @@ import java.util.concurrent.atomic.Atomi
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.Weigher;
 import org.apache.jackrabbit.mk.api.MicroKernel;
 import org.apache.jackrabbit.mk.api.MicroKernelException;
 import org.apache.jackrabbit.mk.blobs.BlobStore;
@@ -49,17 +46,23 @@ import org.apache.jackrabbit.mk.json.Jso
 import org.apache.jackrabbit.mk.json.JsopStream;
 import org.apache.jackrabbit.mk.json.JsopTokenizer;
 import org.apache.jackrabbit.mk.json.JsopWriter;
+import org.apache.jackrabbit.oak.cache.CacheLIRS;
+import org.apache.jackrabbit.oak.cache.CacheStats;
+import org.apache.jackrabbit.oak.cache.CacheValue;
+import org.apache.jackrabbit.oak.cache.EmpiricalWeigher;
+import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.mongomk.DocumentStore.Collection;
 import org.apache.jackrabbit.oak.plugins.mongomk.Node.Children;
 import org.apache.jackrabbit.oak.plugins.mongomk.Revision.RevisionComparator;
 import org.apache.jackrabbit.oak.plugins.mongomk.blob.MongoBlobStore;
 import org.apache.jackrabbit.oak.plugins.mongomk.util.TimingDocumentStoreWrapper;
 import org.apache.jackrabbit.oak.plugins.mongomk.util.Utils;
-import org.apache.jackrabbit.oak.commons.PathUtils;
-import org.apache.jackrabbit.oak.cache.CacheStats;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.Weigher;
 import com.mongodb.DB;
 
 /**
@@ -72,6 +75,12 @@ public class MongoMK implements MicroKer
      */
     static final int MANY_CHILDREN_THRESHOLD = Integer.getInteger(
             "oak.mongoMK.manyChildren", 50);
+    
+    /**
+     * Enable the LIRS cache.
+     */
+    static final boolean LIRS_CACHE = Boolean.parseBoolean(
+            System.getProperty("oak.mongoMK.lirsCache", "true"));
 
     private static final Logger LOG = LoggerFactory.getLogger(MongoMK.class);
 
@@ -93,7 +102,7 @@ public class MongoMK implements MicroKer
      */
     private static final boolean FAST_DIFF = Boolean.parseBoolean(
             System.getProperty("oak.mongoMK.fastDiff", "true"));
-    
+        
     /**
      * How long to remember the relative order of old revision of all cluster
      * nodes, in milliseconds. The default is one hour.
@@ -146,10 +155,11 @@ public class MongoMK implements MicroKer
      */
     private final Cache<String, Node.Children> nodeChildrenCache;
     private final CacheStats nodeChildrenCacheStats;
+    
     /**
      * Diff cache.
      */
-    private final Cache<String, String> diffCache;
+    private final Cache<String, Diff> diffCache;
     private final CacheStats diffCacheStats;
 
     /**
@@ -222,27 +232,15 @@ public class MongoMK implements MicroKer
         //TODO Make stats collection configurable as it add slight overhead
         //TODO Expose the stats as JMX beans
 
-        nodeCache = CacheBuilder.newBuilder()
-                        .weigher(builder.getWeigher())
-                        .maximumWeight(builder.getNodeCacheSize())
-                        .recordStats()
-                        .build();
+        nodeCache = builder.buildCache(builder.getNodeCacheSize());
         nodeCacheStats = new CacheStats(nodeCache, "MongoMk-Node",
                 builder.getWeigher(), builder.getNodeCacheSize());
 
-        nodeChildrenCache =  CacheBuilder.newBuilder()
-                        .weigher(builder.getWeigher())
-                        .recordStats()
-                        .maximumWeight(builder.getChildrenCacheSize())
-                        .build();
+        nodeChildrenCache = builder.buildCache(builder.getChildrenCacheSize());
         nodeChildrenCacheStats = new CacheStats(nodeChildrenCache, "MongoMk-NodeChildren",
                 builder.getWeigher(), builder.getChildrenCacheSize());
 
-        diffCache = CacheBuilder.newBuilder()
-                .recordStats()
-                .weigher(builder.getWeigher())
-                .maximumWeight(builder.getDiffCacheSize())
-                .build();
+        diffCache = builder.buildCache(builder.getDiffCacheSize());
         diffCacheStats = new CacheStats(diffCache, "MongoMk-DiffCache",
                 builder.getWeigher(), builder.getDiffCacheSize());
 
@@ -777,12 +775,12 @@ public class MongoMK implements MicroKer
                        final int depth) throws MicroKernelException {
         String key = fromRevisionId + "-" + toRevisionId + "-" + path + "-" + depth;
         try {
-            return diffCache.get(key, new Callable<String>() {
+            return diffCache.get(key, new Callable<Diff>() {
                 @Override
-                public String call() throws Exception {
-                    return diffImpl(fromRevisionId, toRevisionId, path, depth);
+                public Diff call() throws Exception {
+                    return new Diff(diffImpl(fromRevisionId, toRevisionId, path, depth));
                 }
-            });
+            }).diff;
         } catch (ExecutionException e) {
             if (e.getCause() instanceof MicroKernelException) {
                 throw (MicroKernelException) e.getCause();
@@ -1532,6 +1530,38 @@ public class MongoMK implements MicroKer
         }
     }
 
+    public CacheStats getNodeCacheStats() {
+        return nodeCacheStats;
+    }
+
+    public CacheStats getNodeChildrenCacheStats() {
+        return nodeChildrenCacheStats;
+    }
+
+    public CacheStats getDiffCacheStats() {
+        return diffCacheStats;
+    }
+    
+    public ClusterNodeInfo getClusterInfo() {
+        return clusterNodeInfo;
+    }
+
+    public int getPendingWriteCount() {
+        return unsavedLastRevisions.size();
+    }
+
+    public boolean isCached(String path) {
+        return store.isCached(Collection.NODES, Utils.getIdFromPath(path));
+    }
+    
+    public void stopBackground() {
+        stopBackground = true;
+    }
+    
+    RevisionComparator getRevisionComparator() {
+        return revisionComparator;
+    }
+    
     /**
      * A background thread.
      */
@@ -1564,17 +1594,23 @@ public class MongoMK implements MicroKer
             }
         }
     }
+    
+    /**
+     * A (cached) result of the diff operation.
+     */
+    private static class Diff implements CacheValue {
+        
+        final String diff;
+        
+        Diff(String diff) {
+            this.diff = diff;
+        }
 
-    public CacheStats getNodeCacheStats() {
-        return nodeCacheStats;
-    }
-
-    public CacheStats getNodeChildrenCacheStats() {
-        return nodeChildrenCacheStats;
-    }
-
-    public CacheStats getDiffCacheStats() {
-        return diffCacheStats;
+        @Override
+        public int getMemory() {
+            return diff.length() * 2;
+        }
+        
     }
 
     /**
@@ -1587,7 +1623,7 @@ public class MongoMK implements MicroKer
         private int clusterId  = Integer.getInteger("oak.mongoMK.clusterId", 0);
         private int asyncDelay = 1000;
         private boolean timing;
-        private Weigher<String, Object> weigher = new EmpericalWeigher();
+        private Weigher<String, CacheValue> weigher = new EmpiricalWeigher();
         private long nodeCacheSize;
         private long childrenCacheSize;
         private long diffCacheSize;
@@ -1694,11 +1730,11 @@ public class MongoMK implements MicroKer
             return asyncDelay;
         }
 
-        public Weigher<String, Object> getWeigher() {
+        public Weigher<String, CacheValue> getWeigher() {
             return weigher;
         }
 
-        public Builder withWeigher(Weigher<String, Object> weigher) {
+        public Builder withWeigher(Weigher<String, CacheValue> weigher) {
             this.weigher = weigher;
             return this;
         }
@@ -1735,26 +1771,22 @@ public class MongoMK implements MicroKer
         public MongoMK open() {
             return new MongoMK(this);
         }
-    }
-
-    public ClusterNodeInfo getClusterInfo() {
-        return clusterNodeInfo;
-    }
-
-    public int getPendingWriteCount() {
-        return unsavedLastRevisions.size();
-    }
-
-    public boolean isCached(String path) {
-        return store.isCached(Collection.NODES, Utils.getIdFromPath(path));
-    }
-    
-    public void stopBackground() {
-        stopBackground = true;
-    }
-    
-    RevisionComparator getRevisionComparator() {
-        return revisionComparator;
+        
+        /**
+         * Create a cache.
+         * 
+         * @param <V> the value type
+         * @param maxWeight
+         * @return the cache
+         */
+        public <V extends CacheValue> Cache<String, V> buildCache(long maxWeight) {
+            if (LIRS_CACHE) {
+                return CacheLIRS.newBuilder().weigher(weigher).
+                        maximumWeight(maxWeight).recordStats().build();
+            }
+            return CacheBuilder.newBuilder().weigher(weigher).
+                    maximumWeight(maxWeight).recordStats().build();
+        }
     }
 
 }

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMicroKernelService.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMicroKernelService.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMicroKernelService.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoMicroKernelService.java Tue Jul  2 12:54:48 2013
@@ -130,7 +130,7 @@ public class MongoMicroKernelService {
         );
 
         DocumentStore ds = mk.getDocumentStore();
-        if(ds instanceof MongoDocumentStore){
+        if (ds instanceof MongoDocumentStore) {
             MongoDocumentStore mds = (MongoDocumentStore) ds;
             registrations.add(
                     registerMBean(wb,
@@ -144,7 +144,7 @@ public class MongoMicroKernelService {
 
     @Deactivate
     private void deactivate() {
-        for(Registration r : registrations){
+        for (Registration r : registrations) {
             r.unregister();
         }
 

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/Node.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/Node.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/Node.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/Node.java Tue Jul  2 12:54:48 2013
@@ -22,12 +22,13 @@ import java.util.Set;
 import java.util.Map.Entry;
 
 import org.apache.jackrabbit.mk.json.JsopWriter;
+import org.apache.jackrabbit.oak.cache.CacheValue;
 import org.apache.jackrabbit.oak.plugins.mongomk.util.Utils;
 
 /**
  * Represents a node held in memory (in the cache for example).
  */
-public class Node {
+public class Node implements CacheValue {
 
     final String path;
     final Revision rev;
@@ -102,7 +103,8 @@ public class Node {
     public void setLastRevision(Revision lastRevision) {
         this.lastRevision = lastRevision;
     }
-
+    
+    @Override
     public int getMemory() {
         int size = 180 + path.length() * 2;
         for (Entry<String, String> e : properties.entrySet()) {
@@ -114,12 +116,13 @@ public class Node {
     /**
      * A list of children for a node.
      */
-    static class Children {
+    static class Children implements CacheValue {
 
         final ArrayList<String> children = new ArrayList<String>();
         boolean hasMore;
         long offset;
 
+        @Override
         public int getMemory() {
             int size = 114;
             for (String c : children) {

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/util/Utils.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/util/Utils.java?rev=1498912&r1=1498911&r2=1498912&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/util/Utils.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/util/Utils.java Tue Jul  2 12:54:48 2013
@@ -179,6 +179,7 @@ public class Utils {
      * 
      * @param source the source map
      * @param target the target map
+     * @param <K> the type of the map key
      */
     public static <K> void deepCopyMap(Map<K, Object> source, Map<K, Object> target) {
         for (Entry<K, Object> e : source.entrySet()) {



Mime
View raw message