cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From alek...@apache.org
Subject git commit: Remove memory emergency pressure valve logic
Date Mon, 11 Mar 2013 16:25:41 GMT
Updated Branches:
  refs/heads/trunk 0f72806c3 -> e79d9fbf8


Remove memory emergency pressure valve logic

patch by Aleksey Yeschenko; reviewed by Jonathan Ellis for
CASSANDRA-3534


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e79d9fbf
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e79d9fbf
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e79d9fbf

Branch: refs/heads/trunk
Commit: e79d9fbf84a35021cafa21d428e08fdd9bee584e
Parents: 0f72806
Author: Aleksey Yeschenko <aleksey@apache.org>
Authored: Mon Mar 11 19:24:27 2013 +0300
Committer: Aleksey Yeschenko <aleksey@apache.org>
Committed: Mon Mar 11 19:24:27 2013 +0300

----------------------------------------------------------------------
 CHANGES.txt                                        |    1 +
 NEWS.txt                                           |    6 +++-
 conf/cassandra.yaml                                |   25 --------------
 .../apache/cassandra/cache/AutoSavingCache.java    |   13 -------
 src/java/org/apache/cassandra/config/Config.java   |    3 --
 .../cassandra/config/DatabaseDescriptor.java       |   16 ---------
 .../org/apache/cassandra/service/CacheService.java |   16 ---------
 .../cassandra/service/CacheServiceMBean.java       |    5 ---
 .../org/apache/cassandra/service/GCInspector.java  |   20 -----------
 .../apache/cassandra/service/StorageService.java   |   26 ---------------
 test/conf/cassandra.yaml                           |    1 -
 .../unit/org/apache/cassandra/db/RowCacheTest.java |    9 ++---
 12 files changed, 9 insertions(+), 132 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 3ea8849..9cf5271 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -13,6 +13,7 @@
  * Change order of directory searching for c*.in.sh (CASSANDRA-3983)
  * Add tool to reset SSTable level (CASSANDRA-5271)
  * Allow custom configuration loader (CASSANDRA-5045)
+ * Remove memory emergency pressure valve logic (CASSANDRA-3534)
 
 
 1.2.3

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index b68a5f4..250545e 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -15,7 +15,11 @@ Upgrading
 ---------
     - Replication and strategy options do not accept unknown options anymore.
       This was already the case for CQL3 in 1.2 but this is now the case for
-      thrift to.
+      thrift too.
+    - reduce_cache_sizes_at, reduce_cache_capacity_to, and
+      flush_largest_memtables_at options have been removed from cassandra.yaml.
+    - CacheServiceMBean.reduceCacheSizes() has been removed.
+      Use CacheServiceMBean.set{Key,Row}CacheCapacityInMB() instead.
 
 
 1.2.3

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/conf/cassandra.yaml
----------------------------------------------------------------------
diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml
index 2691e47..b3ef2cd 100644
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@ -237,31 +237,6 @@ seed_provider:
           # Ex: "<ip1>,<ip2>,<ip3>"
           - seeds: "127.0.0.1"
 
-# emergency pressure valve: each time heap usage after a full (CMS)
-# garbage collection is above this fraction of the max, Cassandra will
-# flush the largest memtables.  
-#
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-#
-# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
-# it is most effective under light to moderate load, or read-heavy
-# workloads; under truly massive write load, it will often be too
-# little, too late.
-flush_largest_memtables_at: 0.75
-
-# emergency pressure valve #2: the first time heap usage after a full
-# (CMS) garbage collection is above this fraction of the max,
-# Cassandra will reduce cache maximum _capacity_ to the given fraction
-# of the current _size_.  Should usually be set substantially above
-# flush_largest_memtables_at, since that will have less long-term
-# impact on the system.  
-# 
-# Set to 1.0 to disable.  Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-
 # For workloads with more data than can fit in memory, Cassandra's
 # bottleneck will be reads that need to fetch data from
 # disk. "concurrent_reads" should be set to (16 * number_of_drives) in

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/cache/AutoSavingCache.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 7e4bb77..072385e 100644
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@ -176,19 +176,6 @@ public class AutoSavingCache<K extends CacheKey, V> extends InstrumentingCache<K
         return CompactionManager.instance.submitCacheWrite(getWriter(keysToSave));
     }
 
-    public void reduceCacheSize()
-    {
-        if (getCapacity() > 0)
-        {
-            int newCapacity = (int) (DatabaseDescriptor.getReduceCacheCapacityTo() * weightedSize());
-
-            logger.warn(String.format("Reducing %s capacity from %d to %s to reduce memory pressure",
-                                      cacheType, getCapacity(), newCapacity));
-
-            setCapacity(newCapacity);
-        }
-    }
-
     public class Writer extends CompactionInfo.Holder
     {
         private final Set<K> keys;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/config/Config.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java
index 2bc1b0e..ce86609 100644
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@ -144,9 +144,6 @@ public class Config
 
     public InternodeCompression internode_compression = InternodeCompression.none;
 
-    public Double flush_largest_memtables_at = 1.0;
-    public Double reduce_cache_sizes_at = 1.0;
-    public double reduce_cache_capacity_to = 0.6;
     public int hinted_handoff_throttle_in_kb = 1024;
     public int max_hints_delivery_threads = 1;
     public boolean compaction_preheat_key_cache = true;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index fc49bcd..98f671d 100644
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@ -37,7 +37,6 @@ import org.apache.cassandra.config.EncryptionOptions.ServerEncryptionOptions;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DefsTable;
 import org.apache.cassandra.db.SystemTable;
-import org.apache.cassandra.db.Table;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSWriteError;
@@ -1107,21 +1106,6 @@ public class DatabaseDescriptor
         return conf.client_encryption_options;
     }
 
-    public static double getFlushLargestMemtablesAt()
-    {
-        return conf.flush_largest_memtables_at;
-    }
-
-    public static double getReduceCacheSizesAt()
-    {
-        return conf.reduce_cache_sizes_at;
-    }
-
-    public static double getReduceCacheCapacityTo()
-    {
-        return conf.reduce_cache_capacity_to;
-    }
-
     public static int getHintedHandoffThrottleInKB()
     {
         return conf.hinted_handoff_throttle_in_kb;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/service/CacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index b0aa693..e389a5d 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -296,22 +296,6 @@ public class CacheService implements CacheServiceMBean
         return keyCache.size();
     }
 
-    public void reduceCacheSizes()
-    {
-        reduceRowCacheSize();
-        reduceKeyCacheSize();
-    }
-
-    public void reduceRowCacheSize()
-    {
-        rowCache.reduceCacheSize();
-    }
-
-    public void reduceKeyCacheSize()
-    {
-        keyCache.reduceCacheSize();
-    }
-
     public void saveCaches() throws ExecutionException, InterruptedException
     {
         List<Future<?>> futures = new ArrayList<Future<?>>(2);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/service/CacheServiceMBean.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheServiceMBean.java b/src/java/org/apache/cassandra/service/CacheServiceMBean.java
index 8b8adf2..8e2f530 100644
--- a/src/java/org/apache/cassandra/service/CacheServiceMBean.java
+++ b/src/java/org/apache/cassandra/service/CacheServiceMBean.java
@@ -42,11 +42,6 @@ public interface CacheServiceMBean
     public void setKeyCacheCapacityInMB(long capacity);
 
     /**
-     * sets each cache's maximum capacity to "reduce_cache_capacity_to" of its current size
-     */
-    public void reduceCacheSizes();
-
-    /**
      * save row and key caches
      *
      * @throws ExecutionException when attempting to retrieve the result of a task that aborted by throwing an exception

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/service/GCInspector.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/GCInspector.java b/src/java/org/apache/cassandra/service/GCInspector.java
index bca8e1c..9961bf9 100644
--- a/src/java/org/apache/cassandra/service/GCInspector.java
+++ b/src/java/org/apache/cassandra/service/GCInspector.java
@@ -31,7 +31,6 @@ import javax.management.ObjectName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.io.sstable.SSTableDeletingTask;
 import org.apache.cassandra.utils.StatusLogger;
 
@@ -50,8 +49,6 @@ public class GCInspector
     final List<GarbageCollectorMXBean> beans = new ArrayList<GarbageCollectorMXBean>();
     final MemoryMXBean membean = ManagementFactory.getMemoryMXBean();
 
-    private volatile boolean cacheSizesReduced;
-
     public GCInspector()
     {
         MBeanServer server = ManagementFactory.getPlatformMBeanServer();
@@ -125,24 +122,7 @@ public class GCInspector
 
              // if we just finished a full collection and we're still using a lot of memory, try to reduce the pressure
             if (gc.getName().equals("ConcurrentMarkSweep"))
-            {
                 SSTableDeletingTask.rescheduleFailedTasks();
-
-                double usage = (double) memoryUsed / memoryMax;
-
-                if (memoryUsed > DatabaseDescriptor.getReduceCacheSizesAt() * memoryMax && !cacheSizesReduced)
-                {
-                    cacheSizesReduced = true;
-                    logger.warn("Heap is " + usage + " full.  You may need to reduce memtable and/or cache sizes.  Cassandra is now reducing cache sizes to free up memory.  Adjust reduce_cache_sizes_at threshold in cassandra.yaml if you don't want Cassandra to do this automatically");
-                    CacheService.instance.reduceCacheSizes();
-                }
-
-                if (memoryUsed > DatabaseDescriptor.getFlushLargestMemtablesAt() * memoryMax)
-                {
-                    logger.warn("Heap is " + usage + " full.  You may need to reduce memtable and/or cache sizes.  Cassandra will now flush up to the two largest memtables to free up memory.  Adjust flush_largest_memtables_at threshold in cassandra.yaml if you don't want Cassandra to do this automatically");
-                    StorageService.instance.flushLargestMemtables();
-                }
-            }
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 77b3c4a..9101a4c 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -3517,32 +3517,6 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
     }
 
     /**
-     * Flushes the two largest memtables by ops and by throughput
-     */
-    public void flushLargestMemtables()
-    {
-        ColumnFamilyStore largest = null;
-        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
-        {
-            long total = cfs.getTotalMemtableLiveSize();
-
-            if (total > 0 && (largest == null || total > largest.getTotalMemtableLiveSize()))
-            {
-                logger.debug(total + " estimated memtable size for " + cfs);
-                largest = cfs;
-            }
-        }
-        if (largest == null)
-        {
-            logger.info("Unable to reduce heap usage since there are no dirty column families");
-            return;
-        }
-
-        logger.warn("Flushing " + largest + " to relieve memory pressure");
-        largest.forceFlush();
-    }
-
-    /**
      * Seed data to the endpoints that will be responsible for it at the future
      *
      * @param rangesToStreamByTable tables and data ranges with endpoints included for each

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/test/conf/cassandra.yaml
----------------------------------------------------------------------
diff --git a/test/conf/cassandra.yaml b/test/conf/cassandra.yaml
index 133e033..a207bc6 100644
--- a/test/conf/cassandra.yaml
+++ b/test/conf/cassandra.yaml
@@ -31,5 +31,4 @@ server_encryption_options:
     truststore: conf/.truststore
     truststore_password: cassandra
 incremental_backups: true
-flush_largest_memtables_at: 1.0
 compaction_throughput_mb_per_sec: 0

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e79d9fbf/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index c75cbc7..3dfef4d 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -114,7 +114,7 @@ public class RowCacheTest extends SchemaLoader
     public void testRowCacheLoad() throws Exception
     {
         CacheService.instance.setRowCacheCapacityInMB(1);
-        rowCacheLoad(100, Integer.MAX_VALUE, false);
+        rowCacheLoad(100, Integer.MAX_VALUE);
         CacheService.instance.setRowCacheCapacityInMB(0);
     }
 
@@ -122,11 +122,11 @@ public class RowCacheTest extends SchemaLoader
     public void testRowCachePartialLoad() throws Exception
     {
         CacheService.instance.setRowCacheCapacityInMB(1);
-        rowCacheLoad(100, 50, true);
+        rowCacheLoad(100, 50);
         CacheService.instance.setRowCacheCapacityInMB(0);
     }
 
-    public void rowCacheLoad(int totalKeys, int keysToSave, boolean reduceLoadCapacity) throws Exception
+    public void rowCacheLoad(int totalKeys, int keysToSave) throws Exception
     {
         CompactionManager.instance.disableAutoCompaction();
 
@@ -144,9 +144,6 @@ public class RowCacheTest extends SchemaLoader
         // force the cache to disk
         CacheService.instance.rowCache.submitWrite(keysToSave).get();
 
-        if (reduceLoadCapacity)
-            CacheService.instance.reduceRowCacheSize();
-
         // empty the cache again to make sure values came from disk
         CacheService.instance.invalidateRowCache();
         assert CacheService.instance.rowCache.size() == 0;


Mime
View raw message