cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From zzn...@apache.org
Subject cassandra git commit: Fixed checkAvailableDiskSpace to properly recalculate expected disk usage of compaction task before reducing scope
Date Tue, 10 Jan 2017 02:53:40 GMT
Repository: cassandra
Updated Branches:
  refs/heads/trunk cc02e9059 -> b3ffdf8c4


Fixed checkAvailableDiskSpace to properly recalculate expected disk usage of compaction task
before reducing scope

patch by Jon Haddad; reviewed by Nate McCall for CASSANDRA-12979


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b3ffdf8c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b3ffdf8c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b3ffdf8c

Branch: refs/heads/trunk
Commit: b3ffdf8c4ac9bbd302555756e2df73bd833e69fa
Parents: cc02e90
Author: Jon Haddad <jon@jonhaddad.com>
Authored: Wed Jan 4 11:52:54 2017 -0800
Committer: Nate McCall <zznate.m@gmail.com>
Committed: Tue Jan 10 15:52:22 2017 +1300

----------------------------------------------------------------------
 CHANGES.txt                                     |  2 +-
 NEWS.txt                                        |  3 ++
 .../cassandra/db/compaction/CompactionTask.java | 41 +++++++++++++++-----
 3 files changed, 36 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/b3ffdf8c/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index f96e166..d0c4960 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -7,6 +7,7 @@
  * Update bundled cqlsh python driver to 3.7.0 (CASSANDRA-12736)
  * Reject invalid replication settings when creating or altering a keyspace (CASSANDRA-12681)
  * Clean up the SSTableReader#getScanner API wrt removal of RateLimiter (CASSANDRA-12422)
+ * CompactionTask now correctly drops sstables out of compaction when not enough disk space
is available (CASSANDRA-12979)
 
 
 3.12
@@ -31,7 +32,6 @@
 Merged from 3.0:
  * Remove support for non-JavaScript UDFs (CASSANDRA-12883)
 
-
 3.10
  * Fixed query monitoring for range queries (CASSANDRA-13050)
  * Remove outboundBindAny configuration property (CASSANDRA-12673)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b3ffdf8c/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 25903c7..e155768 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -47,6 +47,9 @@ Upgrading
    - Only Java and JavaScript are now supported UDF languages.
      The sandbox in 3.0 already prevented the use of script languages except Java
      and JavaScript.
+   - Compaction now correctly drops sstables out of CompactionTask when there
+     isn't enough disk space to perform the full compaction.  This should reduce
+     pending compaction tasks on systems with little remaining disk space.
 
 3.10
 ====

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b3ffdf8c/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
index a9d6c7c..b2e9b8c 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionTask.java
@@ -86,12 +86,15 @@ public class CompactionTask extends AbstractCompactionTask
         return transaction.originals().size();
     }
 
-    public boolean reduceScopeForLimitedSpace()
+    public boolean reduceScopeForLimitedSpace(long expectedSize)
     {
         if (partialCompactionsAcceptable() && transaction.originals().size() >
1)
         {
             // Try again w/o the largest one.
-            logger.warn("Insufficient space to compact all requested files {}", StringUtils.join(transaction.originals(),
", "));
+            logger.warn("insufficient space to compact all requested files. {}MB required,
{}",
+                        (float) expectedSize / 1024 / 1024,
+                        StringUtils.join(transaction.originals(), ", "));
+
             // Note that we have removed files that are still marked as compacting.
             // This is suboptimal but ok since the caller will unmark all the sstables at the
end.
             SSTableReader removedSSTable = cfs.getMaxSizeFile(transaction.originals());
@@ -124,9 +127,8 @@ public class CompactionTask extends AbstractCompactionTask
 
         // note that we need to do a rough estimate early if we can fit the compaction on
disk - this is pessimistic, but
         // since we might remove sstables from the compaction in checkAvailableDiskSpace
it needs to be done here
-        long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(),
compactionType);
-        long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
-        checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);
+
+        checkAvailableDiskSpace();
 
         // sanity check: all sstables must belong to the same cfs
         assert !Iterables.any(transaction.originals(), new Predicate<SSTableReader>()
@@ -317,7 +319,12 @@ public class CompactionTask extends AbstractCompactionTask
         return minRepairedAt;
     }
 
-    protected void checkAvailableDiskSpace(long estimatedSSTables, long expectedWriteSize)
+    /*
+    Checks if we have enough disk space to execute the compaction.  Drops the largest sstable
out of the Task until
+    there's enough space (in theory) to handle the compaction.  Does not take into account
space that will be taken by
+    other compactions.
+     */
+    protected void checkAvailableDiskSpace()
     {
         if(!cfs.isCompactionDiskSpaceCheckEnabled() && compactionType == OperationType.COMPACTION)
         {
@@ -325,10 +332,26 @@ public class CompactionTask extends AbstractCompactionTask
             return;
         }
 
-        while (!getDirectories().hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+        CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
+
+        while(true)
         {
-            if (!reduceScopeForLimitedSpace())
-                throw new RuntimeException(String.format("Not enough space for compaction,
estimated sstables = %d, expected write size = %d", estimatedSSTables, expectedWriteSize));
+            long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(),
compactionType);
+            long estimatedSSTables = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
+
+            if(cfs.getDirectories().hasAvailableDiskSpace(estimatedSSTables, expectedWriteSize))
+                break;
+
+            if (!reduceScopeForLimitedSpace(expectedWriteSize))
+            {
+                // we end up here if we can't take any more sstables out of the compaction.
+                // usually means we've run out of disk space
+                String msg = String.format("Not enough space for compaction, estimated sstables
= %d, expected write size = %d", estimatedSSTables, expectedWriteSize);
+                logger.warn(msg);
+                throw new RuntimeException(msg);
+            }
+            logger.warn("Not enough space for compaction, {}MB estimated.  Reducing scope.",
+                            (float) expectedWriteSize / 1024 / 1024);
         }
     }
 


Mime
View raw message