cassandra-commits mailing list archives

From bened...@apache.org
Subject [09/12] cassandra git commit: Merge branch 'cassandra-2.1' into cassandra-2.2
Date Tue, 07 Jul 2015 15:46:14 GMT
Merge branch 'cassandra-2.1' into cassandra-2.2

Conflicts:
	CHANGES.txt
	src/java/org/apache/cassandra/db/compaction/Scrubber.java
	src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/ebe18bb2
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/ebe18bb2
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/ebe18bb2

Branch: refs/heads/cassandra-2.2
Commit: ebe18bb2ff62602fd5f55b969ecf665d2d3e5ace
Parents: 7d31068 4c94ef2
Author: Benedict Elliott Smith <benedict@apache.org>
Authored: Tue Jul 7 16:28:19 2015 +0100
Committer: Benedict Elliott Smith <benedict@apache.org>
Committed: Tue Jul 7 16:28:19 2015 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |  5 ++
 .../cassandra/db/compaction/Scrubber.java       | 38 +++++++++++----
 .../io/sstable/format/SSTableReader.java        | 50 ++++++++++++++------
 .../cassandra/tools/StandaloneScrubber.java     |  2 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java | 25 ++++++++++
 5 files changed, 95 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/ebe18bb2/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 7c6b4ad,2cbc7c4..a863ad8
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,28 -1,12 +1,33 @@@
 -2.1.9
++2.2.0-rc3
+ Merged from 2.0:
 - * Scrub (recover) sstables even when -Index.db is missing, (CASSANDRA-9591)
++ * Scrub (recover) sstables even when -Index.db is missing (CASSANDRA-9591)
+ 
+ 
 -2.1.8
 +2.2.0-rc2
 + * Re-enable memory-mapped I/O on Windows (CASSANDRA-9658)
 + * Warn when an extra-large partition is compacted (CASSANDRA-9643)
 + * (cqlsh) Allow setting the initial connection timeout (CASSANDRA-9601)
 + * BulkLoader has --transport-factory option but does not use it (CASSANDRA-9675)
 + * Allow JMX over SSL directly from nodetool (CASSANDRA-9090)
 + * Update cqlsh for UDFs (CASSANDRA-7556)
 + * Change Windows kernel default timer resolution (CASSANDRA-9634)
 + * Deprecate sstable2json and json2sstable (CASSANDRA-9618)
 + * Allow native functions in user-defined aggregates (CASSANDRA-9542)
 + * Don't repair system_distributed by default (CASSANDRA-9621)
 + * Fix mixing min, max, and count aggregates for blob type (CASSANDRA-9622)
 + * Rename class for DATE type in Java driver (CASSANDRA-9563)
 + * Duplicate compilation of UDFs on coordinator (CASSANDRA-9475)
 + * Fix connection leak in CqlRecordWriter (CASSANDRA-9576)
 + * Mlockall before opening system sstables & remove boot_without_jna option (CASSANDRA-9573)
 + * Add functions to convert timeuuid to date or time, deprecate dateOf and unixTimestampOf (CASSANDRA-9229)
 + * Make sure we cancel non-compacting sstables from LifecycleTransaction (CASSANDRA-9566)
 + * Fix deprecated repair JMX API (CASSANDRA-9570)
 + * Add logback metrics (CASSANDRA-9378)
 + * Update and refactor ant test/test-compression to run the tests in parallel (CASSANDRA-9583)
 + * Fix upgrading to new directory for secondary index (CASSANDRA-9687)
 +Merged from 2.1:
   * (cqlsh) Fix bad check for CQL compatibility when DESCRIBE'ing
     COMPACT STORAGE tables with no clustering columns
 - * Warn when an extra-large partition is compacted (CASSANDRA-9643)
   * Eliminate strong self-reference chains in sstable ref tidiers (CASSANDRA-9656)
   * Ensure StreamSession uses canonical sstable reader instances (CASSANDRA-9700) 
   * Ensure memtable book keeping is not corrupted in the event we shrink usage (CASSANDRA-9681)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/ebe18bb2/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/Scrubber.java
index 10952e7,b1c12e0..5a0b354
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@@ -109,9 -101,17 +109,18 @@@ public class Scrubber implements Closea
                          ? new ScrubController(cfs)
                         : new CompactionController(cfs, Collections.singleton(sstable), CompactionManager.getDefaultGcBefore(cfs));
          this.isCommutative = cfs.metadata.isCounter();
+ 
+         boolean hasIndexFile = (new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX))).exists();
 +        this.isIndex = cfs.isIndex();
+         if (!hasIndexFile)
+         {
+             // if there's any corruption in the -Data.db then rows can't be skipped over. but it's worth a shot.
+             outputHandler.warn("Missing component: " + sstable.descriptor.filenameFor(Component.PRIMARY_INDEX));
+         }
 -
 +        this.checkData = checkData && !this.isIndex; //LocalByPartitionerType does not support validation
-         this.expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(toScrub)));
+         this.expectedBloomFilterSize = Math.max(
+             cfs.metadata.getMinIndexInterval(),
+             hasIndexFile ? (int)(SSTableReader.getApproximateKeyCount(toScrub)) : 0);
  
          // loop through each row, deserializing to check for damage.
         // we'll also loop through the index at the same time, using the position from the index to recover if the
@@@ -130,12 -134,15 +143,13 @@@
      public void scrub()
      {
          outputHandler.output(String.format("Scrubbing %s (%s bytes)", sstable, dataFile.length()));
-         try (SSTableRewriter writer = new SSTableRewriter(cfs, transaction, sstable.maxDataAge, isOffline);)
 -        Set<SSTableReader> oldSSTable = Sets.newHashSet(sstable);
 -        SSTableRewriter writer = new SSTableRewriter(cfs, oldSSTable, sstable.maxDataAge, isOffline);
 -        try
++        try (SSTableRewriter writer = new SSTableRewriter(cfs, transaction, sstable.maxDataAge, isOffline))
          {
-             nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
+             nextIndexKey = indexAvailable() ? ByteBufferUtil.readWithShortLength(indexFile) : null;
+             if (indexAvailable())
              {
                  // throw away variable so we don't have a side effect in the assert
 -                long firstRowPositionFromIndex = sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(indexFile, sstable.descriptor.version).position;
 +                long firstRowPositionFromIndex = rowIndexEntrySerializer.deserialize(indexFile, sstable.descriptor.version).position;
                  assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
              }
  
@@@ -174,11 -182,12 +188,11 @@@
                      dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
                  }
  
 -                dataSize = dataSizeFromIndex;
                  // avoid an NPE if key is null
                  String keyName = key == null ? "(unreadable key)" : ByteBufferUtil.bytesToHex(key.getKey());
 -                outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize));
 +                outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSizeFromIndex));
  
-                 assert currentIndexKey != null || indexFile.isEOF();
+                 assert currentIndexKey != null || !indexAvailable();
  
                  try
                  {
@@@ -191,14 -200,16 +205,14 @@@
                                  ByteBufferUtil.bytesToHex(key.getKey()), ByteBufferUtil.bytesToHex(currentIndexKey))));
                      }
  
-                     if (dataSizeFromIndex > dataFile.length())
 -                    if (dataSize > dataFile.length())
 -                        throw new IOError(new IOException("Impossible row size (greater than file length): " + dataSize));
++                    if (indexFile != null && dataSizeFromIndex > dataFile.length())
 +                        throw new IOError(new IOException("Impossible row size (greater than file length): " + dataSizeFromIndex));
  
-                     if (dataStart != dataStartFromIndex)
+                     if (indexFile != null && dataStart != dataStartFromIndex)
                          outputHandler.warn(String.format("Data file row position %d differs from index file row position %d", dataStart, dataStartFromIndex));
  
 -                    if (indexFile != null && dataSize != dataSizeFromIndex)
 -                        outputHandler.warn(String.format("Data file row size %d different from index file row size %d", dataSize, dataSizeFromIndex));
 +                    SSTableIdentityIterator atoms = new SSTableIdentityIterator(sstable, dataFile, key, checkData);
  
 -                    SSTableIdentityIterator atoms = new SSTableIdentityIterator(sstable, dataFile, key, dataSize, validateColumns);
                      if (prevKey != null && prevKey.compareTo(key) > 0)
                      {
                          saveOutOfOrderRow(prevKey, key, atoms);
@@@ -316,10 -327,11 +330,11 @@@
          currentRowPositionFromIndex = nextRowPositionFromIndex;
          try
          {
-             nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
-             nextRowPositionFromIndex = indexFile.isEOF()
+             nextIndexKey = !indexAvailable() ? null : ByteBufferUtil.readWithShortLength(indexFile);
+ 
+             nextRowPositionFromIndex = !indexAvailable()
                      ? dataFile.length()
 -                    : sstable.metadata.comparator.rowIndexEntrySerializer().deserialize(indexFile, sstable.descriptor.version).position;
 +                    : rowIndexEntrySerializer.deserialize(indexFile, sstable.descriptor.version).position;
          }
          catch (Throwable th)
          {
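
----------------------------------------------------------------------
The Scrubber hunks above all apply one idea from CASSANDRA-9591: treat the -Index.db component as optional, warn when it is missing, and gate every index-file read behind an availability check so the scrub degrades to a plain sequential pass over -Data.db. A minimal stand-alone Java sketch of that guard pattern follows; the names used here (IndexGuardedScrub, indexAvailable, a readLong index entry) are illustrative assumptions, not Cassandra's actual Scrubber API.

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

// Guard-pattern sketch: the index file is optional, every index read is gated on
// indexAvailable(), and scrubbing degrades to a sequential scan of the data file
// when the -Index.db component is missing. All names here are hypothetical.
public class IndexGuardedScrub implements AutoCloseable
{
    private final File dataFile;
    private final DataInputStream indexStream; // null when the index component is missing

    public IndexGuardedScrub(File dataFile, File indexFile) throws IOException
    {
        this.dataFile = dataFile;
        if (indexFile.exists())
        {
            this.indexStream = new DataInputStream(new FileInputStream(indexFile));
        }
        else
        {
            // analogous to outputHandler.warn("Missing component: ...") in the hunk above
            System.err.println("Missing component: " + indexFile + "; scrubbing from data file only");
            this.indexStream = null;
        }
    }

    // true only when an index stream exists and still has bytes left to read
    private boolean indexAvailable() throws IOException
    {
        return indexStream != null && indexStream.available() > 0;
    }

    public void scrub() throws IOException
    {
        // next row position according to the index, or end-of-data when no index is
        // available, so index-derived size checks are skipped rather than failing
        long nextRowPositionFromIndex = indexAvailable()
                                      ? indexStream.readLong() // hypothetical fixed-width entry
                                      : dataFile.length();
        System.out.println("next row position: " + nextRowPositionFromIndex);
    }

    @Override
    public void close() throws IOException
    {
        if (indexStream != null)
            indexStream.close();
    }
}

The same fallback keeps expectedBloomFilterSize meaningful in the real change: when the index is absent, getApproximateKeyCount cannot be consulted, so the constructor falls back to the minimum index interval (the Math.max(..., hasIndexFile ? ... : 0) hunk above).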

