cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From marc...@apache.org
Subject [2/2] git commit: Merge branch 'cassandra-2.0' into cassandra-2.1
Date Thu, 19 Jun 2014 08:09:38 GMT
Merge branch 'cassandra-2.0' into cassandra-2.1

Conflicts:
	CHANGES.txt
	src/java/org/apache/cassandra/db/ColumnFamily.java
	src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/3ff7a776
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/3ff7a776
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/3ff7a776

Branch: refs/heads/cassandra-2.1
Commit: 3ff7a776a76d5ca9c3ee569067896f6098075885
Parents: baf524f 303ff22
Author: Marcus Eriksson <marcuse@apache.org>
Authored: Thu Jun 19 09:51:28 2014 +0200
Committer: Marcus Eriksson <marcuse@apache.org>
Committed: Thu Jun 19 09:51:28 2014 +0200

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../org/apache/cassandra/db/ColumnFamily.java   |  21 ++--
 .../db/compaction/LazilyCompactedRow.java       |  10 +-
 .../cassandra/io/sstable/SSTableWriter.java     |   3 +
 .../apache/cassandra/db/ColumnFamilyTest.java   |  12 +++
 .../db/compaction/CompactionsTest.java          | 100 ++++++++++++++++++-
 6 files changed, 132 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index af6e8c0,65e3161..ad5fb1c
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,25 -1,23 +1,26 @@@
 -2.0.9
 - * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
 - * Handle empty CFs in Memtable#maybeUpdateLiveRatio() (CASSANDRA-7401)
 +2.1.0
 + * Avoid incremental compaction on Windows (CASSANDRA-7365)
 + * Fix exception when querying a composite-keyed table with a collection index
 +   (CASSANDRA-7372)
 + * Use node's host id in place of counter ids (CASSANDRA-7366)
   * Fix native protocol CAS batches (CASSANDRA-7337)
 + * Reduce likelihood of contention on local paxos locking (CASSANDRA-7359)
 + * Upgrade to Pig 0.12.1 (CASSANDRA-6556)
 + * Make sure we clear out repair sessions from netstats (CASSANDRA-7329)
 + * Don't fail streams on failure detector downs (CASSANDRA-3569)
 + * Add optional keyspace to DROP INDEX statement (CASSANDRA-7314)
 + * Reduce run time for CQL tests (CASSANDRA-7327)
 + * Fix heap size calculation on Windows (CASSANDRA-7352, 7353)
 + * RefCount native frames from netty (CASSANDRA-7245)
 + * Use tarball dir instead of /var for default paths (CASSANDRA-7136)
 + * Remove rows_per_partition_to_cache keyword (CASSANDRA-7193)
 +Merged from 2.0:
 + * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
   * Add per-CF range read request latency metrics (CASSANDRA-7338)
   * Fix NPE in StreamTransferTask.createMessageForRetry() (CASSANDRA-7323)
 - * Add conditional CREATE/DROP USER support (CASSANDRA-7264)
 - * Swap local and global default read repair chances (CASSANDRA-7320)
 - * Add missing iso8601 patterns for date strings (CASSANDRA-6973)
 - * Support selecting multiple rows in a partition using IN (CASSANDRA-6875)
 - * cqlsh: always emphasize the partition key in DESC output (CASSANDRA-7274)
 - * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
 - * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)
 - * Don't try to compact already-compacting files in HHOM (CASSANDRA-7288)
 - * Add authentication support to shuffle (CASSANDRA-6484)
 - * Cqlsh counts non-empty lines for "Blank lines" warning (CASSANDRA-7325)
   * Make StreamSession#closeSession() idempotent (CASSANDRA-7262)
   * Fix infinite loop on exception while streaming (CASSANDRA-7330)
 - * Reference sstables before populating key cache (CASSANDRA-7234)
+  * Account for range tombstones in min/max column names (CASSANDRA-7235)
  Merged from 1.2:
   * cqlsh: ignore .cassandra permission errors (CASSANDRA-7266)
   * Errors in FlushRunnable may leave threads hung (CASSANDRA-7275)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/src/java/org/apache/cassandra/db/ColumnFamily.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ColumnFamily.java
index 45b8eff,638eacc..38e1591
--- a/src/java/org/apache/cassandra/db/ColumnFamily.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamily.java
@@@ -402,36 -418,31 +402,41 @@@ public abstract class ColumnFamily impl
          int maxLocalDeletionTime = Integer.MIN_VALUE;
          List<ByteBuffer> minColumnNamesSeen = Collections.emptyList();
          List<ByteBuffer> maxColumnNamesSeen = Collections.emptyList();
 +        boolean hasLegacyCounterShards = false;
+ 
+         if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE)
+             tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime);
+         Iterator<RangeTombstone> it = deletionInfo().rangeIterator();
+         while (it.hasNext())
+         {
+             RangeTombstone rangeTombstone = it.next();
+             tombstones.update(rangeTombstone.getLocalDeletionTime());
+ 
+             minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, rangeTombstone.min,
metadata.comparator);
+             maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, rangeTombstone.max,
metadata.comparator);
+         }
+ 
 -        for (Column column : this)
 +        for (Cell cell : this)
          {
-             if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE)
-                 tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime);
-             Iterator<RangeTombstone> it = deletionInfo().rangeIterator();
-             while (it.hasNext())
-             {
-                 RangeTombstone rangeTombstone = it.next();
-                 tombstones.update(rangeTombstone.getLocalDeletionTime());
-             }
 -            minTimestampSeen = Math.min(minTimestampSeen, column.minTimestamp());
 -            maxTimestampSeen = Math.max(maxTimestampSeen, column.maxTimestamp());
 -            maxLocalDeletionTime = Math.max(maxLocalDeletionTime, column.getLocalDeletionTime());
 -            int deletionTime = column.getLocalDeletionTime();
 +            minTimestampSeen = Math.min(minTimestampSeen, cell.timestamp());
 +            maxTimestampSeen = Math.max(maxTimestampSeen, cell.timestamp());
 +            maxLocalDeletionTime = Math.max(maxLocalDeletionTime, cell.getLocalDeletionTime());
 +            int deletionTime = cell.getLocalDeletionTime();
              if (deletionTime < Integer.MAX_VALUE)
                  tombstones.update(deletionTime);
 -            minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, column.name,
metadata.comparator);
 -            maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, column.name,
metadata.comparator);
 +            minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, cell.name(),
metadata.comparator);
 +            maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, cell.name(),
metadata.comparator);
 +            if (cell instanceof CounterCell)
 +                hasLegacyCounterShards = hasLegacyCounterShards || ((CounterCell) cell).hasLegacyShards();
          }
 -        return new ColumnStats(getColumnCount(), minTimestampSeen, maxTimestampSeen, maxLocalDeletionTime,
tombstones, minColumnNamesSeen, maxColumnNamesSeen);
 +        return new ColumnStats(getColumnCount(),
 +                               minTimestampSeen,
 +                               maxTimestampSeen,
 +                               maxLocalDeletionTime,
 +                               tombstones,
 +                               minColumnNamesSeen,
 +                               maxColumnNamesSeen,
 +                               hasLegacyCounterShards);
      }
  
      public boolean isMarkedForDelete()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
index d0f3610,7cd0842..73a1927
--- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
+++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
@@@ -244,6 -252,11 +244,10 @@@ public class LazilyCompactedRow extend
                  }
                  else
                  {
+                     tombstones.update(t.getLocalDeletionTime());
+ 
+                     minColumnNameSeen = ColumnNameHelper.minComponents(minColumnNameSeen,
t.min, controller.cfs.metadata.comparator);
+                     maxColumnNameSeen = ColumnNameHelper.maxComponents(maxColumnNameSeen,
t.max, controller.cfs.metadata.comparator);
 -
                      return t;
                  }
              }
@@@ -259,22 -270,11 +263,16 @@@
                      container.clear();
                      return null;
                  }
 -                Column reduced = purged.iterator().next();
 +
 +                int localDeletionTime = container.deletionInfo().getTopLevelDeletion().localDeletionTime;
 +                if (localDeletionTime < Integer.MAX_VALUE)
 +                    tombstones.update(localDeletionTime);
-                 Iterator<RangeTombstone> rangeTombstoneIterator = container.deletionInfo().rangeIterator();
-                 while (rangeTombstoneIterator.hasNext())
-                 {
-                     RangeTombstone rangeTombstone = rangeTombstoneIterator.next();
-                     tombstones.update(rangeTombstone.getLocalDeletionTime());
-                 }
 +
 +                Cell reduced = iter.next();
                  container.clear();
  
 -                // PrecompactedRow.removeDeleted has only checked the top-level CF deletion
times,
 -                // not the range tombstones. For that we use the columnIndexer tombstone
tracker.
 +                // removeDeleted has only checked the top-level CF deletion times,
 +                // not the range tombstone. For that we use the columnIndexer tombstone
tracker.
                  if (indexBuilder.tombstoneTracker().isDeleted(reduced))
                  {
                      indexer.remove(reduced);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index 9567f0e,3a2dca0..f2168c5
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@@ -266,9 -243,12 +266,12 @@@ public class SSTableWriter extends SSTa
          {
              RangeTombstone rangeTombstone = rangeTombstoneIterator.next();
              tombstones.update(rangeTombstone.getLocalDeletionTime());
+ 
+             minColumnNames = ColumnNameHelper.minComponents(minColumnNames, rangeTombstone.min,
metadata.comparator);
+             maxColumnNames = ColumnNameHelper.maxComponents(maxColumnNames, rangeTombstone.max,
metadata.comparator);
          }
  
 -        Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, columnCount, ColumnSerializer.Flag.PRESERVE_SIZE,
Integer.MIN_VALUE, version);
 +        Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, ColumnSerializer.Flag.PRESERVE_SIZE,
Integer.MIN_VALUE, version);
          try
          {
              while (iter.hasNext())

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
index 151cbdc,e13d0d7..2141020
--- a/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
+++ b/test/unit/org/apache/cassandra/db/ColumnFamilyTest.java
@@@ -206,5 -163,17 +206,17 @@@ public class ColumnFamilyTest extends S
          cf.delete(new DeletionInfo(timestamp, localDeletionTime));
          ColumnStats stats = cf.getColumnStats();
          assertEquals(timestamp, stats.maxTimestamp);
+ 
 -        cf.delete(new RangeTombstone(ByteBufferUtil.bytes("col2"), ByteBufferUtil.bytes("col21"),
timestamp, localDeletionTime));
++        cf.delete(new RangeTombstone(Util.cellname("col2"), Util.cellname("col21"), timestamp,
localDeletionTime));
+ 
+         stats = cf.getColumnStats();
+         assertEquals(ByteBufferUtil.bytes("col2"), stats.minColumnNames.get(0));
+         assertEquals(ByteBufferUtil.bytes("col21"), stats.maxColumnNames.get(0));
+ 
 -        cf.delete(new RangeTombstone(ByteBufferUtil.bytes("col6"), ByteBufferUtil.bytes("col61"),
timestamp, localDeletionTime));
++        cf.delete(new RangeTombstone(Util.cellname("col6"), Util.cellname("col61"), timestamp,
localDeletionTime));
+         stats = cf.getColumnStats();
+ 
+         assertEquals(ByteBufferUtil.bytes("col2"), stats.minColumnNames.get(0));
+         assertEquals(ByteBufferUtil.bytes("col61"), stats.maxColumnNames.get(0));
      }
  }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/3ff7a776/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
index 8a24771,1879838..c60b650
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
@@@ -40,8 -43,11 +42,12 @@@ import org.apache.cassandra.dht.BytesTo
  import org.apache.cassandra.dht.Range;
  import org.apache.cassandra.dht.Token;
  import org.apache.cassandra.io.sstable.Component;
 -import org.apache.cassandra.io.sstable.SSTableMetadata;
  import org.apache.cassandra.io.sstable.SSTableReader;
  import org.apache.cassandra.io.sstable.SSTableScanner;
+ import org.apache.cassandra.io.sstable.SSTableWriter;
++import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
++import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
+ import org.apache.cassandra.service.StorageService;
  import org.apache.cassandra.utils.ByteBufferUtil;
  import org.apache.cassandra.utils.FBUtilities;
  import org.apache.cassandra.utils.Pair;
@@@ -344,6 -349,96 +350,98 @@@ public class CompactionsTest extends Sc
      }
  
      @Test
+     public void testRangeTombstones() throws IOException, ExecutionException, InterruptedException
+     {
+         boolean lazy = false;
+ 
+         do
+         {
+             Keyspace keyspace = Keyspace.open(KEYSPACE1);
+             ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");
+             cfs.clearUnsafe();
+ 
+             // disable compaction while flushing
+             cfs.disableAutoCompaction();
+ 
+             final CFMetaData cfmeta = cfs.metadata;
 -            Directories dir = Directories.create(cfmeta.ksName, cfmeta.cfName);
++            Directories dir = cfs.directories;
+ 
+             ArrayList<DecoratedKey> keys = new ArrayList<DecoratedKey>();
+ 
+             for (int i=0; i < 4; i++)
+             {
+                 keys.add(Util.dk(""+i));
+             }
+ 
+             ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfmeta);
+             cf.addColumn(Util.column("01", "a", 1)); // this must not resurrect
+             cf.addColumn(Util.column("a", "a", 3));
 -            cf.deletionInfo().add(new RangeTombstone(ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("b"),
2, (int) (System.currentTimeMillis()/1000)),cfmeta.comparator);
++            cf.deletionInfo().add(new RangeTombstone(Util.cellname("0"), Util.cellname("b"),
2, (int) (System.currentTimeMillis()/1000)),cfmeta.comparator);
+ 
+             SSTableWriter writer = new SSTableWriter(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
+                                                      0,
++                                                     0,
+                                                      cfs.metadata,
+                                                      StorageService.getPartitioner(),
 -                                                     SSTableMetadata.createCollector(cfs.metadata.comparator));
++                                                     new MetadataCollector(cfs.metadata.comparator));
+ 
+ 
+             writer.append(Util.dk("0"), cf);
+             writer.append(Util.dk("1"), cf);
+             writer.append(Util.dk("3"), cf);
+ 
+             cfs.addSSTable(writer.closeAndOpenReader());
+             writer = new SSTableWriter(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables()),
+                                        0,
++                                       0,
+                                        cfs.metadata,
+                                        StorageService.getPartitioner(),
 -                                       SSTableMetadata.createCollector(cfs.metadata.comparator));
++                                       new MetadataCollector(cfs.metadata.comparator));
+ 
+             writer.append(Util.dk("0"), cf);
+             writer.append(Util.dk("1"), cf);
+             writer.append(Util.dk("2"), cf);
+             writer.append(Util.dk("3"), cf);
+             cfs.addSSTable(writer.closeAndOpenReader());
+ 
+             Collection<SSTableReader> toCompact = cfs.getSSTables();
+             assert toCompact.size() == 2;
+ 
+             // forcing lazy compaction
+             if (lazy)
+                 DatabaseDescriptor.setInMemoryCompactionLimit(0);
+ 
+             // Force compaction on first sstables. Since each row is in only one sstable,
we will be using EchoedRow.
+             Util.compact(cfs, toCompact);
+             assertEquals(1, cfs.getSSTables().size());
+ 
+             // Now assert we do have the 4 keys
+             assertEquals(4, Util.getRangeSlice(cfs).size());
+ 
+             ArrayList<DecoratedKey> k = new ArrayList<DecoratedKey>();
+             for (Row r : Util.getRangeSlice(cfs))
+             {
+                 k.add(r.key);
 -                assertEquals(ByteBufferUtil.bytes("a"),r.cf.getColumn(ByteBufferUtil.bytes("a")).value());
 -                assertNull(r.cf.getColumn(ByteBufferUtil.bytes("01")));
 -                assertEquals(3,r.cf.getColumn(ByteBufferUtil.bytes("a")).timestamp());
++                assertEquals(ByteBufferUtil.bytes("a"),r.cf.getColumn(Util.cellname("a")).value());
++                assertNull(r.cf.getColumn(Util.cellname("01")));
++                assertEquals(3,r.cf.getColumn(Util.cellname("a")).timestamp());
+             }
+ 
+             for (SSTableReader sstable : cfs.getSSTables())
+             {
 -                SSTableMetadata stats = sstable.getSSTableMetadata();
++                StatsMetadata stats = sstable.getSSTableMetadata();
+                 assertEquals(ByteBufferUtil.bytes("0"), stats.minColumnNames.get(0));
+                 assertEquals(ByteBufferUtil.bytes("b"), stats.maxColumnNames.get(0));
+             }
+ 
+             assertEquals(keys, k);
+ 
+             lazy=!lazy;
+         }
+         while (lazy);
+     }
+ 
+     @Test
      public void testCompactionLog() throws Exception
      {
          SystemKeyspace.discardCompactionsInProgress();


Mime
View raw message