From: slebresne@apache.org
To: commits@cassandra.apache.org
Reply-To: dev@cassandra.apache.org
Date: Thu, 23 Feb 2017 14:22:07 -0000
Message-Id: <8bab1617d75c4c29bc91e113b19ac589@git.apache.org>
In-Reply-To: <95ff1611566f463b99d492258d333864@git.apache.org>
References: <95ff1611566f463b99d492258d333864@git.apache.org>
Subject: [4/5] cassandra git commit: Merge branch 'cassandra-3.0' into cassandra-3.11

Merge branch 'cassandra-3.0' into cassandra-3.11

* cassandra-3.0:
  Legacy deserializer can create unexpected boundary range tombstones

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/1dc1aa19
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/1dc1aa19
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/1dc1aa19

Branch: refs/heads/cassandra-3.11
Commit: 1dc1aa1982a7ab84034c95fa6ce6b3e4e2346fd2
Parents: 6487876 ab71748
Author: Sylvain Lebresne
Authored: Thu Feb 23 14:37:35 2017 +0100
Committer: Sylvain Lebresne
Committed: Thu Feb 23 14:37:35 2017 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |   1 +
 .../cassandra/db/UnfilteredDeserializer.java    | 334 ++++++++++---------
 .../cassandra/db/rows/RangeTombstoneMarker.java |   1 -
 .../apache/cassandra/service/DataResolver.java  |  31 +-
 .../cassandra/db/OldFormatDeserializerTest.java | 110 ++++++
 .../cassandra/service/DataResolverTest.java     | 125 ++++++-
 6 files changed, 432 insertions(+), 170 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 233898f,386029e..f5b9d28
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,18 -1,19 +1,19 @@@
 -3.0.12
 +3.11.0
 + * Fix equality comparisons of columns using the duration type (CASSANDRA-13174)
 + * Obfuscate password in stress-graphs (CASSANDRA-12233)
 + * Move to FastThreadLocalThread and FastThreadLocal (CASSANDRA-13034)
 + * nodetool stopdaemon errors out (CASSANDRA-13030)
 + *
Tables in system_distributed should not use gcgs of 0 (CASSANDRA-12954) + * Fix primary index calculation for SASI (CASSANDRA-12910) + * More fixes to the TokenAllocator (CASSANDRA-12990) + * NoReplicationTokenAllocator should work with zero replication factor (CASSANDRA-12983) +Merged from 3.0: + * Legacy deserializer can create unexpected boundary range tombstones (CASSANDRA-13237) * Remove unnecessary assertion from AntiCompactionTest (CASSANDRA-13070) * Fix cqlsh COPY for dates before 1900 (CASSANDRA-13185) -Merged from 2.2 - * Fix flaky LongLeveledCompactionStrategyTest (CASSANDRA-12202) - * Fix failing COPY TO STDOUT (CASSANDRA-12497) - * Fix ColumnCounter::countAll behaviour for reverse queries (CASSANDRA-13222) - * Exceptions encountered calling getSeeds() breaks OTC thread (CASSANDRA-13018) -Merged from 2.1: - * Log stacktrace of uncaught exceptions (CASSANDRA-13108) - -3.0.11 * Use keyspace replication settings on system.size_estimates table (CASSANDRA-9639) * Add vm.max_map_count StartupCheck (CASSANDRA-13008) - * Hint related logging should include the IP address of the destination in addition to + * Hint related logging should include the IP address of the destination in addition to host ID (CASSANDRA-13205) * Reloading logback.xml does not work (CASSANDRA-13173) * Lightweight transactions temporarily fail after upgrade from 2.1 to 3.0 (CASSANDRA-13109) http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java ---------------------------------------------------------------------- diff --cc src/java/org/apache/cassandra/db/UnfilteredDeserializer.java index 2c3bc1b,42a806a..79b8636 --- a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java +++ b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java @@@ -317,16 -331,7 +331,7 @@@ public abstract class UnfilteredDeseria } } - private boolean isRow(LegacyLayout.LegacyAtom atom) - { - if (atom.isCell()) - return true; - - LegacyLayout.LegacyRangeTombstone tombstone = atom.asRangeTombstone(); - return tombstone.isCollectionTombstone() || tombstone.isRowDeletion(metadata); - } - - public int compareNextTo(Slice.Bound bound) throws IOException + public int compareNextTo(ClusteringBound bound) throws IOException { if (!hasNext()) throw new IllegalStateException(); @@@ -397,13 -405,27 +405,28 @@@ private Unfiltered next; - private UnfilteredIterator(DeletionTime partitionDeletion) + UnfilteredIterator(CFMetaData metadata, + DeletionTime partitionDeletion, + SerializationHelper helper, + Supplier atomReader) { + this.metadata = metadata; + this.helper = helper; this.grouper = new LegacyLayout.CellGrouper(metadata, helper); this.tombstoneTracker = new TombstoneTracker(partitionDeletion); - this.atoms = new AtomIterator(); + this.atoms = new AtomIterator(atomReader); } + private boolean isRow(LegacyLayout.LegacyAtom atom) + { + if (atom.isCell()) + return true; + + LegacyLayout.LegacyRangeTombstone tombstone = atom.asRangeTombstone(); + return tombstone.isCollectionTombstone() || tombstone.isRowDeletion(metadata); + } + ++ public boolean hasNext() { // Note that we loop on next == null because TombstoneTracker.openNew() could return null below or the atom might be shadowed. @@@ -478,188 -500,200 +501,195 @@@ { throw new UnsupportedOperationException(); } - } - // Wraps the input of the deserializer to provide an iterator (and skip shadowed atoms). 
- // Note: this could use guava AbstractIterator except that we want to be able to clear - // the internal state of the iterator so it's cleaner to do it ourselves. - private class AtomIterator implements PeekingIterator - { - private boolean isDone; - private LegacyLayout.LegacyAtom next; - - private AtomIterator() + // Wraps the input of the deserializer to provide an iterator (and skip shadowed atoms). + // Note: this could use guava AbstractIterator except that we want to be able to clear + // the internal state of the iterator so it's cleaner to do it ourselves. + private class AtomIterator implements PeekingIterator { - } + private final Supplier atomReader; + private boolean isDone; + private LegacyLayout.LegacyAtom next; - public boolean hasNext() - { - if (isDone) - return false; + private AtomIterator(Supplier atomReader) + { + this.atomReader = atomReader; + } - if (next == null) + public boolean hasNext() { - next = readAtom(); + if (isDone) + return false; + if (next == null) { - isDone = true; - return false; + next = atomReader.get(); + if (next == null) + { + isDone = true; + return false; + } } + return true; } - return true; - } - private LegacyLayout.LegacyAtom readAtom() - { - try + public LegacyLayout.LegacyAtom next() { - return LegacyLayout.readLegacyAtom(metadata, in, readAllAsDynamic); + if (!hasNext()) + throw new UnsupportedOperationException(); + LegacyLayout.LegacyAtom toReturn = next; + next = null; + return toReturn; } - catch (IOException e) + + public LegacyLayout.LegacyAtom peek() { - throw new IOError(e); + if (!hasNext()) + throw new UnsupportedOperationException(); + return next; } - } - public LegacyLayout.LegacyAtom next() - { - if (!hasNext()) - throw new UnsupportedOperationException(); - LegacyLayout.LegacyAtom toReturn = next; - next = null; - return toReturn; - } + public void clearState() + { + this.next = null; + this.isDone = false; + } - public LegacyLayout.LegacyAtom peek() - { - if (!hasNext()) + public void remove() + { throw new UnsupportedOperationException(); - return next; + } } - public void clearState() + /** + * Tracks which range tombstones are open when deserializing the old format. + */ + private class TombstoneTracker { - this.next = null; - this.isDone = false; - } + private final DeletionTime partitionDeletion; - public void remove() - { - throw new UnsupportedOperationException(); - } - } + // Open tombstones sorted by their closing bound (i.e. first tombstone is the first to close). + // As we only track non-fully-shadowed ranges, the first range is necessarily the currently + // open tombstone (the one with the higher timestamp). + private final SortedSet openTombstones; - /** - * Tracks which range tombstones are open when deserializing the old format. - */ - private class TombstoneTracker - { - private final DeletionTime partitionDeletion; + public TombstoneTracker(DeletionTime partitionDeletion) + { + this.partitionDeletion = partitionDeletion; + this.openTombstones = new TreeSet<>((rt1, rt2) -> metadata.comparator.compare(rt1.stop.bound, rt2.stop.bound)); + } - // Open tombstones sorted by their closing bound (i.e. first tombstone is the first to close). - // As we only track non-fully-shadowed ranges, the first range is necessarily the currently - // open tombstone (the one with the higher timestamp). - private final SortedSet openTombstones; + /** + * Checks if the provided atom is fully shadowed by the open tombstones of this tracker (or the partition deletion). 
+ */ + public boolean isShadowed(LegacyLayout.LegacyAtom atom) + { + assert !hasClosingMarkerBefore(atom); + long timestamp = atom.isCell() ? atom.asCell().timestamp : atom.asRangeTombstone().deletionTime.markedForDeleteAt(); - public TombstoneTracker(DeletionTime partitionDeletion) - { - this.partitionDeletion = partitionDeletion; - this.openTombstones = new TreeSet<>((rt1, rt2) -> metadata.comparator.compare(rt1.stop.bound, rt2.stop.bound)); - } + if (partitionDeletion.deletes(timestamp)) + return true; - /** - * Checks if the provided atom is fully shadowed by the open tombstones of this tracker (or the partition deletion). - */ - public boolean isShadowed(LegacyLayout.LegacyAtom atom) - { - assert !hasClosingMarkerBefore(atom); - long timestamp = atom.isCell() ? atom.asCell().timestamp : atom.asRangeTombstone().deletionTime.markedForDeleteAt(); + SortedSet coveringTombstones = isRow(atom) ? openTombstones : openTombstones.tailSet(atom.asRangeTombstone()); + return Iterables.any(coveringTombstones, tombstone -> tombstone.deletionTime.deletes(timestamp)); + } - if (partitionDeletion.deletes(timestamp)) - return true; + /** + * Whether the currently open marker closes stricly before the provided row/RT. + */ + public boolean hasClosingMarkerBefore(LegacyLayout.LegacyAtom atom) + { + return !openTombstones.isEmpty() + && metadata.comparator.compare(openTombstones.first().stop.bound, atom.clustering()) < 0; + } - SortedSet coveringTombstones = isRow(atom) ? openTombstones : openTombstones.tailSet(atom.asRangeTombstone()); - return Iterables.any(coveringTombstones, tombstone -> tombstone.deletionTime.deletes(timestamp)); - } + /** + * Returns the unfiltered corresponding to closing the currently open marker (and update the tracker accordingly). + */ + public Unfiltered popClosingMarker() + { + assert !openTombstones.isEmpty(); - /** - * Whether the currently open marker closes stricly before the provided row/RT. - */ - public boolean hasClosingMarkerBefore(LegacyLayout.LegacyAtom atom) - { - return !openTombstones.isEmpty() - && metadata.comparator.compare(openTombstones.first().stop.bound, atom.clustering()) < 0; - } + Iterator iter = openTombstones.iterator(); + LegacyLayout.LegacyRangeTombstone first = iter.next(); + iter.remove(); - /** - * Returns the unfiltered corresponding to closing the currently open marker (and update the tracker accordingly). - */ - public Unfiltered popClosingMarker() - { - assert !openTombstones.isEmpty(); + // If that was the last open tombstone, we just want to close it. Otherwise, we have a boundary with the + // next tombstone + if (!iter.hasNext()) + return new RangeTombstoneBoundMarker(first.stop.bound, first.deletionTime); - Iterator iter = openTombstones.iterator(); - LegacyLayout.LegacyRangeTombstone first = iter.next(); - iter.remove(); + LegacyLayout.LegacyRangeTombstone next = iter.next(); + return RangeTombstoneBoundaryMarker.makeBoundary(false, first.stop.bound, first.stop.bound.invert(), first.deletionTime, next.deletionTime); + } + + /** + * Update the tracker given the provided newly open tombstone. This return the Unfiltered corresponding to the opening + * of said tombstone: this can be a simple open mark, a boundary (if there was an open tombstone superseded by this new one) + * or even null (if the new tombstone start is supersedes by the currently open tombstone). + * + * Note that this method assume the added tombstone is not fully shadowed, i.e. that !isShadowed(tombstone). 
It also + * assumes no opened tombstone closes before that tombstone (so !hasClosingMarkerBefore(tombstone)). + */ + public Unfiltered openNew(LegacyLayout.LegacyRangeTombstone tombstone) + { + if (openTombstones.isEmpty()) + { + openTombstones.add(tombstone); + return new RangeTombstoneBoundMarker(tombstone.start.bound, tombstone.deletionTime); + } - // If that was the last open tombstone, we just want to close it. Otherwise, we have a boundary with the - // next tombstone - if (!iter.hasNext()) - return new RangeTombstoneBoundMarker(first.stop.bound, first.deletionTime); + // Add the new tombstone, and then check if it changes the currently open deletion or not. + // Note: we grab the first tombstone (which represents the currently open deletion time) before adding + // because add() can remove that first. + Iterator iter = openTombstones.iterator(); + LegacyLayout.LegacyRangeTombstone first = iter.next(); - LegacyLayout.LegacyRangeTombstone next = iter.next(); - return RangeTombstoneBoundaryMarker.makeBoundary(false, first.stop.bound, first.stop.bound.invert(), first.deletionTime, next.deletionTime); - } + add(tombstone); - /** - * Update the tracker given the provided newly open tombstone. This return the Unfiltered corresponding to the opening - * of said tombstone: this can be a simple open mark, a boundary (if there was an open tombstone superseded by this new one) - * or even null (if the new tombston start is supersedes by the currently open tombstone). - * - * Note that this method assume the added tombstone is not fully shadowed, i.e. that !isShadowed(tombstone). It also - * assumes no opened tombstone closes before that tombstone (so !hasClosingMarkerBefore(tombstone)). - */ - public Unfiltered openNew(LegacyLayout.LegacyRangeTombstone tombstone) - { - if (openTombstones.isEmpty()) - { - openTombstones.add(tombstone); - return new RangeTombstoneBoundMarker(tombstone.start.bound, tombstone.deletionTime); + // If the newly opened tombstone superseds the currently open one, we have to produce a boundary to change + // the currently open deletion time, otherwise we have nothing to do. + return tombstone.deletionTime.supersedes(first.deletionTime) + ? RangeTombstoneBoundaryMarker.makeBoundary(false, tombstone.start.bound.invert(), tombstone.start.bound, first.deletionTime, tombstone.deletionTime) + : null; } - Iterator iter = openTombstones.iterator(); - LegacyLayout.LegacyRangeTombstone first = iter.next(); - if (tombstone.deletionTime.supersedes(first.deletionTime)) + /** + * Adds a new tombstone to openTombstones, removing anything that would be shadowed by this new tombstone. + */ + private void add(LegacyLayout.LegacyRangeTombstone tombstone) { - // We're supperseding the currently open tombstone, so we should produce a boundary that close the currently open - // one and open the new one. We should also add the tombstone, but if it stop after the first one, we should - // also remove that first tombstone as it won't be useful anymore. - if (metadata.comparator.compare(tombstone.stop.bound, first.stop.bound) >= 0) - iter.remove(); + // First, remove existing tombstone that is shadowed by this tombstone. + Iterator iter = openTombstones.iterator(); + while (iter.hasNext()) + { + LegacyLayout.LegacyRangeTombstone existing = iter.next(); + // openTombstones is ordered by stop bound and the new tombstone can't be shadowing anything that + // stop after it. 
+ if (metadata.comparator.compare(tombstone.stop.bound, existing.stop.bound) < 0) + break; + + // Note that we remove an existing tombstone even if it is equal to the new one because in that case, + // either the existing strictly stops before the new one and we don't want it, or it stops exactly + // like the new one but we're going to inconditionally add the new one anyway. + if (!existing.deletionTime.supersedes(tombstone.deletionTime)) + iter.remove(); + } openTombstones.add(tombstone); - return RangeTombstoneBoundaryMarker.makeBoundary(false, tombstone.start.bound.invert(), tombstone.start.bound, first.deletionTime, tombstone.deletionTime); } - else + + public boolean hasOpenTombstones() { - // If the new tombstone don't supersedes the currently open tombstone, we don't have anything to return, we - // just add the new tombstone (because we know tombstone is not fully shadowed, this imply the new tombstone - // simply extend after the first one and we'll deal with it later) - assert metadata.comparator.compare(tombstone.start.bound, first.stop.bound) <= 0; - openTombstones.add(tombstone); - return null; + return !openTombstones.isEmpty(); } - } - public boolean hasOpenTombstones() - { - return !openTombstones.isEmpty(); - } - private boolean formBoundary(LegacyLayout.LegacyRangeTombstone close, LegacyLayout.LegacyRangeTombstone open) - { - return metadata.comparator.compare(close.stop.bound, open.start.bound) == 0; - } -- - public void clearState() - { - openTombstones.clear(); + public void clearState() + { + openTombstones.clear(); + } } } + } } http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/src/java/org/apache/cassandra/db/rows/RangeTombstoneMarker.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/src/java/org/apache/cassandra/service/DataResolver.java ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java ---------------------------------------------------------------------- diff --cc test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java index 0000000,1060569..3008362 mode 000000,100644..100644 --- a/test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java +++ b/test/unit/org/apache/cassandra/db/OldFormatDeserializerTest.java @@@ -1,0 -1,110 +1,110 @@@ + /* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + package org.apache.cassandra.db; + + import java.util.function.Supplier; + + import org.junit.Test; + + import org.apache.cassandra.config.CFMetaData; + import org.apache.cassandra.db.UnfilteredDeserializer.OldFormatDeserializer.UnfilteredIterator; + import org.apache.cassandra.db.marshal.Int32Type; + import org.apache.cassandra.db.rows.RangeTombstoneMarker; + import org.apache.cassandra.db.rows.SerializationHelper; + import org.apache.cassandra.db.rows.Unfiltered; + import org.apache.cassandra.dht.Murmur3Partitioner; + import org.apache.cassandra.net.MessagingService; + import org.apache.cassandra.utils.ByteBufferUtil; + import org.apache.cassandra.utils.FBUtilities; + + import static org.junit.Assert.*; + + public class OldFormatDeserializerTest + { + @Test + public void testRangeTombstones() throws Exception + { + CFMetaData metadata = CFMetaData.Builder.create("ks", "table") + .withPartitioner(Murmur3Partitioner.instance) + .addPartitionKey("k", Int32Type.instance) + .addClusteringColumn("v", Int32Type.instance) + .build(); + + Supplier atomSupplier = supplier(rt(0, 10, 42), + rt(5, 15, 42)); + + UnfilteredIterator iterator = new UnfilteredIterator(metadata, + DeletionTime.LIVE, + new SerializationHelper(metadata, MessagingService.current_version, SerializationHelper.Flag.LOCAL), + atomSupplier); + + // As the deletion time are the same, we want this to produce a single range tombstone covering from 0 to 15. + + assertTrue(iterator.hasNext()); + + Unfiltered first = iterator.next(); + assertTrue(first.isRangeTombstoneMarker()); + RangeTombstoneMarker start = (RangeTombstoneMarker)first; + assertTrue(start.isOpen(false)); + assertFalse(start.isClose(false)); + assertEquals(0, toInt(start.openBound(false))); + assertEquals(42, start.openDeletionTime(false).markedForDeleteAt()); + + Unfiltered second = iterator.next(); + assertTrue(second.isRangeTombstoneMarker()); + RangeTombstoneMarker end = (RangeTombstoneMarker)second; + assertTrue(end.isClose(false)); + assertFalse(end.isOpen(false)); + assertEquals(15, toInt(end.closeBound(false))); + assertEquals(42, end.closeDeletionTime(false).markedForDeleteAt()); + + assertFalse(iterator.hasNext()); + } + + private static int toInt(ClusteringPrefix prefix) + { + assertTrue(prefix.size() == 1); + return ByteBufferUtil.toInt(prefix.get(0)); + } + + private static Supplier supplier(LegacyLayout.LegacyAtom... atoms) + { + return new Supplier() + { + int i = 0; + + public LegacyLayout.LegacyAtom get() + { + return i >= atoms.length ? null : atoms[i++]; + } + }; + } + + private static LegacyLayout.LegacyAtom rt(int start, int end, int deletion) + { + return new LegacyLayout.LegacyRangeTombstone(bound(start, true), bound(end, false), new DeletionTime(deletion, FBUtilities.nowInSeconds())); + } + + private static LegacyLayout.LegacyBound bound(int b, boolean isStart) + { - return new LegacyLayout.LegacyBound(isStart ? Slice.Bound.inclusiveStartOf(ByteBufferUtil.bytes(b)) : Slice.Bound.inclusiveEndOf(ByteBufferUtil.bytes(b)), ++ return new LegacyLayout.LegacyBound(isStart ? 
ClusteringBound.inclusiveStartOf(ByteBufferUtil.bytes(b)) : ClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes(b)), + false, + null); + } -} ++} http://git-wip-us.apache.org/repos/asf/cassandra/blob/1dc1aa19/test/unit/org/apache/cassandra/service/DataResolverTest.java ---------------------------------------------------------------------- diff --cc test/unit/org/apache/cassandra/service/DataResolverTest.java index 7c38224,2f72093..8e4f385 --- a/test/unit/org/apache/cassandra/service/DataResolverTest.java +++ b/test/unit/org/apache/cassandra/service/DataResolverTest.java @@@ -556,6 -554,73 +556,73 @@@ public class DataResolverTes assertRepairContainsDeletions(msg2, null, one_two, withExclusiveEndIf(three_four, timestamp2 >= timestamp1), five_six); } + /** + * Test cases where a boundary of a source is covered by another source deletion and timestamp on one or both side + * of the boundary are equal to the "merged" deletion. + * This is a test for CASSANDRA-13237 to make sure we handle this case properly. + */ + @Test + public void testRepairRangeTombstoneBoundary() throws UnknownHostException + { + testRepairRangeTombstoneBoundary(1, 0, 1); + messageRecorder.sent.clear(); + testRepairRangeTombstoneBoundary(1, 1, 0); + messageRecorder.sent.clear(); + testRepairRangeTombstoneBoundary(1, 1, 1); + } + + /** + * Test for CASSANDRA-13237, checking we don't fail (and handle correctly) the case where a RT boundary has the + * same deletion on both side (while is useless but could be created by legacy code pre-CASSANDRA-13237 and could + * thus still be sent). + */ + public void testRepairRangeTombstoneBoundary(int timestamp1, int timestamp2, int timestamp3) throws UnknownHostException + { - DataResolver resolver = new DataResolver(ks, command, ConsistencyLevel.ALL, 2); ++ DataResolver resolver = new DataResolver(ks, command, ConsistencyLevel.ALL, 2, System.nanoTime()); + InetAddress peer1 = peer(); + InetAddress peer2 = peer(); + + // 1st "stream" + RangeTombstone one_nine = tombstone("0", true , "9", true, timestamp1, nowInSec); + UnfilteredPartitionIterator iter1 = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk) + .addRangeTombstone(one_nine) + .buildUpdate()); + + // 2nd "stream" (build more manually to ensure we have the boundary we want) + RangeTombstoneBoundMarker open_one = marker("0", true, true, timestamp2, nowInSec); + RangeTombstoneBoundaryMarker boundary_five = boundary("5", false, timestamp2, nowInSec, timestamp3, nowInSec); + RangeTombstoneBoundMarker close_nine = marker("9", false, true, timestamp3, nowInSec); + UnfilteredPartitionIterator iter2 = iter(dk, open_one, boundary_five, close_nine); + + resolver.preprocess(readResponseMessage(peer1, iter1)); + resolver.preprocess(readResponseMessage(peer2, iter2)); + + boolean shouldHaveRepair = timestamp1 != timestamp2 || timestamp1 != timestamp3; + + // No results, we've only reconciled tombstones. + try (PartitionIterator data = resolver.resolve()) + { + assertFalse(data.hasNext()); + assertRepairFuture(resolver, shouldHaveRepair ? 1 : 0); + } + + assertEquals(shouldHaveRepair? 1 : 0, messageRecorder.sent.size()); + + if (!shouldHaveRepair) + return; + + MessageOut msg = getSentMessage(peer2); + assertRepairMetadata(msg); + assertRepairContainsNoColumns(msg); + + RangeTombstone expected = timestamp1 != timestamp2 + // We've repaired the 1st part + ? 
tombstone("0", true, "5", false, timestamp1, nowInSec) + // We've repaired the 2nd part + : tombstone("5", true, "9", true, timestamp1, nowInSec); + assertRepairContainsDeletions(msg, null, expected); + } + // Forces the start to be exclusive if the condition holds private static RangeTombstone withExclusiveStartIf(RangeTombstone rt, boolean condition) { @@@ -883,14 -940,40 +950,40 @@@ private RangeTombstone tombstone(Object start, boolean inclusiveStart, Object end, boolean inclusiveEnd, long markedForDeleteAt, int localDeletionTime) { - Kind startKind = inclusiveStart ? Kind.INCL_START_BOUND : Kind.EXCL_START_BOUND; - Kind endKind = inclusiveEnd ? Kind.INCL_END_BOUND : Kind.EXCL_END_BOUND; - - ClusteringBound startBound = ClusteringBound.create(startKind, cfm.comparator.make(start).getRawValues()); - ClusteringBound endBound = ClusteringBound.create(endKind, cfm.comparator.make(end).getRawValues()); - RangeTombstone.Bound startBound = rtBound(start, true, inclusiveStart); - RangeTombstone.Bound endBound = rtBound(end, false, inclusiveEnd); ++ ClusteringBound startBound = rtBound(start, true, inclusiveStart); ++ ClusteringBound endBound = rtBound(end, false, inclusiveEnd); return new RangeTombstone(Slice.make(startBound, endBound), new DeletionTime(markedForDeleteAt, localDeletionTime)); } - private RangeTombstone.Bound rtBound(Object value, boolean isStart, boolean inclusive) ++ private ClusteringBound rtBound(Object value, boolean isStart, boolean inclusive) + { - RangeTombstone.Bound.Kind kind = isStart ++ ClusteringBound.Kind kind = isStart + ? (inclusive ? Kind.INCL_START_BOUND : Kind.EXCL_START_BOUND) + : (inclusive ? Kind.INCL_END_BOUND : Kind.EXCL_END_BOUND); + - return new RangeTombstone.Bound(kind, cfm.comparator.make(value).getRawValues()); ++ return ClusteringBound.create(kind, cfm.comparator.make(value).getRawValues()); + } + - private RangeTombstone.Bound rtBoundary(Object value, boolean inclusiveOnEnd) ++ private ClusteringBoundary rtBoundary(Object value, boolean inclusiveOnEnd) + { - RangeTombstone.Bound.Kind kind = inclusiveOnEnd ++ ClusteringBound.Kind kind = inclusiveOnEnd + ? Kind.INCL_END_EXCL_START_BOUNDARY + : Kind.EXCL_END_INCL_START_BOUNDARY; - return new RangeTombstone.Bound(kind, cfm.comparator.make(value).getRawValues()); ++ return ClusteringBoundary.create(kind, cfm.comparator.make(value).getRawValues()); + } + + private RangeTombstoneBoundMarker marker(Object value, boolean isStart, boolean inclusive, long markedForDeleteAt, int localDeletionTime) + { + return new RangeTombstoneBoundMarker(rtBound(value, isStart, inclusive), new DeletionTime(markedForDeleteAt, localDeletionTime)); + } + + private RangeTombstoneBoundaryMarker boundary(Object value, boolean inclusiveOnEnd, long markedForDeleteAt1, int localDeletionTime1, long markedForDeleteAt2, int localDeletionTime2) + { + return new RangeTombstoneBoundaryMarker(rtBoundary(value, inclusiveOnEnd), + new DeletionTime(markedForDeleteAt1, localDeletionTime1), + new DeletionTime(markedForDeleteAt2, localDeletionTime2)); + } + private UnfilteredPartitionIterator fullPartitionDelete(CFMetaData cfm, DecoratedKey dk, long timestamp, int nowInSec) { return new SingletonUnfilteredPartitionIterator(PartitionUpdate.fullPartitionDelete(cfm, dk, timestamp, nowInSec).unfilteredIterator(), false);