cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ifesdj...@apache.org
Subject cassandra-dtest git commit: Add tests for SuperColumn tables
Date Mon, 25 Sep 2017 09:29:39 GMT
Repository: cassandra-dtest
Updated Branches:
  refs/heads/master 2a1ce8450 -> 12dd47219


Add tests for SuperColumn tables

Patch by Alex Petrov; reviewed by Philip Thompson for CASSANDRA-12373

Project: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/commit/12dd4721
Tree: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/tree/12dd4721
Diff: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/diff/12dd4721

Branch: refs/heads/master
Commit: 12dd47219cda7ae9d5bfe149b2aa034d251fc849
Parents: 2a1ce84
Author: Alex Petrov <oleksandr.petrov@gmail.com>
Authored: Thu Oct 13 21:59:41 2016 +0200
Committer: Alex Petrov <oleksandr.petrov@gmail.com>
Committed: Mon Sep 25 11:28:43 2017 +0200

----------------------------------------------------------------------
 thrift_tests.py                      |  70 ++++-
 upgrade_tests/thrift_upgrade_test.py | 426 ++++++++++++++++++++++++++++++
 upgrade_tests/upgrade_base.py        |  14 +-
 3 files changed, 505 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/12dd4721/thrift_tests.py
----------------------------------------------------------------------
diff --git a/thrift_tests.py b/thrift_tests.py
index 00efda9..9b46665 100644
--- a/thrift_tests.py
+++ b/thrift_tests.py
@@ -8,6 +8,7 @@ from thrift.protocol import TBinaryProtocol
 from thrift.Thrift import TApplicationException
 from thrift.transport import TSocket, TTransport
 
+from tools.assertions import assert_length_equal
 from dtest import (CASSANDRA_VERSION_FROM_BUILD, DISABLE_VNODES, NUM_TOKENS,
                    ReusableClusterTester, debug, init_default_config)
 from thrift_bindings.v22 import Cassandra
@@ -22,7 +23,7 @@ from thrift_bindings.v22.Cassandra import (CfDef, Column, ColumnDef,
                                            Mutation, NotFoundException,
                                            SlicePredicate, SliceRange,
                                            SuperColumn)
-from tools.assertions import assert_none, assert_one
+from tools.assertions import assert_all, assert_none, assert_one
 from tools.decorators import since
 
 
@@ -1238,6 +1239,73 @@ class TestMutations(ThriftTester):
         _insert_simple()
         assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1',
'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
 
    @since('2.1')
    def test_super_cql_read_compatibility(self):
        """
        Verify that SuperColumn data written through the Thrift API is
        readable via CQL, including filtering on the partition key and on
        both clustering columns (super column name and subcolumn name).

        Expected row shape per the asserts below: each Thrift subcolumn
        maps to one CQL row (key, column1, column2, value).

        @jira_ticket CASSANDRA-12373
        """
        _set_keyspace('Keyspace1')
        self.truncate_all('Super1')

        # Populate two partitions through Thrift; the expected rows below
        # show _insert_super writes super columns sc1/sc2 with subcolumns
        # 4/5/6 and values value4/value5/value6.
        _insert_super("key1")
        _insert_super("key2")

        node1 = self.cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)

        session.execute('USE "Keyspace1"')

        # Full-table scan: all subcolumns of both partitions, in primary
        # key order.
        assert_all(session, "SELECT * FROM \"Super1\"",
                   [["key1", "sc1", 4, "value4"],
                    ["key1", "sc2", 5, "value5"],
                    ["key1", "sc2", 6, "value6"],
                    ["key2", "sc1", 4, "value4"],
                    ["key2", "sc2", 5, "value5"],
                    ["key2", "sc2", 6, "value6"]])

        # Restrict by partition key only.
        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1')",
                   [["key1", "sc1", 4, "value4"],
                    ["key1", "sc2", 5, "value5"],
                    ["key1", "sc2", 6, "value6"]])

        # Restrict by partition key and super column name (column1).
        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2')",
                   [["key1", "sc2", 5, "value5"],
                    ["key1", "sc2", 6, "value6"]])

        # Fully-qualified primary key selects a single row.
        assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2') AND column2 = 5",
                   [["key1", "sc2", 5, "value5"]])

        # Same restriction as above, spelled with spaces around '='.
        assert_all(session, "SELECT * FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
                   [["key1", "sc2", 5, "value5"],
                    ["key1", "sc2", 6, "value6"]])

        # Projection of individual columns rather than SELECT *.
        assert_all(session, "SELECT column2, value FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
                   [[5, "value5"],
                    [6, "value6"]])
+
    @since('2.1')
    def test_super_cql_write_compatibility(self):
        """
        Verify that rows written through CQL into a SuperColumn table are
        readable via the Thrift get_slice API with the expected super
        column / subcolumn structure and write timestamp.

        @jira_ticket CASSANDRA-12373
        """
        _set_keyspace('Keyspace1')
        self.truncate_all('Super1')

        node1 = self.cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)

        session.execute('USE "Keyspace1"')

        # Each CQL row (key, column1, column2, value) becomes one Thrift
        # subcolumn (name column2, value `value`) inside super column
        # `column1`; the explicit timestamp is checked below.
        query = "INSERT INTO \"Super1\" (key, column1, column2, value) VALUES (textAsBlob(%s), textAsBlob(%s), %s, textAsBlob(%s)) USING TIMESTAMP 1234"
        session.execute(query, ("key1", "sc1", 4, "value4"))
        session.execute(query, ("key1", "sc2", 5, "value5"))
        session.execute(query, ("key1", "sc2", 6, "value6"))
        session.execute(query, ("key2", "sc1", 4, "value4"))
        session.execute(query, ("key2", "sc2", 5, "value5"))
        session.execute(query, ("key2", "sc2", 6, "value6"))

        # Slice the super columns sc1..sc2 of key1 through Thrift and
        # verify the subcolumns carry the CQL-written names, values and
        # timestamp.
        p = SlicePredicate(slice_range=SliceRange('sc1', 'sc2', False, 2))
        result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
        assert_length_equal(result, 2)
        self.assertEqual(result[0].super_column.name, 'sc1')
        self.assertEqual(result[0].super_column.columns[0], Column(_i64(4), 'value4', 1234))
        self.assertEqual(result[1].super_column.name, 'sc2')
        self.assertEqual(result[1].super_column.columns, [Column(_i64(5), 'value5', 1234), Column(_i64(6), 'value6', 1234)])
+
     def test_range_with_remove(self):
         _set_keyspace('Keyspace1')
         self.truncate_all('Standard1')

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/12dd4721/upgrade_tests/thrift_upgrade_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/thrift_upgrade_test.py b/upgrade_tests/thrift_upgrade_test.py
new file mode 100644
index 0000000..aefc037
--- /dev/null
+++ b/upgrade_tests/thrift_upgrade_test.py
@@ -0,0 +1,426 @@
+# coding: utf-8
+
+import itertools
+from unittest import skipUnless
+
+from cassandra.query import dict_factory
+from nose.tools import assert_equal, assert_not_in
+
+from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester, debug
+from thrift_bindings.v22 import Cassandra
+from thrift_bindings.v22.Cassandra import (Column, ColumnDef,
+                                           ColumnParent, ConsistencyLevel,
+                                           SlicePredicate, SliceRange)
+from thrift_tests import _i64, get_thrift_client
+from tools.assertions import assert_length_equal
+from tools.decorators import since
+from upgrade_base import UpgradeTester
+from upgrade_manifest import build_upgrade_pairs
+
+
def _create_dense_super_cf(name):
    """Build a CfDef for a *dense* super column family in keyspace 'ks'.

    Partition key and super column names are ascii; subcolumn names are
    longs and subcolumn values are ascii.
    """
    return Cassandra.CfDef(
        'ks', name,
        column_type='Super',
        key_validation_class='AsciiType',      # partition key type
        comparator_type='AsciiType',           # super column name type
        subcomparator_type='LongType',         # subcolumn name type
        default_validation_class='AsciiType',  # subcolumn value type
    )
+
def _create_sparse_super_cf(name):
    """Build a CfDef for a *sparse* super column family in keyspace 'ks'.

    Declares two long-typed subcolumns, col1 and col2; the partition key,
    super column names and subcolumn names are all ascii.
    """
    column_metadata = [ColumnDef(col_name, 'LongType', None, None)
                       for col_name in ('col1', 'col2')]
    return Cassandra.CfDef(
        'ks', name,
        column_type='Super',
        column_metadata=column_metadata,
        key_validation_class='AsciiType',
        comparator_type='AsciiType',
        subcomparator_type='AsciiType',
    )
+
def _validate_sparse_cql(cursor, cf='sparse_super_1', column1=u'column1',
                         col1=u'col1', col2=u'col2', key='key'):
    """Check the CQL-visible contents of a sparse super CF (dict rows).

    The column-name parameters allow validating tables whose columns
    were renamed via ALTER TABLE.
    """
    cursor.execute('use ks')

    def expected_row(pk, sc):
        # One CQL row per (partition key, super column name) pair.
        return {key: pk, column1: sc, col1: 200, col2: 300}

    all_rows = [expected_row(pk, sc)
                for pk in ('k1', 'k2')
                for sc in ('key1', 'key2')]
    assert_equal(list(cursor.execute("SELECT * FROM {}".format(cf))), all_rows)

    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))),
                 [expected_row('k1', 'key1'), expected_row('k1', 'key2')])

    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k2' AND {} = 'key1'".format(cf, key, column1))),
                 [expected_row('k2', 'key1')])
+
+
def _validate_sparse_thrift(client, cf='sparse_super_1'):
    """Check the Thrift-visible contents of a sparse super CF for key 'k1'.

    Expects two super columns ('key1', 'key2'), each holding the declared
    subcolumns col1/col2 plus the dynamic 'value1' subcolumn.
    """
    client.transport.open()
    client.set_keyspace('ks')
    predicate = SlicePredicate(slice_range=SliceRange('', '', False, 5))
    result = client.get_slice('k1', ColumnParent(cf), predicate, ConsistencyLevel.ONE)
    assert_length_equal(result, 2)
    assert_equal(result[0].super_column.name, 'key1')
    assert_equal(result[1].super_column.name, 'key2')

    expected_subcolumns = [('col1', _i64(200)),
                           ('col2', _i64(300)),
                           ('value1', _i64(100))]
    for cosc in result:
        for column, (expected_name, expected_value) in zip(cosc.super_column.columns, expected_subcolumns):
            assert_equal(column.name, expected_name)
            assert_equal(column.value, expected_value)
+
+
def _validate_dense_cql(cursor, cf='dense_super_1', key=u'key', column1=u'column1',
                        column2=u'column2', value=u'value'):
    """Check the CQL-visible contents of a dense super CF (dict rows).

    The column-name parameters allow validating tables whose columns
    were renamed via ALTER TABLE.
    """
    cursor.execute('use ks')

    def expected_row(pk, sc, num, val):
        return {key: pk, column1: sc, column2: num, value: val}

    k1_rows = [expected_row('k1', 'key1', 100, 'value1'),
               expected_row('k1', 'key2', 100, 'value1')]
    k2_rows = [expected_row('k2', 'key1', 200, 'value2'),
               expected_row('k2', 'key2', 200, 'value2')]

    assert_equal(list(cursor.execute("SELECT * FROM {}".format(cf))),
                 k1_rows + k2_rows)

    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))),
                 k1_rows)

    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1'".format(cf, key, column1))),
                 [k1_rows[0]])

    assert_equal(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 100".format(cf, key, column1, column2).replace("{} = 100", "")) if False else
                 cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1' AND {} = 100".format(cf, key, column1, column2))),
                 [k1_rows[0]])
+
+
def _validate_dense_thrift(client, cf='dense_super_1'):
    """Check the Thrift-visible contents of a dense super CF for key 'k1'.

    Expects two super columns ('key1', 'key2'), each holding a subcolumn
    named _i64(100) with value 'value1'.
    """
    client.transport.open()
    client.set_keyspace('ks')
    result = client.get_slice('k1', ColumnParent(cf),
                              SlicePredicate(slice_range=SliceRange('', '', False, 5)),
                              ConsistencyLevel.ONE)
    assert_length_equal(result, 2)
    assert_equal(result[0].super_column.name, 'key1')
    assert_equal(result[1].super_column.name, 'key2')

    # Route diagnostic output through the dtest debug() logger (imported
    # at module level) instead of bare print(), matching the logging
    # convention used everywhere else in this file.
    debug(result[0])
    debug(result[1])
    for cosc in result:
        assert_equal(cosc.super_column.columns[0].name, _i64(100))
        assert_equal(cosc.super_column.columns[0].value, 'value1')
+
+
class UpgradeSuperColumnsThrough(Tester):
    """
    Upgrade a cluster holding SuperColumn data across Cassandra versions,
    validating the data over both Thrift and CQL before and after each
    upgrade step.
    """

    def upgrade_to_version(self, tag, nodes=None):
        """Drain, stop, re-point and restart `nodes` (default: all) on version `tag`."""
        debug('Upgrading to ' + tag)
        if nodes is None:
            nodes = self.cluster.nodelist()

        # Cleanly flush and stop every node before switching binaries.
        for node in nodes:
            debug('Shutting down node: ' + node.name)
            node.drain()
            node.watch_log_for("DRAINED")
            node.stop(wait_other_notice=False)

        # Update Cassandra Directory
        for node in nodes:
            node.set_install_dir(version=tag)
            # Thrift (rpc) must be explicitly re-enabled on newer versions.
            node.set_configuration_options(values={'start_rpc': 'true'})
            debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
        self.cluster.set_install_dir(version=tag)

        # Restart nodes on new version
        for node in nodes:
            debug('Starting %s on new version (%s)' % (node.name, tag))
            # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
            node.set_log_level("INFO")
            node.start(wait_other_notice=True, wait_for_binary_proto=True)
            # Rewrite sstables into the new version's on-disk format.
            node.nodetool('upgradesstables -a')

    def prepare(self, num_nodes=1, cassandra_version="github:apache/cassandra-2.2"):
        """Start a fresh `num_nodes` cluster on `cassandra_version` with Thrift enabled."""
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version=cassandra_version)

        cluster.populate(num_nodes)
        for node in self.cluster.nodelist():
            node.set_configuration_options(values={'start_rpc': 'true'})

        cluster.start()
        return cluster

    def dense_supercolumn_3_0_created_test(self):
        """A dense super CF created on 3.0 survives an upgrade to the current version."""
        cluster = self.prepare(cassandra_version='github:apache/cassandra-3.0')
        node = self.cluster.nodelist()[0]
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_1'))

        # Two super columns (key1, key2) per partition, one subcolumn each.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

        _validate_dense_thrift(client, cf='dense_super_1')

        # Upgrade the node to the version under test and re-validate over
        # both protocols with fresh connections.
        node.stop()
        self.set_node_to_current_version(node)
        node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')

    def dense_supercolumn_test(self):
        """A dense super CF created on 2.2 survives upgrading through 3.0 to current."""
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_1'))

        # Two super columns (key1, key2) per partition, one subcolumn each.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')

        # First hop: 2.2 -> 3.0, validate over Thrift with fresh connections.
        self.upgrade_to_version('github:apache/cassandra-3.0')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_dense_thrift(client, cf='dense_super_1')

        # Second hop: 3.0 -> version under test, validate over both protocols.
        node.stop()
        self.set_node_to_current_version(node)
        node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')

    def sparse_supercolumn_test(self):
        """A sparse super CF created on 2.2 survives upgrading through 3.0 to current."""
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)

        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        cf = _create_sparse_super_cf('sparse_super_2')
        client.system_add_column_family(cf)

        # Per partition: super columns key1/key2, each with the declared
        # col1/col2 subcolumns plus a dynamic value1/value2 subcolumn.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value2", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')

        # First hop: 2.2 -> 3.0, validate over Thrift with fresh connections.
        self.upgrade_to_version('github:apache/cassandra-3.0')

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_sparse_thrift(client, cf='sparse_super_2')

        # Second hop: 3.0 -> version under test, validate over both protocols.
        node.stop()
        self.set_node_to_current_version(node)
        node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()

        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)

        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')
+
+
@since('2.1', max_version='4.0.0')
class TestThrift(UpgradeTester):
    """
    Verify dense and sparse supercolumn functionality with and without renamed columns
    in 3.X after upgrading from 2.x.

    Each test writes SuperColumn data over Thrift (and optionally renames
    columns over CQL), validates it over both protocols, then re-validates
    on each node yielded by do_upgrade().

    @jira_ticket CASSANDRA-12373
    """

    def dense_supercolumn_test(self):
        """Dense super CF data stays readable over CQL and Thrift across the upgrade."""
        cursor = self.prepare(nodes=2, rf=2, row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_1'))

        # Two super columns (key1, key2) per partition, one subcolumn each.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_1', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

        _validate_dense_cql(cursor)
        _validate_dense_thrift(client)

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            # Reconnect: the upgrade restarts the node, dropping connections.
            client = get_thrift_client(host, port)
            _validate_dense_cql(cursor)
            _validate_dense_thrift(client)

    def dense_supercolumn_test_with_renames(self):
        """Renaming every column of a dense super CF via CQL survives the upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        client.system_add_column_family(_create_dense_super_cf('dense_super_2'))

        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(100), 'value1', 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('dense_super_2', 'key{}'.format(i)), Column(_i64(200), 'value2', 0), ConsistencyLevel.ONE)

        # Rename all four CQL-visible columns; Thrift reads must be unaffected.
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column1 TO renamed_column1")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column2 TO renamed_column2")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME value TO renamed_value")

        _validate_dense_cql(cursor, cf='dense_super_2', key=u'renamed_key', column1=u'renamed_column1', column2=u'renamed_column2', value=u'renamed_value')
        _validate_dense_thrift(client, cf='dense_super_2')

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            # Reconnect: the upgrade restarts the node, dropping connections.
            client = get_thrift_client(host, port)
            _validate_dense_cql(cursor, cf='dense_super_2', key=u'renamed_key', column1=u'renamed_column1', column2=u'renamed_column2', value=u'renamed_value')
            _validate_dense_thrift(client, cf='dense_super_2')

    def sparse_supercolumn_test_with_renames(self):
        """Renaming key/column1 of a sparse super CF via CQL survives the upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        cf = _create_sparse_super_cf('sparse_super_1')
        client.system_add_column_family(cf)

        # Rename the primary-key columns before inserting any data.
        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME column1 TO renamed_column1")

        # Per partition: super columns key1/key2, each with declared
        # col1/col2 subcolumns plus a dynamic value1/value2 subcolumn.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("value2", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_1', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client)
        _validate_sparse_cql(cursor, column1=u'renamed_column1', key=u'renamed_key')

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            # Reconnect: the upgrade restarts the node, dropping connections.
            client = get_thrift_client(host, port)
            _validate_sparse_cql(cursor, column1=u'renamed_column1', key=u'renamed_key')
            _validate_sparse_thrift(client)

    def sparse_supercolumn_test(self):
        """Sparse super CF data stays readable over CQL and Thrift across the upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster

        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")

        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)

        client.transport.open()
        client.set_keyspace('ks')

        cf = _create_sparse_super_cf('sparse_super_2')
        client.system_add_column_family(cf)

        # Per partition: super columns key1/key2, each with declared
        # col1/col2 subcolumns plus a dynamic value1/value2 subcolumn.
        for i in xrange(1, 3):
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value1", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("value2", _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col1", _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2', ColumnParent('sparse_super_2', 'key{}'.format(i)), Column("col2", _i64(300), 0), ConsistencyLevel.ONE)

        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            # Reconnect: the upgrade restarts the node, dropping connections.
            client = get_thrift_client(host, port)
            _validate_sparse_thrift(client, cf='sparse_super_2')
            _validate_sparse_cql(cursor, cf='sparse_super_2')
+
+
# Cluster topologies to multiply against every upgrade path.
# NOTE(review): ConsistencyLevel here resolves to the Thrift binding's
# enum (module import) — confirm its ALL value matches what the test
# harness expects for 'CL'.
topology_specs = [
    {'NODES': 3,
     'RF': 3,
     'CL': ConsistencyLevel.ALL},
    {'NODES': 2,
     'RF': 1},
]
# One concrete class spec per (topology, upgrade path) combination.
specs = [dict(s, UPGRADE_PATH=p, __test__=True)
         for s, p in itertools.product(topology_specs, build_upgrade_pairs())]

# Dynamically generate one TestThrift subclass per spec so each
# topology/upgrade-path combination is collected as its own test class.
for spec in specs:
    suffix = 'Nodes{num_nodes}RF{rf}_{pathname}'.format(num_nodes=spec['NODES'],
                                                        rf=spec['RF'],
                                                        pathname=spec['UPGRADE_PATH'].name)
    gen_class_name = TestThrift.__name__ + suffix
    # Guard against accidental name collisions in the module namespace.
    assert_not_in(gen_class_name, globals())

    # Skip paths that do not apply to the current environment unless the
    # full static upgrade matrix was explicitly requested.
    upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
    globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (TestThrift,), spec))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/12dd4721/upgrade_tests/upgrade_base.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_base.py b/upgrade_tests/upgrade_base.py
index 65957bd..484c4bf 100644
--- a/upgrade_tests/upgrade_base.py
+++ b/upgrade_tests/upgrade_base.py
@@ -72,7 +72,7 @@ class UpgradeTester(Tester):
         os.environ['CASSANDRA_VERSION'] = self.UPGRADE_PATH.starting_version
         super(UpgradeTester, self).setUp()
 
-    def prepare(self, ordered=False, create_keyspace=True, use_cache=False,
+    def prepare(self, ordered=False, create_keyspace=True, use_cache=False, use_thrift=False,
                 nodes=None, rf=None, protocol_version=None, cl=None, **kwargs):
         nodes = self.NODES if nodes is None else nodes
         rf = self.RF if rf is None else rf
@@ -86,12 +86,15 @@ class UpgradeTester(Tester):
 
         cluster = self.cluster
 
-        if (ordered):
+        if ordered:
             cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")
 
-        if (use_cache):
+        if use_cache:
             cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})
 
+        if use_thrift:
+            cluster.set_configuration_options(values={'start_rpc': 'true'})
+
         start_rpc = kwargs.pop('start_rpc', False)
         if start_rpc:
             cluster.set_configuration_options(values={'start_rpc': True})
@@ -119,7 +122,7 @@ class UpgradeTester(Tester):
 
         return session
 
-    def do_upgrade(self, session, return_nodes=False, **kwargs):
+    def do_upgrade(self, session, use_thrift=False, return_nodes=False, **kwargs):
         """
         Upgrades the first node in the cluster and returns a list of
         (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
@@ -164,6 +167,9 @@ class UpgradeTester(Tester):
         node1.set_log_level("DEBUG" if DEBUG else "TRACE" if TRACE else "INFO")
         node1.set_configuration_options(values={'internode_compression': 'none'})
 
+        if use_thrift:
+            node1.set_configuration_options(values={'start_rpc': 'true'})
+
         if self.enable_for_jolokia:
             remove_perf_disable_shared_mem(node1)
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


Mime
View raw message