http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstable_generation_loading_test.py
----------------------------------------------------------------------
diff --git a/sstable_generation_loading_test.py 
b/sstable_generation_loading_test.py
index 335f384..ab99a03 100644
--- a/sstable_generation_loading_test.py
+++ b/sstable_generation_loading_test.py
@@ -1,13 +1,17 @@
 import os
 import subprocess
 import time
-from distutils import dir_util
+import distutils.dir_util
+import pytest
+import logging
 
 from ccmlib import common as ccmcommon
 
-from dtest import Tester, debug, create_ks, create_cf
+from dtest import Tester, create_ks, create_cf
 from tools.assertions import assert_all, assert_none, assert_one
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 # WARNING: sstableloader tests should be added to 
TestSSTableGenerationAndLoading (below),
@@ -16,12 +20,14 @@ from tools.decorators import since
 
 # Also used by upgrade_tests/storage_engine_upgrade_test
 # to test loading legacy sstables
-class BaseSStableLoaderTest(Tester):
-    __test__ = False
+class TestBaseSStableLoader(Tester):
+
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.allow_log_errors = True
+
     upgrade_from = None
     compact = False
-    jvm_args = ()
-    allow_log_errors = True
 
     def create_schema(self, session, ks, compression):
         create_ks(session, ks, rf=2)
@@ -29,34 +35,34 @@ class BaseSStableLoaderTest(Tester):
         create_cf(session, "counter1", compression=compression, columns={'v': 
'counter'},
                   compact_storage=self.compact)
 
-    def sstableloader_compression_none_to_none_test(self):
+    def test_sstableloader_compression_none_to_none(self):
         self.load_sstable_with_configuration(None, None)
 
-    def sstableloader_compression_none_to_snappy_test(self):
+    def test_sstableloader_compression_none_to_snappy(self):
         self.load_sstable_with_configuration(None, 'Snappy')
 
-    def sstableloader_compression_none_to_deflate_test(self):
+    def test_sstableloader_compression_none_to_deflate(self):
         self.load_sstable_with_configuration(None, 'Deflate')
 
-    def sstableloader_compression_snappy_to_none_test(self):
+    def test_sstableloader_compression_snappy_to_none(self):
         self.load_sstable_with_configuration('Snappy', None)
 
-    def sstableloader_compression_snappy_to_snappy_test(self):
+    def test_sstableloader_compression_snappy_to_snappy(self):
         self.load_sstable_with_configuration('Snappy', 'Snappy')
 
-    def sstableloader_compression_snappy_to_deflate_test(self):
+    def test_sstableloader_compression_snappy_to_deflate(self):
         self.load_sstable_with_configuration('Snappy', 'Deflate')
 
-    def sstableloader_compression_deflate_to_none_test(self):
+    def test_sstableloader_compression_deflate_to_none(self):
         self.load_sstable_with_configuration('Deflate', None)
 
-    def sstableloader_compression_deflate_to_snappy_test(self):
+    def test_sstableloader_compression_deflate_to_snappy(self):
         self.load_sstable_with_configuration('Deflate', 'Snappy')
 
-    def sstableloader_compression_deflate_to_deflate_test(self):
+    def test_sstableloader_compression_deflate_to_deflate(self):
         self.load_sstable_with_configuration('Deflate', 'Deflate')
 
-    def sstableloader_with_mv_test(self):
+    def test_sstableloader_with_mv(self):
         """
         @jira_ticket CASSANDRA-11275
         """
@@ -70,32 +76,33 @@ class BaseSStableLoaderTest(Tester):
         self.load_sstable_with_configuration(ks='"Keyspace1"', 
create_schema=create_schema_with_mv)
 
     def copy_sstables(self, cluster, node):
-        for x in xrange(0, cluster.data_dir_count):
+        for x in range(0, cluster.data_dir_count):
             data_dir = os.path.join(node.get_path(), 'data{0}'.format(x))
             copy_root = os.path.join(node.get_path(), 'data{0}_copy'.format(x))
             for ddir in os.listdir(data_dir):
                 keyspace_dir = os.path.join(data_dir, ddir)
                 if os.path.isdir(keyspace_dir) and ddir != 'system':
                     copy_dir = os.path.join(copy_root, ddir)
-                    dir_util.copy_tree(keyspace_dir, copy_dir)
+                    distutils.dir_util.copy_tree(keyspace_dir, copy_dir)
 
     def load_sstables(self, cluster, node, ks):
         cdir = node.get_install_dir()
         sstableloader = os.path.join(cdir, 'bin', 
ccmcommon.platform_binary('sstableloader'))
         env = ccmcommon.make_cassandra_env(cdir, node.get_path())
         host = node.address()
-        for x in xrange(0, cluster.data_dir_count):
+        for x in range(0, cluster.data_dir_count):
             sstablecopy_dir = os.path.join(node.get_path(), 
'data{0}_copy'.format(x), ks.strip('"'))
             for cf_dir in os.listdir(sstablecopy_dir):
                 full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                 if os.path.isdir(full_cf_dir):
                     cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                     p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, 
stdout=subprocess.PIPE, env=env)
-                    exit_status = p.wait()
-                    debug('stdout: {out}'.format(out=p.stdout))
-                    debug('stderr: {err}'.format(err=p.stderr))
-                    self.assertEqual(0, exit_status,
-                                     "sstableloader exited with a non-zero 
status: {}".format(exit_status))
+                    stdout, stderr = p.communicate()
+                    exit_status = p.returncode
+                    logger.debug('stdout: 
{out}'.format(out=stdout.decode("utf-8")))
+                    logger.debug('stderr: 
{err}'.format(err=stderr.decode("utf-8")))
+                    assert 0 == exit_status, \
+                        "sstableloader exited with a non-zero status: 
{}".format(exit_status)
 
     def load_sstable_with_configuration(self, pre_compression=None, 
post_compression=None, ks="ks", create_schema=create_schema):
         """
@@ -109,24 +116,24 @@ class BaseSStableLoaderTest(Tester):
         NUM_KEYS = 1000
 
         for compression_option in (pre_compression, post_compression):
-            self.assertIn(compression_option, (None, 'Snappy', 'Deflate'))
+            assert compression_option in (None, 'Snappy', 'Deflate')
 
-        debug("Testing sstableloader with pre_compression=%s and 
post_compression=%s" % (pre_compression, post_compression))
+        logger.debug("Testing sstableloader with pre_compression=%s and 
post_compression=%s" % (pre_compression, post_compression))
         if self.upgrade_from:
-            debug("Testing sstableloader with upgrade_from=%s and compact=%s" 
% (self.upgrade_from, self.compact))
+            logger.debug("Testing sstableloader with upgrade_from=%s and 
compact=%s" % (self.upgrade_from, self.compact))
 
         cluster = self.cluster
         if self.upgrade_from:
-            debug("Generating sstables with version %s" % (self.upgrade_from))
+            logger.debug("Generating sstables with version %s" % 
(self.upgrade_from))
             default_install_dir = self.cluster.get_install_dir()
             # Forcing cluster version on purpose
             cluster.set_install_dir(version=self.upgrade_from)
-        debug("Using jvm_args={}".format(self.jvm_args))
+        logger.debug("Using jvm_args={}".format(self.jvm_args))
         cluster.populate(2).start(jvm_args=list(self.jvm_args))
         node1, node2 = cluster.nodelist()
         time.sleep(.5)
 
-        debug("creating keyspace and inserting")
+        logger.debug("creating keyspace and inserting")
         session = self.cql_connection(node1)
         self.create_schema(session, ks, pre_compression)
 
@@ -139,28 +146,28 @@ class BaseSStableLoaderTest(Tester):
         node2.nodetool('drain')
         node2.stop()
 
-        debug("Making a copy of the sstables")
+        logger.debug("Making a copy of the sstables")
         # make a copy of the sstables
         self.copy_sstables(cluster, node1)
 
-        debug("Wiping out the data and restarting cluster")
+        logger.debug("Wiping out the data and restarting cluster")
         # wipe out the node data.
         cluster.clear()
 
         if self.upgrade_from:
-            debug("Running sstableloader with version from %s" % 
(default_install_dir))
+            logger.debug("Running sstableloader with version from %s" % 
(default_install_dir))
             # Return to previous version
             cluster.set_install_dir(install_dir=default_install_dir)
 
         cluster.start(jvm_args=list(self.jvm_args))
         time.sleep(5)  # let gossip figure out what is going on
 
-        debug("re-creating the keyspace and column families.")
+        logger.debug("re-creating the keyspace and column families.")
         session = self.cql_connection(node1)
         self.create_schema(session, ks, post_compression)
         time.sleep(2)
 
-        debug("Calling sstableloader")
+        logger.debug("Calling sstableloader")
         # call sstableloader to re-load each cf.
         self.load_sstables(cluster, node1, ks)
 
@@ -171,42 +178,41 @@ class BaseSStableLoaderTest(Tester):
                 query = "SELECT * FROM counter1 WHERE KEY='{}'".format(i)
                 assert_one(session, query, [str(i), 1])
 
-        debug("Reading data back")
+        logger.debug("Reading data back")
         # Now we should have sstables with the loaded data, and the existing
         # data. Lets read it all to make sure it is all there.
         read_and_validate_data(session)
 
-        debug("scrubbing, compacting, and repairing")
+        logger.debug("scrubbing, compacting, and repairing")
         # do some operations and try reading the data again.
         node1.nodetool('scrub')
         node1.nodetool('compact')
         node1.nodetool('repair')
 
-        debug("Reading data back one more time")
+        logger.debug("Reading data back one more time")
         read_and_validate_data(session)
 
         # check that RewindableDataInputStreamPlus spill files are properly 
cleaned up
         if self.upgrade_from:
-            for x in xrange(0, cluster.data_dir_count):
+            for x in range(0, cluster.data_dir_count):
                 data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
                 for ddir in os.listdir(data_dir):
                     keyspace_dir = os.path.join(data_dir, ddir)
                     temp_files = 
self.glob_data_dirs(os.path.join(keyspace_dir, '*', "tmp", "*.dat"))
-                    debug("temp files: " + str(temp_files))
-                    self.assertEquals(0, len(temp_files), "Temporary files 
were not cleaned up.")
+                    logger.debug("temp files: " + str(temp_files))
+                    assert 0 == len(temp_files), "Temporary files were not 
cleaned up."
 
 
-class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
-    __test__ = True
+class TestSSTableGenerationAndLoading(TestBaseSStableLoader):
 
-    def sstableloader_uppercase_keyspace_name_test(self):
+    def test_sstableloader_uppercase_keyspace_name(self):
         """
         Make sure sstableloader works with upper case keyspace
         @jira_ticket CASSANDRA-10806
         """
         self.load_sstable_with_configuration(ks='"Keyspace1"')
 
-    def incompressible_data_in_compressed_table_test(self):
+    def test_incompressible_data_in_compressed_table(self):
         """
         tests for the bug that caused #3370:
         https://issues.apache.org/jira/browse/CASSANDRA-3370
@@ -227,10 +233,10 @@ class 
TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         create_cf(session, 'cf', compression="Deflate")
 
         # make unique column names, and values that are incompressible
-        for col in xrange(10):
+        for col in range(10):
             col_name = str(col)
             col_val = os.urandom(5000)
-            col_val = col_val.encode('hex')
+            col_val = col_val.hex()
             cql = "UPDATE cf SET v='%s' WHERE KEY='0' AND c='%s'" % (col_val, 
col_name)
             # print cql
             session.execute(cql)
@@ -238,9 +244,9 @@ class 
TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         node1.flush()
         time.sleep(2)
         rows = list(session.execute("SELECT * FROM cf WHERE KEY = '0' AND c < 
'8'"))
-        self.assertGreater(len(rows), 0)
+        assert len(rows) > 0
 
-    def remove_index_file_test(self):
+    def test_remove_index_file(self):
         """
         tests for situations similar to that found in #343:
         https://issues.apache.org/jira/browse/CASSANDRA-343
@@ -280,9 +286,9 @@ class 
TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
             for fname in os.listdir(path):
                 if fname.endswith('Data.db'):
                     data_found += 1
-        self.assertGreater(data_found, 0, "After removing index, filter, 
stats, and digest files, the data file was deleted!")
+        assert data_found > 0, "After removing index, filter, stats, and digest files, the data file was deleted!"
 
-    def sstableloader_with_mv_test(self):
+    def test_sstableloader_with_mv(self):
         """
         @jira_ticket CASSANDRA-11275
         """
@@ -296,7 +302,7 @@ class 
TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
         self.load_sstable_with_configuration(ks='"Keyspace1"', 
create_schema=create_schema_with_mv)
 
     @since('4.0')
-    def sstableloader_with_failing_2i_test(self):
+    def test_sstableloader_with_failing_2i(self):
         """
         @jira_ticket CASSANDRA-10130
 
@@ -341,7 +347,7 @@ class 
TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
 
         # Load SSTables with a failure during index creation
         node.byteman_submit(['./byteman/index_build_failure.btm'])
-        with self.assertRaises(Exception):
+        with pytest.raises(Exception):
             self.load_sstables(cluster, node, 'k')
 
         # Check that the index isn't marked as built and the old SSTable data 
has been loaded but not indexed

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstablesplit_test.py
----------------------------------------------------------------------
diff --git a/sstablesplit_test.py b/sstablesplit_test.py
index 371f91a..382f618 100644
--- a/sstablesplit_test.py
+++ b/sstablesplit_test.py
@@ -1,15 +1,17 @@
-from __future__ import division
-
 import time
+import logging
+
 from math import floor
 from os.path import getsize
 
-from dtest import Tester, debug
+from dtest import Tester
+
+logger = logging.getLogger(__name__)
 
 
 class TestSSTableSplit(Tester):
 
-    def split_test(self):
+    def test_split(self):
         """
         Check that after running compaction, sstablessplit can succesfully 
split
         The resultant sstable.  Check that split is reversible and that data 
is readable
@@ -20,7 +22,7 @@ class TestSSTableSplit(Tester):
         node = cluster.nodelist()[0]
         version = cluster.version()
 
-        debug("Run stress to insert data")
+        logger.debug("Run stress to insert data")
 
         node.stress(['write', 'n=1000', 'no-warmup', '-rate', 'threads=50',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
@@ -30,20 +32,20 @@ class TestSSTableSplit(Tester):
         self._do_compaction(node)
         self._do_split(node, version)
 
-        debug("Run stress to ensure data is readable")
+        logger.debug("Run stress to ensure data is readable")
         node.stress(['read', 'n=1000', '-rate', 'threads=25',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
 
     def _do_compaction(self, node):
-        debug("Compact sstables.")
+        logger.debug("Compact sstables.")
         node.flush()
         node.compact()
         keyspace = 'keyspace1'
         sstables = node.get_sstables(keyspace, '')
-        debug("Number of sstables after compaction: %s" % len(sstables))
+        logger.debug("Number of sstables after compaction: %s" % len(sstables))
 
     def _do_split(self, node, version):
-        debug("Run sstablesplit")
+        logger.debug("Run sstablesplit")
         time.sleep(5.0)
         node.stop()
 
@@ -55,7 +57,7 @@ class TestSSTableSplit(Tester):
         # get the initial sstables and their total size
         origsstables = node.get_sstables(keyspace, '')
         origsstable_size = sum([getsize(sstable) for sstable in origsstables])
-        debug("Original sstable and sizes before split: {}".format([(name, 
getsize(name)) for name in origsstables]))
+        logger.debug("Original sstable and sizes before split: 
{}".format([(name, getsize(name)) for name in origsstables]))
 
         # calculate the expected number of sstables post-split
         expected_num_sstables = floor(origsstable_size / expected_sstable_size)
@@ -65,24 +67,24 @@ class TestSSTableSplit(Tester):
                                        no_snapshot=True, debug=True)
 
         for (out, error, rc) in result:
-            debug("stdout: {}".format(out))
-            debug("stderr: {}".format(error))
-            debug("rc: {}".format(rc))
+            logger.debug("stdout: {}".format(out))
+            logger.debug("stderr: {}".format(error))
+            logger.debug("rc: {}".format(rc))
 
         # get the sstables post-split and their total size
         sstables = node.get_sstables(keyspace, '')
-        debug("Number of sstables after split: %s. expected %s" % 
(len(sstables), expected_num_sstables))
-        self.assertLessEqual(expected_num_sstables, len(sstables) + 1)
-        self.assertLessEqual(1, len(sstables))
+        logger.debug("Number of sstables after split: %s. expected %s" % 
(len(sstables), expected_num_sstables))
+        assert expected_num_sstables <= len(sstables) + 1
+        assert 1 <= len(sstables)
 
         # make sure none of the tables are bigger than the max expected size
         sstable_sizes = [getsize(sstable) for sstable in sstables]
         # add a bit extra for overhead
-        self.assertLessEqual(max(sstable_sizes), expected_sstable_size + 512)
+        assert max(sstable_sizes) <= expected_sstable_size + 512
         # make sure node can start with changed sstables
         node.start(wait_for_binary_proto=True)
 
-    def single_file_split_test(self):
+    def test_single_file_split(self):
         """
         Covers CASSANDRA-8623
 
@@ -92,7 +94,7 @@ class TestSSTableSplit(Tester):
         cluster.populate(1).start(wait_for_binary_proto=True)
         node = cluster.nodelist()[0]
 
-        debug("Run stress to insert data")
+        logger.debug("Run stress to insert data")
         node.stress(['write', 'n=300', 'no-warmup', '-rate', 'threads=50',
                      '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)'])
 
@@ -101,9 +103,9 @@ class TestSSTableSplit(Tester):
         result = node.run_sstablesplit(keyspace='keyspace1', size=1, 
no_snapshot=True)
 
         for (stdout, stderr, rc) in result:
-            debug(stderr)
-            failure = stderr.find("java.lang.AssertionError: Data component is 
missing")
-            self.assertEqual(failure, -1, "Error during sstablesplit")
+            logger.debug(stderr.decode("utf-8"))
+            failure = stderr.decode("utf-8").find("java.lang.AssertionError: 
Data component is missing")
+            assert failure == -1, "Error during sstablesplit"
 
         sstables = node.get_sstables('keyspace1', '')
-        self.assertGreaterEqual(len(sstables), 1, sstables)
+        assert len(sstables) >= 1, sstables

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/sstableutil_test.py
----------------------------------------------------------------------
diff --git a/sstableutil_test.py b/sstableutil_test.py
index 0886a26..30584c1 100644
--- a/sstableutil_test.py
+++ b/sstableutil_test.py
@@ -2,14 +2,18 @@ import glob
 import os
 import subprocess
 import time
+import pytest
+import logging
 
 from ccmlib import common
 from ccmlib.node import ToolError
 
-from dtest import Tester, debug
-from tools.decorators import since
+from dtest import Tester
 from tools.intervention import InterruptCompaction
 
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 # These must match the stress schema names
 KeyspaceName = 'keyspace1'
 TableName = 'standard1'
@@ -24,9 +28,9 @@ def _normcase_all(xs):
 
 
 @since('3.0')
-class SSTableUtilTest(Tester):
+class TestSSTableUtil(Tester):
 
-    def compaction_test(self):
+    def test_compaction(self):
         """
         @jira_ticket CASSANDRA-7066
 
@@ -38,14 +42,14 @@ class SSTableUtilTest(Tester):
 
         self._create_data(node, KeyspaceName, TableName, 100000)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
         node.compact()
         time.sleep(5)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
-    def abortedcompaction_test(self):
+    def test_abortedcompaction(self):
         """
         @jira_ticket CASSANDRA-7066
         @jira_ticket CASSANDRA-11497
@@ -61,14 +65,14 @@ class SSTableUtilTest(Tester):
 
         self._create_data(node, KeyspaceName, TableName, numrecords)
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertTrue(len(finalfiles) > 0, "Expected to find some final 
files")
-        self.assertEqual(0, len(tmpfiles), "Expected no tmp files")
+        assert len(finalfiles) > 0, "Expected to find some final files"
+        assert 0 == len(tmpfiles), "Expected no tmp files"
 
         t = InterruptCompaction(node, TableName, filename=log_file_name, 
delay=2)
         t.start()
 
         try:
-            debug("Compacting...")
+            logger.debug("Compacting...")
             node.compact()
         except ToolError:
             pass  # expected to fail
@@ -81,7 +85,7 @@ class SSTableUtilTest(Tester):
         # In most cases we should end up with some temporary files to clean 
up, but it may happen
         # that no temporary files are created if compaction finishes too early 
or starts too late
         # see CASSANDRA-11497
-        debug("Got {} final files and {} tmp files after compaction was 
interrupted"
+        logger.debug("Got {} final files and {} tmp files after compaction was 
interrupted"
               .format(len(finalfiles), len(tmpfiles)))
 
         self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)
@@ -89,15 +93,15 @@ class SSTableUtilTest(Tester):
         self._check_files(node, KeyspaceName, TableName, finalfiles, [])
 
         # restart to make sure not data is lost
-        debug("Restarting node...")
+        logger.debug("Restarting node...")
         node.start(wait_for_binary_proto=True)
         # in some environments, a compaction may start that would change 
sstable files. We should wait if so
         node.wait_for_compactions()
 
         finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
-        self.assertEqual(0, len(tmpfiles))
+        assert 0 == len(tmpfiles)
 
-        debug("Running stress to ensure data is readable")
+        logger.debug("Running stress to ensure data is readable")
         self._read_data(node, numrecords)
 
     def _create_data(self, node, ks, table, numrecords):
@@ -132,17 +136,17 @@ class SSTableUtilTest(Tester):
         else:
             expected_tmpfiles = _normcase_all(expected_tmpfiles)
 
-        debug("Comparing all files...")
-        self.assertEqual(sstablefiles, allfiles)
+        logger.debug("Comparing all files...")
+        assert sstablefiles == allfiles
 
-        debug("Comparing final files...")
-        self.assertEqual(expected_finalfiles, finalfiles)
+        logger.debug("Comparing final files...")
+        assert expected_finalfiles == finalfiles
 
-        debug("Comparing tmp files...")
-        self.assertEqual(expected_tmpfiles, tmpfiles)
+        logger.debug("Comparing tmp files...")
+        assert expected_tmpfiles == tmpfiles
 
-        debug("Comparing op logs...")
-        self.assertEqual(expected_oplogs, oplogs)
+        logger.debug("Comparing op logs...")
+        assert expected_oplogs == oplogs
 
         return finalfiles, tmpfiles
 
@@ -150,7 +154,7 @@ class SSTableUtilTest(Tester):
         """
         Invoke sstableutil and return the list of files, if any
         """
-        debug("About to invoke sstableutil with type {}...".format(type))
+        logger.debug("About to invoke sstableutil with type 
{}...".format(type))
         node1 = self.cluster.nodelist()[0]
         env = common.make_cassandra_env(node1.get_install_cassandra_root(), 
node1.get_node_cassandra_root())
         tool_bin = node1.get_tool('sstableutil')
@@ -168,14 +172,14 @@ class SSTableUtilTest(Tester):
 
         (stdout, stderr) = p.communicate()
 
-        self.assertEqual(p.returncode, 0, "Error invoking sstableutil; 
returned {code}".format(code=p.returncode))
+        assert p.returncode == 0, "Error invoking sstableutil; returned 
{code}".format(code=p.returncode)
 
         if stdout:
-            debug(stdout)
+            logger.debug(stdout.decode("utf-8"))
 
         match = ks + os.sep + table + '-'
-        ret = sorted(filter(lambda s: match in s, stdout.splitlines()))
-        debug("Got {} files of type {}".format(len(ret), type))
+        ret = sorted([s for s in stdout.decode("utf-8").splitlines() if match 
in s])
+        logger.debug("Got {} files of type {}".format(len(ret), type))
         return ret
 
     def _get_sstable_files(self, node, ks, table):
@@ -184,9 +188,10 @@ class SSTableUtilTest(Tester):
         """
         ret = []
         for data_dir in node.data_directories():
-            keyspace_dir = os.path.join(data_dir, ks)
+            # note, the /var/folders -> /private/var/folders stuff is to fixup 
mac compatibility
+            keyspace_dir = os.path.abspath(os.path.join(data_dir, 
ks)).replace("/var/folders", "/private/var/folders")
             for ext in ('*.db', '*.txt', '*.adler32', '*.crc32'):
-                ret.extend(glob.glob(os.path.join(keyspace_dir, table + '-*', 
ext)))
+                
ret.extend(glob.glob(os.path.abspath(os.path.join(keyspace_dir, table + '-*', 
ext))))
 
         return sorted(ret)
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/stress_tool_test.py
----------------------------------------------------------------------
diff --git a/stress_tool_test.py b/stress_tool_test.py
index d7f43c0..9a1ccd2 100644
--- a/stress_tool_test.py
+++ b/stress_tool_test.py
@@ -1,8 +1,11 @@
-from __future__ import division
+import pytest
+import logging
 
 from dtest import Tester
 from tools.data import rows_to_list
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('3.0')
@@ -13,7 +16,7 @@ class TestStressSparsenessRatio(Tester):
     Tests for the `row-population-ratio` parameter to `cassandra-stress`.
     """
 
-    def uniform_ratio_test(self):
+    def test_uniform_ratio(self):
         """
         Tests that the ratio-specifying string 'uniform(5..15)/50' results in
         ~80% of the values written being non-null.
@@ -22,7 +25,7 @@ class TestStressSparsenessRatio(Tester):
                                    expected_ratio=.8,
                                    delta=.1)
 
-    def fixed_ratio_test(self):
+    def test_fixed_ratio(self):
         """
         Tests that the string 'fixed(1)/3' results in ~1/3 of the values
         written being non-null.
@@ -50,4 +53,4 @@ class TestStressSparsenessRatio(Tester):
         num_nones = sum(row.count(None) for row in written)
         num_results = sum(len(row) for row in written)
 
-        self.assertAlmostEqual(float(num_nones) / num_results, expected_ratio, 
delta=delta)
+        assert abs(float(num_nones) / num_results - expected_ratio) <= delta

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/super_column_cache_test.py
----------------------------------------------------------------------
diff --git a/super_column_cache_test.py b/super_column_cache_test.py
index 405d883..f0147a8 100644
--- a/super_column_cache_test.py
+++ b/super_column_cache_test.py
@@ -1,20 +1,32 @@
+import pytest
+import logging
+
+from dtest_setup_overrides import DTestSetupOverrides
+
 from dtest import Tester
-from thrift_bindings.v22.ttypes import \
+from thrift_bindings.thrift010.ttypes import \
     ConsistencyLevel as ThriftConsistencyLevel
-from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnOrSuperColumn,
+from thrift_bindings.thrift010.ttypes import (CfDef, Column, 
ColumnOrSuperColumn,
                                         ColumnParent, KsDef, Mutation,
                                         SlicePredicate, SliceRange,
                                         SuperColumn)
-from thrift_tests import get_thrift_client
+from thrift_test import get_thrift_client
 from tools.misc import ImmutableMapping
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0', max_version='4')
 class TestSCCache(Tester):
-    cluster_options = ImmutableMapping({'start_rpc': 'true'})
 
-    def sc_with_row_cache_test(self):
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 
'true'})
+        return dtest_setup_overrides
+
+    def test_sc_with_row_cache(self):
         """ Test for bug reported in #4190 """
         cluster = self.cluster
 
@@ -57,12 +69,12 @@ class TestSCCache(Tester):
         column_parent = ColumnParent(column_family='Users')
         predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
         super_columns = client.get_slice('mina', column_parent, predicate, 
ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(1, len(super_column.columns))
-        self.assertEqual('name', super_column.columns[0].name)
-        self.assertEqual('Mina', super_column.columns[0].value)
+        assert 'attrs' == super_column.name
+        assert 1 == len(super_column.columns)
+        assert 'name' == super_column.columns[0].name
+        assert 'Mina' == super_column.columns[0].value
 
         # add a 'country' subcolumn
         column = Column(name='country', value='Canada', timestamp=100)
@@ -71,16 +83,16 @@ class TestSCCache(Tester):
             ThriftConsistencyLevel.ONE)
 
         super_columns = client.get_slice('mina', column_parent, predicate, 
ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(2, len(super_column.columns))
+        assert 'attrs' == super_column.name
+        assert 2 == len(super_column.columns)
 
-        self.assertEqual('country', super_column.columns[0].name)
-        self.assertEqual('Canada', super_column.columns[0].value)
+        assert 'country' == super_column.columns[0].name
+        assert 'Canada' == super_column.columns[0].value
 
-        self.assertEqual('name', super_column.columns[1].name)
-        self.assertEqual('Mina', super_column.columns[1].value)
+        assert 'name' == super_column.columns[1].name
+        assert 'Mina' == super_column.columns[1].value
 
         # add a 'region' subcolumn
         column = Column(name='region', value='Quebec', timestamp=100)
@@ -89,16 +101,16 @@ class TestSCCache(Tester):
             ThriftConsistencyLevel.ONE)
 
         super_columns = client.get_slice('mina', column_parent, predicate, ThriftConsistencyLevel.ONE)
-        self.assertEqual(1, len(super_columns))
+        assert 1 == len(super_columns)
         super_column = super_columns[0].super_column
-        self.assertEqual('attrs', super_column.name)
-        self.assertEqual(3, len(super_column.columns))
+        assert 'attrs' == super_column.name
+        assert 3 == len(super_column.columns)
 
-        self.assertEqual('country', super_column.columns[0].name)
-        self.assertEqual('Canada', super_column.columns[0].value)
+        assert 'country' == super_column.columns[0].name
+        assert 'Canada' == super_column.columns[0].value
 
-        self.assertEqual('name', super_column.columns[1].name)
-        self.assertEqual('Mina', super_column.columns[1].value)
+        assert 'name' == super_column.columns[1].name
+        assert 'Mina' == super_column.columns[1].value
 
-        self.assertEqual('region', super_column.columns[2].name)
-        self.assertEqual('Quebec', super_column.columns[2].value)
+        assert 'region' == super_column.columns[2].name
+        assert 'Quebec' == super_column.columns[2].value

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/super_counter_test.py
----------------------------------------------------------------------
diff --git a/super_counter_test.py b/super_counter_test.py
index 7a4c63b..b9ca007 100644
--- a/super_counter_test.py
+++ b/super_counter_test.py
@@ -1,12 +1,17 @@
 import time
+import pytest
+import logging
 
-from cql.cassandra.ttypes import (CfDef, ColumnParent, ColumnPath,
-                                  ConsistencyLevel, CounterColumn)
-from dtest import Tester, debug, create_ks
-from thrift_tests import get_thrift_client
+from dtest_setup_overrides import DTestSetupOverrides
+from dtest import Tester, create_ks
+from thrift_test import get_thrift_client
 from tools.misc import ImmutableMapping
 
-from tools.decorators import since
+from thrift_bindings.thrift010.Cassandra import (CfDef, ColumnParent, ColumnPath,
+                                                 ConsistencyLevel, CounterColumn)
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
 
 
 @since('2.0', max_version='4')
@@ -15,9 +20,13 @@ class TestSuperCounterClusterRestart(Tester):
     This test is part of this issue:
     https://issues.apache.org/jira/browse/CASSANDRA-3821
     """
-    cluster_options = ImmutableMapping({'start_rpc': 'true'})
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_dtest_setup_overrides(self):
+        dtest_setup_overrides = DTestSetupOverrides()
+        dtest_setup_overrides.cluster_options = ImmutableMapping({'start_rpc': 'true'})
+        return dtest_setup_overrides
 
-    def functional_test(self):
+    def test_functional(self):
         NUM_SUBCOLS = 100
         NUM_ADDS = 100
 
@@ -42,8 +51,8 @@ class TestSuperCounterClusterRestart(Tester):
         # let the sediment settle to to the bottom before drinking...
         time.sleep(2)
 
-        for subcol in xrange(NUM_SUBCOLS):
-            for add in xrange(NUM_ADDS):
+        for subcol in range(NUM_SUBCOLS):
+            for add in range(NUM_ADDS):
                 column_parent = ColumnParent(column_family='cf',
                                              super_column='subcol_%d' % subcol)
                 counter_column = CounterColumn('col_0', 1)
@@ -52,10 +61,10 @@ class TestSuperCounterClusterRestart(Tester):
         time.sleep(1)
         cluster.flush()
 
-        debug("Stopping cluster")
+        logger.debug("Stopping cluster")
         cluster.stop()
         time.sleep(5)
-        debug("Starting cluster")
+        logger.debug("Starting cluster")
         cluster.start()
         time.sleep(5)
 
@@ -65,17 +74,17 @@ class TestSuperCounterClusterRestart(Tester):
 
         from_db = []
 
-        for i in xrange(NUM_SUBCOLS):
+        for i in range(NUM_SUBCOLS):
             column_path = ColumnPath(column_family='cf', column='col_0',
                                      super_column='subcol_%d' % i)
             column_or_super_column = thrift_conn.get('row_0', column_path,
                                                      ConsistencyLevel.QUORUM)
             val = column_or_super_column.counter_column.value
-            debug(str(val)),
+            logger.debug(str(val)),
             from_db.append(val)
-        debug("")
+        logger.debug("")
 
-        expected = [NUM_ADDS for i in xrange(NUM_SUBCOLS)]
+        expected = [NUM_ADDS for i in range(NUM_SUBCOLS)]
 
         if from_db != expected:
             raise Exception("Expected a bunch of the same values out of the db. Got this: " + str(from_db))

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/system_keyspaces_test.py
----------------------------------------------------------------------
diff --git a/system_keyspaces_test.py b/system_keyspaces_test.py
index 2a5c099..c8ddd77 100644
--- a/system_keyspaces_test.py
+++ b/system_keyspaces_test.py
@@ -1,7 +1,13 @@
+import pytest
+import logging
+
 from cassandra import Unauthorized
 from dtest import Tester
 from tools.assertions import assert_all, assert_exception, assert_none
-from tools.decorators import since
+
+since = pytest.mark.since
+logger = logging.getLogger(__name__)
+
 
 
 class TestSystemKeyspaces(Tester):

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/49b2dda4/thrift_bindings/thrift010/Cassandra-remote
----------------------------------------------------------------------
diff --git a/thrift_bindings/thrift010/Cassandra-remote b/thrift_bindings/thrift010/Cassandra-remote
new file mode 100644
index 0000000..6f3daa9
--- /dev/null
+++ b/thrift_bindings/thrift010/Cassandra-remote
@@ -0,0 +1,425 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.10.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+import sys
+import pprint
+if sys.version_info[0] > 2:
+    from urllib.parse import urlparse
+else:
+    from urlparse import urlparse
+from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
+from thrift.protocol.TBinaryProtocol import TBinaryProtocol
+
+from cassandra import Cassandra
+from cassandra.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+    print('')
+    print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] 
[-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile 
certfile] function [arg1 [arg2...]]')
+    print('')
+    print('Functions:')
+    print('  void login(AuthenticationRequest auth_request)')
+    print('  void set_keyspace(string keyspace)')
+    print('  ColumnOrSuperColumn get(string key, ColumnPath column_path, 
ConsistencyLevel consistency_level)')
+    print('   get_slice(string key, ColumnParent column_parent, SlicePredicate 
predicate, ConsistencyLevel consistency_level)')
+    print('  i32 get_count(string key, ColumnParent column_parent, 
SlicePredicate predicate, ConsistencyLevel consistency_level)')
+    print('   multiget_slice( keys, ColumnParent column_parent, SlicePredicate 
predicate, ConsistencyLevel consistency_level)')
+    print('   multiget_count( keys, ColumnParent column_parent, SlicePredicate 
predicate, ConsistencyLevel consistency_level)')
+    print('   get_range_slices(ColumnParent column_parent, SlicePredicate 
predicate, KeyRange range, ConsistencyLevel consistency_level)')
+    print('   get_paged_slice(string column_family, KeyRange range, string 
start_column, ConsistencyLevel consistency_level)')
+    print('   get_indexed_slices(ColumnParent column_parent, IndexClause 
index_clause, SlicePredicate column_predicate, ConsistencyLevel 
consistency_level)')
+    print('  void insert(string key, ColumnParent column_parent, Column 
column, ConsistencyLevel consistency_level)')
+    print('  void add(string key, ColumnParent column_parent, CounterColumn 
column, ConsistencyLevel consistency_level)')
+    print('  CASResult cas(string key, string column_family,  expected,  
updates, ConsistencyLevel serial_consistency_level, ConsistencyLevel 
commit_consistency_level)')
+    print('  void remove(string key, ColumnPath column_path, i64 timestamp, 
ConsistencyLevel consistency_level)')
+    print('  void remove_counter(string key, ColumnPath path, ConsistencyLevel 
consistency_level)')
+    print('  void batch_mutate( mutation_map, ConsistencyLevel 
consistency_level)')
+    print('  void atomic_batch_mutate( mutation_map, ConsistencyLevel 
consistency_level)')
+    print('  void truncate(string cfname)')
+    print('   get_multi_slice(MultiSliceRequest request)')
+    print('   describe_schema_versions()')
+    print('   describe_keyspaces()')
+    print('  string describe_cluster_name()')
+    print('  string describe_version()')
+    print('   describe_ring(string keyspace)')
+    print('   describe_local_ring(string keyspace)')
+    print('   describe_token_map()')
+    print('  string describe_partitioner()')
+    print('  string describe_snitch()')
+    print('  KsDef describe_keyspace(string keyspace)')
+    print('   describe_splits(string cfName, string start_token, string 
end_token, i32 keys_per_split)')
+    print('  string trace_next_query()')
+    print('   describe_splits_ex(string cfName, string start_token, string 
end_token, i32 keys_per_split)')
+    print('  string system_add_column_family(CfDef cf_def)')
+    print('  string system_drop_column_family(string column_family)')
+    print('  string system_add_keyspace(KsDef ks_def)')
+    print('  string system_drop_keyspace(string keyspace)')
+    print('  string system_update_keyspace(KsDef ks_def)')
+    print('  string system_update_column_family(CfDef cf_def)')
+    print('  CqlResult execute_cql_query(string query, Compression 
compression)')
+    print('  CqlResult execute_cql3_query(string query, Compression 
compression, ConsistencyLevel consistency)')
+    print('  CqlPreparedResult prepare_cql_query(string query, Compression 
compression)')
+    print('  CqlPreparedResult prepare_cql3_query(string query, Compression 
compression)')
+    print('  CqlResult execute_prepared_cql_query(i32 itemId,  values)')
+    print('  CqlResult execute_prepared_cql3_query(i32 itemId,  values, 
ConsistencyLevel consistency)')
+    print('  void set_cql_version(string version)')
+    print('')
+    sys.exit(0)
+
+pp = pprint.PrettyPrinter(indent=2)
+host = 'localhost'
+port = 9090
+uri = ''
+framed = False
+ssl = False
+validate = True
+ca_certs = None
+keyfile = None
+certfile = None
+http = False
+argi = 1
+
+if sys.argv[argi] == '-h':
+    parts = sys.argv[argi + 1].split(':')
+    host = parts[0]
+    if len(parts) > 1:
+        port = int(parts[1])
+    argi += 2
+
+if sys.argv[argi] == '-u':
+    url = urlparse(sys.argv[argi + 1])
+    parts = url[1].split(':')
+    host = parts[0]
+    if len(parts) > 1:
+        port = int(parts[1])
+    else:
+        port = 80
+    uri = url[2]
+    if url[4]:
+        uri += '?%s' % url[4]
+    http = True
+    argi += 2
+
+if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
+    framed = True
+    argi += 1
+
+if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
+    ssl = True
+    argi += 1
+
+if sys.argv[argi] == '-novalidate':
+    validate = False
+    argi += 1
+
+if sys.argv[argi] == '-ca_certs':
+    ca_certs = sys.argv[argi+1]
+    argi += 2
+
+if sys.argv[argi] == '-keyfile':
+    keyfile = sys.argv[argi+1]
+    argi += 2
+
+if sys.argv[argi] == '-certfile':
+    certfile = sys.argv[argi+1]
+    argi += 2
+
+cmd = sys.argv[argi]
+args = sys.argv[argi + 1:]
+
+if http:
+    transport = THttpClient.THttpClient(host, port, uri)
+else:
+    if ssl:
+        socket = TSSLSocket.TSSLSocket(host, port, validate=validate, 
ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
+    else:
+        socket = TSocket.TSocket(host, port)
+    if framed:
+        transport = TTransport.TFramedTransport(socket)
+    else:
+        transport = TTransport.TBufferedTransport(socket)
+protocol = TBinaryProtocol(transport)
+client = Cassandra.Client(protocol)
+transport.open()
+
+if cmd == 'login':
+    if len(args) != 1:
+        print('login requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.login(eval(args[0]),))
+
+elif cmd == 'set_keyspace':
+    if len(args) != 1:
+        print('set_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.set_keyspace(args[0],))
+
+elif cmd == 'get':
+    if len(args) != 3:
+        print('get requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.get(args[0], eval(args[1]), eval(args[2]),))
+
+elif cmd == 'get_slice':
+    if len(args) != 4:
+        print('get_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_slice(args[0], eval(args[1]), eval(args[2]), 
eval(args[3]),))
+
+elif cmd == 'get_count':
+    if len(args) != 4:
+        print('get_count requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_count(args[0], eval(args[1]), eval(args[2]), 
eval(args[3]),))
+
+elif cmd == 'multiget_slice':
+    if len(args) != 4:
+        print('multiget_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.multiget_slice(eval(args[0]), eval(args[1]), 
eval(args[2]), eval(args[3]),))
+
+elif cmd == 'multiget_count':
+    if len(args) != 4:
+        print('multiget_count requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.multiget_count(eval(args[0]), eval(args[1]), 
eval(args[2]), eval(args[3]),))
+
+elif cmd == 'get_range_slices':
+    if len(args) != 4:
+        print('get_range_slices requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_range_slices(eval(args[0]), eval(args[1]), 
eval(args[2]), eval(args[3]),))
+
+elif cmd == 'get_paged_slice':
+    if len(args) != 4:
+        print('get_paged_slice requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_paged_slice(args[0], eval(args[1]), args[2], 
eval(args[3]),))
+
+elif cmd == 'get_indexed_slices':
+    if len(args) != 4:
+        print('get_indexed_slices requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.get_indexed_slices(eval(args[0]), eval(args[1]), 
eval(args[2]), eval(args[3]),))
+
+elif cmd == 'insert':
+    if len(args) != 4:
+        print('insert requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.insert(args[0], eval(args[1]), eval(args[2]), 
eval(args[3]),))
+
+elif cmd == 'add':
+    if len(args) != 4:
+        print('add requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.add(args[0], eval(args[1]), eval(args[2]), 
eval(args[3]),))
+
+elif cmd == 'cas':
+    if len(args) != 6:
+        print('cas requires 6 args')
+        sys.exit(1)
+    pp.pprint(client.cas(args[0], args[1], eval(args[2]), eval(args[3]), 
eval(args[4]), eval(args[5]),))
+
+elif cmd == 'remove':
+    if len(args) != 4:
+        print('remove requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.remove(args[0], eval(args[1]), eval(args[2]), 
eval(args[3]),))
+
+elif cmd == 'remove_counter':
+    if len(args) != 3:
+        print('remove_counter requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.remove_counter(args[0], eval(args[1]), eval(args[2]),))
+
+elif cmd == 'batch_mutate':
+    if len(args) != 2:
+        print('batch_mutate requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.batch_mutate(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'atomic_batch_mutate':
+    if len(args) != 2:
+        print('atomic_batch_mutate requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.atomic_batch_mutate(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'truncate':
+    if len(args) != 1:
+        print('truncate requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.truncate(args[0],))
+
+elif cmd == 'get_multi_slice':
+    if len(args) != 1:
+        print('get_multi_slice requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.get_multi_slice(eval(args[0]),))
+
+elif cmd == 'describe_schema_versions':
+    if len(args) != 0:
+        print('describe_schema_versions requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_schema_versions())
+
+elif cmd == 'describe_keyspaces':
+    if len(args) != 0:
+        print('describe_keyspaces requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_keyspaces())
+
+elif cmd == 'describe_cluster_name':
+    if len(args) != 0:
+        print('describe_cluster_name requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_cluster_name())
+
+elif cmd == 'describe_version':
+    if len(args) != 0:
+        print('describe_version requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_version())
+
+elif cmd == 'describe_ring':
+    if len(args) != 1:
+        print('describe_ring requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_ring(args[0],))
+
+elif cmd == 'describe_local_ring':
+    if len(args) != 1:
+        print('describe_local_ring requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_local_ring(args[0],))
+
+elif cmd == 'describe_token_map':
+    if len(args) != 0:
+        print('describe_token_map requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_token_map())
+
+elif cmd == 'describe_partitioner':
+    if len(args) != 0:
+        print('describe_partitioner requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_partitioner())
+
+elif cmd == 'describe_snitch':
+    if len(args) != 0:
+        print('describe_snitch requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.describe_snitch())
+
+elif cmd == 'describe_keyspace':
+    if len(args) != 1:
+        print('describe_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.describe_keyspace(args[0],))
+
+elif cmd == 'describe_splits':
+    if len(args) != 4:
+        print('describe_splits requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.describe_splits(args[0], args[1], args[2], 
eval(args[3]),))
+
+elif cmd == 'trace_next_query':
+    if len(args) != 0:
+        print('trace_next_query requires 0 args')
+        sys.exit(1)
+    pp.pprint(client.trace_next_query())
+
+elif cmd == 'describe_splits_ex':
+    if len(args) != 4:
+        print('describe_splits_ex requires 4 args')
+        sys.exit(1)
+    pp.pprint(client.describe_splits_ex(args[0], args[1], args[2], 
eval(args[3]),))
+
+elif cmd == 'system_add_column_family':
+    if len(args) != 1:
+        print('system_add_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_add_column_family(eval(args[0]),))
+
+elif cmd == 'system_drop_column_family':
+    if len(args) != 1:
+        print('system_drop_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_drop_column_family(args[0],))
+
+elif cmd == 'system_add_keyspace':
+    if len(args) != 1:
+        print('system_add_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_add_keyspace(eval(args[0]),))
+
+elif cmd == 'system_drop_keyspace':
+    if len(args) != 1:
+        print('system_drop_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_drop_keyspace(args[0],))
+
+elif cmd == 'system_update_keyspace':
+    if len(args) != 1:
+        print('system_update_keyspace requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_update_keyspace(eval(args[0]),))
+
+elif cmd == 'system_update_column_family':
+    if len(args) != 1:
+        print('system_update_column_family requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.system_update_column_family(eval(args[0]),))
+
+elif cmd == 'execute_cql_query':
+    if len(args) != 2:
+        print('execute_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.execute_cql_query(args[0], eval(args[1]),))
+
+elif cmd == 'execute_cql3_query':
+    if len(args) != 3:
+        print('execute_cql3_query requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.execute_cql3_query(args[0], eval(args[1]), 
eval(args[2]),))
+
+elif cmd == 'prepare_cql_query':
+    if len(args) != 2:
+        print('prepare_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.prepare_cql_query(args[0], eval(args[1]),))
+
+elif cmd == 'prepare_cql3_query':
+    if len(args) != 2:
+        print('prepare_cql3_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.prepare_cql3_query(args[0], eval(args[1]),))
+
+elif cmd == 'execute_prepared_cql_query':
+    if len(args) != 2:
+        print('execute_prepared_cql_query requires 2 args')
+        sys.exit(1)
+    pp.pprint(client.execute_prepared_cql_query(eval(args[0]), eval(args[1]),))
+
+elif cmd == 'execute_prepared_cql3_query':
+    if len(args) != 3:
+        print('execute_prepared_cql3_query requires 3 args')
+        sys.exit(1)
+    pp.pprint(client.execute_prepared_cql3_query(eval(args[0]), eval(args[1]), 
eval(args[2]),))
+
+elif cmd == 'set_cql_version':
+    if len(args) != 1:
+        print('set_cql_version requires 1 args')
+        sys.exit(1)
+    pp.pprint(client.set_cql_version(args[0],))
+
+else:
+    print('Unrecognized method %s' % cmd)
+    sys.exit(1)
+
+transport.close()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org

Reply via email to