Repository: cassandra-dtest
Updated Branches:
  refs/heads/master 125d94206 -> f4eda3a50 (forced update)


Allow storage port to be configurable per node

Patch by Ariel Weisberg; Reviewed by Jason Brown for CASSANDRA-7544


Project: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/commit/f4eda3a5
Tree: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/tree/f4eda3a5
Diff: http://git-wip-us.apache.org/repos/asf/cassandra-dtest/diff/f4eda3a5

Branch: refs/heads/master
Commit: f4eda3a5064be979bfe75140ecb0dcad95f35358
Parents: b4a1229
Author: Ariel Weisberg <ariel.weisb...@datastax.com>
Authored: Wed Dec 14 18:51:17 2016 -0500
Committer: Ariel Weisberg <aweisb...@apple.com>
Committed: Thu Jan 25 19:59:05 2018 -0500

----------------------------------------------------------------------
 bootstrap_test.py                               |  2 +-
 byteman/4.0/decommission_failure_inject.btm     |  4 +-
 .../4.0/election_counter_leader_favor_node2.btm | 14 +++++++
 .../4.0/inject_failure_streaming_to_node2.btm   |  2 +-
 byteman/election_counter_leader_favor_node2.btm | 14 -------
 .../election_counter_leader_favor_node2.btm     | 14 +++++++
 counter_tests.py                                |  9 ++++-
 cql_tracing_test.py                             |  6 +--
 rebuild_test.py                                 |  6 ++-
 repair_tests/incremental_repair_test.py         | 10 +++--
 repair_tests/repair_test.py                     | 20 +++++-----
 replace_address_test.py                         | 17 ++++----
 secondary_indexes_test.py                       |  1 +
 snitch_test.py                                  | 41 ++++++++++++++------
 topology_test.py                                | 14 +++----
 upgrade_tests/upgrade_supercolumns_test.py      |  4 +-
 16 files changed, 112 insertions(+), 66 deletions(-)
----------------------------------------------------------------------
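For context, the pattern these dtest changes follow: before 4.0, Cassandra logs and gossip
output print peers as slash-prefixed addresses ("/127.0.0.1"), while 4.0 includes the storage
port ("127.0.0.1:7000"). A minimal illustrative sketch of that expectation (not part of the
patch; node.address() is the existing ccm accessor, the version flag is an assumption for
illustration):

    def expected_peer_string(node, is_40_or_newer, storage_port=7000):
        """Peer string a dtest should expect to find in 4.0 vs pre-4.0 logs."""
        if is_40_or_newer:
            return '{}:{}'.format(node.address(), storage_port)
        return '/{}'.format(node.address())

The diff below mostly replaces hard-coded "/127.0.0.x" literals with ccm helpers such as
address_for_current_version_slashy() that encapsulate this per-version formatting.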


http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/bootstrap_test.py
----------------------------------------------------------------------
diff --git a/bootstrap_test.py b/bootstrap_test.py
index d29390c..efa84ec 100644
--- a/bootstrap_test.py
+++ b/bootstrap_test.py
@@ -498,7 +498,7 @@ class TestBootstrap(BaseBootstrapTest):
         # Now start it, it should not be allowed to join.
         mark = node4.mark_log()
         node4.start(no_wait=True, wait_other_notice=False)
-        node4.watch_log_for("A node with address /127.0.0.4 already exists, 
cancelling join", from_mark=mark)
+        node4.watch_log_for("A node with address {} already exists, cancelling 
join".format(node4.address_for_current_version_slashy()), from_mark=mark)
 
     def decommissioned_wiped_node_can_join_test(self):
         """

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/byteman/4.0/decommission_failure_inject.btm
----------------------------------------------------------------------
diff --git a/byteman/4.0/decommission_failure_inject.btm b/byteman/4.0/decommission_failure_inject.btm
index a6418fc..b2571fc 100644
--- a/byteman/4.0/decommission_failure_inject.btm
+++ b/byteman/4.0/decommission_failure_inject.btm
@@ -10,8 +10,8 @@ METHOD prepareSynAck
 AT INVOKE startStreamingFiles
 BIND peer = $0.peer
 # set flag to only run this rule once.
-IF peer.equals(InetAddress.getByName("127.0.0.1")) AND NOT flagged("done")
+IF peer.equals(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.1")) AND NOT flagged("done")
 DO
    flag("done");
    throw new java.lang.RuntimeException("Triggering network failure")
-ENDRULE
\ No newline at end of file
+ENDRULE

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/byteman/4.0/election_counter_leader_favor_node2.btm
----------------------------------------------------------------------
diff --git a/byteman/4.0/election_counter_leader_favor_node2.btm b/byteman/4.0/election_counter_leader_favor_node2.btm
new file mode 100644
index 0000000..ae5b880
--- /dev/null
+++ b/byteman/4.0/election_counter_leader_favor_node2.btm
@@ -0,0 +1,14 @@
+#
+# Cheat during the leader election for a counter mutation and favour node 2, 127.0.0.2
+#
+# Note that this happens only if the node is known to be available.
+#
+RULE election counter leader cheat
+CLASS org.apache.cassandra.service.StorageProxy
+METHOD findSuitableEndpoint
+AT EXIT
+BIND isthere:boolean = $localEndpoints.contains(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2"));
+if isthere
+DO
+    return org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2");
+ENDRULE

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/byteman/4.0/inject_failure_streaming_to_node2.btm
----------------------------------------------------------------------
diff --git a/byteman/4.0/inject_failure_streaming_to_node2.btm b/byteman/4.0/inject_failure_streaming_to_node2.btm
index 761950f..a856030 100644
--- a/byteman/4.0/inject_failure_streaming_to_node2.btm
+++ b/byteman/4.0/inject_failure_streaming_to_node2.btm
@@ -10,7 +10,7 @@ METHOD startStreamingFiles
 AT ENTRY
 BIND peer = $0.peer
 # set flag to only run this rule once.
-IF peer.equals(InetAddress.getByName("127.0.0.2")) AND NOT flagged("done")
+IF peer.equals(org.apache.cassandra.locator.InetAddressAndPort.getByName("127.0.0.2")) AND NOT flagged("done")
 DO
    flag("done");
    throw new java.lang.RuntimeException("Triggering network failure")

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/byteman/election_counter_leader_favor_node2.btm
----------------------------------------------------------------------
diff --git a/byteman/election_counter_leader_favor_node2.btm b/byteman/election_counter_leader_favor_node2.btm
deleted file mode 100644
index f3d1ac3..0000000
--- a/byteman/election_counter_leader_favor_node2.btm
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Cheat during the leader election for a counter mutation and favour node 2, 127.0.0.2
-#
-# Note that this happens only if the node is known to be available.
-#
-RULE election counter leader cheat
-CLASS org.apache.cassandra.service.StorageProxy
-METHOD findSuitableEndpoint
-AT EXIT
-BIND isthere:boolean = $localEndpoints.contains(java.net.InetAddress.getByName("127.0.0.2"));
-if isthere
-DO
-    return java.net.InetAddress.getByName("127.0.0.2");
-ENDRULE

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/byteman/pre4.0/election_counter_leader_favor_node2.btm
----------------------------------------------------------------------
diff --git a/byteman/pre4.0/election_counter_leader_favor_node2.btm b/byteman/pre4.0/election_counter_leader_favor_node2.btm
new file mode 100644
index 0000000..f3d1ac3
--- /dev/null
+++ b/byteman/pre4.0/election_counter_leader_favor_node2.btm
@@ -0,0 +1,14 @@
+#
+# Cheat during the leader election for a counter mutation and favour node 2, 127.0.0.2
+#
+# Note that this happens only if the node is known to be available.
+#
+RULE election counter leader cheat
+CLASS org.apache.cassandra.service.StorageProxy
+METHOD findSuitableEndpoint
+AT EXIT
+BIND isthere:boolean = $localEndpoints.contains(java.net.InetAddress.getByName("127.0.0.2"));
+if isthere
+DO
+    return java.net.InetAddress.getByName("127.0.0.2");
+ENDRULE

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/counter_tests.py
----------------------------------------------------------------------
diff --git a/counter_tests.py b/counter_tests.py
index f2c746b..1de495d 100644
--- a/counter_tests.py
+++ b/counter_tests.py
@@ -90,8 +90,13 @@ class TestCounters(Tester):
         nodes = cluster.nodelist()
         # Have node 1 and 3 cheat a bit during the leader election for a counter mutation; note that cheating
         # takes place iff there is an actual chance for node 2 to be picked.
-        nodes[0].update_startup_byteman_script('./byteman/election_counter_leader_favor_node2.btm')
-        nodes[2].update_startup_byteman_script('./byteman/election_counter_leader_favor_node2.btm')
+        if cluster.version() < '4.0':
+            nodes[0].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
+            nodes[2].update_startup_byteman_script('./byteman/pre4.0/election_counter_leader_favor_node2.btm')
+        else:
+            nodes[0].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
+            nodes[2].update_startup_byteman_script('./byteman/4.0/election_counter_leader_favor_node2.btm')
+
         cluster.start(wait_for_binary_proto=True)
         session = self.patient_cql_connection(nodes[0])
         create_ks(session, 'ks', 3)

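The counter test above picks its byteman rule from a version-specific directory because the
4.0 rules bind peers as org.apache.cassandra.locator.InetAddressAndPort while the pre-4.0
rules use java.net.InetAddress. A hedged sketch of the same selection factored into a helper
(illustrative only; byteman_script_for is not part of the patch):

    def byteman_script_for(cluster, name):
        # 4.0+ byteman rules reference InetAddressAndPort; older rules use InetAddress.
        subdir = 'pre4.0' if cluster.version() < '4.0' else '4.0'
        return './byteman/{}/{}'.format(subdir, name)

    # e.g. nodes[0].update_startup_byteman_script(
    #          byteman_script_for(cluster, 'election_counter_leader_favor_node2.btm'))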
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/cql_tracing_test.py
----------------------------------------------------------------------
diff --git a/cql_tracing_test.py b/cql_tracing_test.py
index 549e4d0..f25679c 100644
--- a/cql_tracing_test.py
+++ b/cql_tracing_test.py
@@ -78,9 +78,9 @@ class TestCqlTracing(Tester):
         debug(out)
         self.assertIn('Tracing session: ', out)
 
-        self.assertIn('/127.0.0.1', out)
-        self.assertIn('/127.0.0.2', out)
-        self.assertIn('/127.0.0.3', out)
+        self.assertIn(node1.address_for_current_version_slashy(), out)
+        self.assertIn(self.cluster.nodelist()[1].address_for_current_version_slashy(), out)
+        self.assertIn(self.cluster.nodelist()[2].address_for_current_version_slashy(), out)
 
         self.assertIn('Parsing INSERT INTO ks.users ', out)
         self.assertIn('Request complete ', out)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/rebuild_test.py
----------------------------------------------------------------------
diff --git a/rebuild_test.py b/rebuild_test.py
index 273a749..795c945 100644
--- a/rebuild_test.py
+++ b/rebuild_test.py
@@ -140,7 +140,9 @@ class TestRebuild(Tester):
         """
         self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Error while rebuilding node',
                                                                      r'Streaming error occurred on session with peer 127.0.0.3',
-                                                                     r'Remote peer 127.0.0.3 failed stream session']
+                                                                     r'Remote peer 127.0.0.3 failed stream session',
+                                                                     r'Streaming error occurred on session with peer 127.0.0.3:7000',
+                                                                     r'Remote peer 127.0.0.3:7000 failed stream session']
         cluster = self.cluster
         cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
 
@@ -423,7 +425,7 @@ class TestRebuild(Tester):
         node3.nodetool('rebuild -ks ks1 -ts (%s,%s] -s %s' % (tokens[2], str(pow(2, 63) - 1), node2_address))
 
         # verify that node2 streamed to node3
-        log_matches = node2.grep_log('Session with /%s is complete' % node3_address)
+        log_matches = node2.grep_log('Session with %s is complete' % node3.address_for_current_version())
         self.assertTrue(len(log_matches) > 0)
 
         # verify that node1 did not participate

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/repair_tests/incremental_repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/incremental_repair_test.py b/repair_tests/incremental_repair_test.py
index 95fb26d..e3017c2 100644
--- a/repair_tests/incremental_repair_test.py
+++ b/repair_tests/incremental_repair_test.py
@@ -132,6 +132,7 @@ class TestIncRepair(Tester):
         # table, that all nodes are listed as participants, and that all sstables are
         # (still) marked pending repair
         expected_participants = {n.address() for n in cluster.nodelist()}
+        expected_participants_wp = {n.address_and_port() for n in cluster.nodelist()}
         recorded_pending_ids = set()
         for node in cluster.nodelist():
             session = self.patient_exclusive_cql_connection(node)
@@ -139,6 +140,8 @@ class TestIncRepair(Tester):
             self.assertEqual(len(results), 1)
             result = results[0]
             self.assertEqual(set(result.participants), expected_participants)
+            if hasattr(result, "participants_wp"):
+                self.assertEqual(set(result.participants_wp), expected_participants_wp)
             self.assertEqual(result.state, ConsistentState.FINALIZED, "4=FINALIZED")
             pending_id = result.parent_id
             self.assertAllPendingRepairSSTables(node, 'ks', pending_id)
@@ -168,9 +171,10 @@ class TestIncRepair(Tester):
         for node in self.cluster.nodelist():
             session = self.patient_exclusive_cql_connection(node)
             session.execute("INSERT INTO system.repairs "
-                            "(parent_id, cfids, coordinator, last_update, 
participants, ranges, repaired_at, started_at, state) "
-                            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
-                            [session_id, {cfid}, node1.address(), now, 
{n.address() for n in self.cluster.nodelist()},
+                            "(parent_id, cfids, coordinator, coordinator_port, 
last_update, participants, participants_wp, ranges, repaired_at, started_at, 
state) "
+                            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 
%s)",
+                            [session_id, {cfid}, node1.address(), 7000, now, 
{n.address() for n in self.cluster.nodelist()},
+                             {str(n.address()) + ":7000" for n in 
self.cluster.nodelist()},
                              ranges, now, now, ConsistentState.REPAIRING])  # 
2=REPAIRING
 
         time.sleep(1)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/repair_tests/repair_test.py
----------------------------------------------------------------------
diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py
index 1d5bf8f..238871f 100644
--- a/repair_tests/repair_test.py
+++ b/repair_tests/repair_test.py
@@ -135,7 +135,7 @@ class BaseRepairTest(Tester):
         debug("Repair time: {end}".format(end=time.time() - start))
 
         # Validate that only one range was transfered
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync".format(cluster.address_regex(), cluster.address_regex()))
 
         self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
 
@@ -515,10 +515,10 @@ class TestRepair(BaseRepairTest):
                 self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0, res)
 
         # check log for no repair happened for gcable data
-        out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync for cf1")
+        out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync for cf1".format(cluster.address_regex(), cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 0, "GC-able data does not need 
to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs]))
         # check log for actual repair for non gcable data
-        out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync for cf2")
+        out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync for cf2".format(cluster.address_regex(), cluster.address_regex()))
         self.assertGreater(len(out_of_sync_logs), 0, "Non GC-able data should 
be repaired")
 
     def _range_tombstone_digest(self, sequential):
@@ -594,7 +594,7 @@ class TestRepair(BaseRepairTest):
         node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
 
         # check log for no repair happened for gcable data
-        out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync for table1")
+        out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync for table1".format(cluster.address_regex(), 
cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 0, "Digest mismatch for range 
tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs])))
 
     def local_dc_repair_test(self):
@@ -613,7 +613,7 @@ class TestRepair(BaseRepairTest):
         node1.repair(opts)
 
         # Verify that only nodes in dc1 are involved in repair
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync".format(cluster.address_regex(), cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 1, "Lines matching: 
{}".format(len(out_of_sync_logs)))
 
         line, m = out_of_sync_logs[0]
@@ -642,7 +642,7 @@ class TestRepair(BaseRepairTest):
         node1.repair(opts)
 
         # Verify that only nodes in dc1 and dc2 are involved in repair
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync".format(cluster.address_regex(), cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + 
str([elt[0] for elt in out_of_sync_logs]))
         valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                                    {node2.address(), node3.address()}]
@@ -672,7 +672,7 @@ class TestRepair(BaseRepairTest):
         node1.repair(opts)
 
         # Verify that only nodes in dc1 and dc2 are involved in repair
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync".format(cluster.address_regex(), cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + 
str([elt[0] for elt in out_of_sync_logs]))
         valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                                    {node2.address(), node3.address()}]
@@ -831,7 +831,7 @@ class TestRepair(BaseRepairTest):
         node1.repair(opts)
         self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
         self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
         self.assertEqual(len(out_of_sync_logs), 0, "We repaired the wrong CF, so things should still be broke")
 
         # Repair only the range node 1 owns on the right  CF, assert everything is fixed
@@ -840,7 +840,7 @@ class TestRepair(BaseRepairTest):
         node1.repair(opts)
         self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
         self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
         _, matches = out_of_sync_logs[0]
         out_of_sync_nodes = {matches.group(1), matches.group(2)}
         valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
@@ -937,7 +937,7 @@ class TestRepair(BaseRepairTest):
         self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
         self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
 
-        out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have 
([0-9]+) range\(s\) out of sync")
+        out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) 
out of sync".format(cluster.address_regex(), cluster.address_regex()))
         _, matches = out_of_sync_logs[0]
         out_of_sync_nodes = {matches.group(1), matches.group(2)}
 

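The repair assertions above swap the hard-coded "/([0-9.]+)" capture for cluster.address_regex(),
which has to match both log formats. A standalone sketch of a regex with that property
(an assumption for illustration, not necessarily the exact pattern ccm uses):

    import re

    ADDRESS_REGEX = r'/?([0-9.]+)(?::[0-9]+)?'
    pattern = "{} and {} have ([0-9]+) range\\(s\\) out of sync".format(ADDRESS_REGEX, ADDRESS_REGEX)

    assert re.search(pattern, "/127.0.0.1 and /127.0.0.2 have 1 range(s) out of sync")
    assert re.search(pattern, "127.0.0.1:7000 and 127.0.0.2:7000 have 1 range(s) out of sync")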
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/replace_address_test.py
----------------------------------------------------------------------
diff --git a/replace_address_test.py b/replace_address_test.py
index f80af17..2b60077 100644
--- a/replace_address_test.py
+++ b/replace_address_test.py
@@ -35,7 +35,7 @@ class BaseReplaceAddressTest(Tester):
 
     def _setup(self, n=3, opts=None, enable_byteman=False, mixed_versions=False):
         debug("Starting cluster with {} nodes.".format(n))
-        self.cluster.populate(n)
+        self.cluster.populate(n, use_vnodes=not DISABLE_VNODES)
         if opts is not None:
             debug("Setting cluster options: {}".format(opts))
             self.cluster.set_configuration_options(opts)
@@ -148,14 +148,15 @@ class BaseReplaceAddressTest(Tester):
     def _verify_replacement(self, node, same_address):
         if not same_address:
             if self.cluster.cassandra_version() >= '2.2.7':
-                node.watch_log_for("Node /{} is replacing /{}"
-                                   .format(self.replacement_node.address(),
-                                           self.replaced_node.address()),
+                address_prefix = '' if self.cluster.cassandra_version() >= 
'4.0' else '/'
+                node.watch_log_for("Node {}{} is replacing {}{}"
+                                   .format(address_prefix, 
self.replacement_node.address_for_current_version(),
+                                           address_prefix, 
self.replaced_node.address_for_current_version()),
                                    timeout=60, filename='debug.log')
-                node.watch_log_for("Node /{} will complete replacement of /{} 
for tokens"
-                                   .format(self.replacement_node.address(),
-                                           self.replaced_node.address()), 
timeout=10)
-                node.watch_log_for("removing endpoint 
/{}".format(self.replaced_node.address()),
+                node.watch_log_for("Node {}{} will complete replacement of 
{}{} for tokens"
+                                   .format(address_prefix, 
self.replacement_node.address_for_current_version(),
+                                           address_prefix, 
self.replaced_node.address_for_current_version()), timeout=10)
+                node.watch_log_for("removing endpoint 
{}{}".format(address_prefix, self.replaced_node.address_for_current_version()),
                                    timeout=60, filename='debug.log')
             else:
                 node.watch_log_for("between /{} and /{}; /{} is the new owner"

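replace_address_test computes the slash prefix once because 4.0 drops the leading "/" from the
endpoints it logs during replacement. A small sketch of that idea as a reusable helper
(illustrative; endpoint_log_string is not in the patch, the ccm accessors are the ones used above):

    def endpoint_log_string(cluster, node):
        # Pre-4.0 log lines show "/127.0.0.3"; 4.0 shows "127.0.0.3:7000" via the ccm helper.
        prefix = '' if cluster.cassandra_version() >= '4.0' else '/'
        return '{}{}'.format(prefix, node.address_for_current_version())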
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/secondary_indexes_test.py
----------------------------------------------------------------------
diff --git a/secondary_indexes_test.py b/secondary_indexes_test.py
index df5872a..9cb6cc0 100644
--- a/secondary_indexes_test.py
+++ b/secondary_indexes_test.py
@@ -1153,6 +1153,7 @@ class TestPreJoinCallback(Tester):
             r'Streaming error occurred',
             r'\[Stream.*\] Streaming error occurred',
             r'\[Stream.*\] Remote peer 127.0.0.\d failed stream session',
+            r'\[Stream.*\] Remote peer 127.0.0.\d:7000 failed stream session',
             r'Error while waiting on bootstrap to complete. Bootstrap will have to be restarted.'
         ]
         Tester.__init__(self, *args, **kwargs)

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/snitch_test.py
----------------------------------------------------------------------
diff --git a/snitch_test.py b/snitch_test.py
index dc8a30a..334a1a1 100644
--- a/snitch_test.py
+++ b/snitch_test.py
@@ -22,6 +22,7 @@ class TestGossipingPropertyFileSnitch(Tester):
         s.connect((address, port))
         s.close()
 
+
     def test_prefer_local_reconnect_on_listen_address(self):
         """
         @jira_ticket CASSANDRA-9748
@@ -35,15 +36,29 @@ class TestGossipingPropertyFileSnitch(Tester):
         NODE1_LISTEN_ADDRESS = '127.0.0.1'
         NODE1_BROADCAST_ADDRESS = '127.0.0.3'
 
+        NODE1_LISTEN_FMT_ADDRESS = '/127.0.0.1'
+        NODE1_BROADCAST_FMT_ADDRESS = '/127.0.0.3'
+
+        NODE1_40_LISTEN_ADDRESS = '127.0.0.1:7000'
+        NODE1_40_BROADCAST_ADDRESS = '127.0.0.3:7000'
+
         NODE2_LISTEN_ADDRESS = '127.0.0.2'
         NODE2_BROADCAST_ADDRESS = '127.0.0.4'
 
+        NODE2_LISTEN_FMT_ADDRESS = '/127.0.0.2'
+        NODE2_BROADCAST_FMT_ADDRESS = '/127.0.0.4'
+
+        NODE2_40_LISTEN_ADDRESS = '127.0.0.2:7000'
+        NODE2_40_BROADCAST_ADDRESS = '127.0.0.4:7000'
+
         STORAGE_PORT = 7000
 
         cluster = self.cluster
         cluster.populate(2)
         node1, node2 = cluster.nodelist()
 
+        running40 = node1.get_base_cassandra_version() >= 4.0
+
         cluster.seeds = [NODE1_BROADCAST_ADDRESS]
         cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
                                                   'listen_on_broadcast_address': 'true'})
@@ -58,8 +73,8 @@ class TestGossipingPropertyFileSnitch(Tester):
                 snitch_file.write("prefer_local=true" + os.linesep)
 
         node1.start(wait_for_binary_proto=True)
-        node1.watch_log_for("Starting Messaging Service on 
/{}:{}".format(NODE1_LISTEN_ADDRESS, STORAGE_PORT), timeout=60)
-        node1.watch_log_for("Starting Messaging Service on 
/{}:{}".format(NODE1_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60)
+        node1.watch_log_for("Starting Messaging Service on 
{}:{}".format(NODE1_40_LISTEN_ADDRESS[:-5] if running40 else 
NODE1_LISTEN_FMT_ADDRESS, STORAGE_PORT), timeout=60)
+        node1.watch_log_for("Starting Messaging Service on 
{}:{}".format(NODE1_40_BROADCAST_ADDRESS[:-5] if running40 else 
NODE1_BROADCAST_FMT_ADDRESS, STORAGE_PORT), timeout=60)
         self._test_connect(NODE1_LISTEN_ADDRESS, STORAGE_PORT)
         self._test_connect(NODE1_BROADCAST_ADDRESS, STORAGE_PORT)
 
@@ -71,16 +86,19 @@ class TestGossipingPropertyFileSnitch(Tester):
         original_rows = list(session.execute("SELECT * FROM 
{}".format(stress_table)))
 
         node2.start(wait_for_binary_proto=True, wait_other_notice=False)
-        node2.watch_log_for("Starting Messaging Service on 
/{}:{}".format(NODE2_LISTEN_ADDRESS, STORAGE_PORT), timeout=60)
-        node2.watch_log_for("Starting Messaging Service on 
/{}:{}".format(NODE2_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60)
+        node2.watch_log_for("Starting Messaging Service on 
{}:{}".format(NODE2_40_LISTEN_ADDRESS[:-5] if running40 else 
NODE2_LISTEN_FMT_ADDRESS, STORAGE_PORT), timeout=60)
+        node2.watch_log_for("Starting Messaging Service on 
{}:{}".format(NODE2_40_BROADCAST_ADDRESS[:-5] if running40 else 
NODE2_BROADCAST_FMT_ADDRESS, STORAGE_PORT), timeout=60)
         self._test_connect(NODE2_LISTEN_ADDRESS, STORAGE_PORT)
         self._test_connect(NODE2_BROADCAST_ADDRESS, STORAGE_PORT)
 
         # Intiated -> Initiated typo was fixed in 3.10
-        node1.watch_log_for("Ini?tiated reconnect to an Internal IP /{} for 
the /{}".format(NODE2_LISTEN_ADDRESS,
-                                                                               
             NODE2_BROADCAST_ADDRESS), filename='debug.log', timeout=60)
-        node2.watch_log_for("Ini?tiated reconnect to an Internal IP /{} for 
the /{}".format(NODE1_LISTEN_ADDRESS,
-                                                                               
             NODE1_BROADCAST_ADDRESS), filename='debug.log', timeout=60)
+        reconnectFmtString = "Ini?tiated reconnect to an Internal IP {} for 
the {}"
+        if node1.get_base_cassandra_version() >= 3.10:
+            reconnectFmtString = "Initiated reconnect to an Internal IP {} for 
the {}"
+        node1.watch_log_for(reconnectFmtString.format(NODE2_40_LISTEN_ADDRESS 
if running40 else NODE2_LISTEN_FMT_ADDRESS,
+                                               NODE2_40_BROADCAST_ADDRESS if 
running40 else NODE2_BROADCAST_FMT_ADDRESS), filename='debug.log', timeout=60)
+        node2.watch_log_for(reconnectFmtString.format(NODE1_40_LISTEN_ADDRESS 
if running40 else NODE1_LISTEN_FMT_ADDRESS,
+                                               NODE1_40_BROADCAST_ADDRESS if 
running40 else NODE1_BROADCAST_FMT_ADDRESS), filename='debug.log', timeout=60)
 
         # read data from node2 just to make sure data and connectivity is OK
         session = self.patient_exclusive_cql_connection(node2)
@@ -92,10 +110,11 @@ class TestGossipingPropertyFileSnitch(Tester):
         debug(out)
 
         self.assertIn("/{}".format(NODE1_BROADCAST_ADDRESS), out)
-        self.assertIn("INTERNAL_IP:6:{}".format(NODE1_LISTEN_ADDRESS), out)
+        self.assertIn("INTERNAL_IP:{}:{}".format('9' if running40 else '6', 
NODE1_LISTEN_ADDRESS), out)
+        
self.assertIn("INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS), 
out)
         self.assertIn("/{}".format(NODE2_BROADCAST_ADDRESS), out)
-        self.assertIn("INTERNAL_IP:6:{}".format(NODE2_LISTEN_ADDRESS), out)
-
+        self.assertIn("INTERNAL_IP:{}:{}".format('9' if running40 else '6', 
NODE2_LISTEN_ADDRESS), out)
+        
self.assertIn("INTERNAL_ADDRESS_AND_PORT:7:{}".format(NODE1_40_LISTEN_ADDRESS), 
out)
 
 class TestDynamicEndpointSnitch(Tester):
     @attr('resource-intensive')

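The snitch test above asserts on `nodetool gossipinfo` lines of the form KEY:<n>:value (e.g.
INTERNAL_IP and INTERNAL_ADDRESS_AND_PORT above). A rough, illustrative helper for pulling the
value out of such a line (not part of the patch or of ccm; field meanings are an assumption):

    def gossip_state_value(gossipinfo_out, key):
        """Return the value portion of the first 'KEY:<n>:value' line for the given key."""
        for line in gossipinfo_out.splitlines():
            line = line.strip()
            if line.startswith(key + ':'):
                # Keep everything after the second colon, so "127.0.0.1:7000" stays intact.
                return line.split(':', 2)[-1]
        return None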
http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/topology_test.py
----------------------------------------------------------------------
diff --git a/topology_test.py b/topology_test.py
index 797eca3..02d1806 100644
--- a/topology_test.py
+++ b/topology_test.py
@@ -197,7 +197,7 @@ class TestTopology(Tester):
 
         Test decommission operation is resumable
         """
-        self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session']
+        self.ignore_log_patterns = [r'Streaming error occurred', r'Error while decommissioning node', r'Remote peer 127.0.0.2 failed stream session', r'Remote peer 127.0.0.2:7000 failed stream session']
         cluster = self.cluster
         cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
         cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
@@ -228,7 +228,7 @@ class TestTopology(Tester):
 
         # Check decommision is done and we skipped transfereed ranges
         node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
-        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint 
/127.0.0.3", filename='debug.log')
+        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint 
{}".format(node2.address_for_current_version_slashy()), filename='debug.log')
 
         # Check data is correctly forwarded to node1 and node3
         cluster.remove(node2)
@@ -264,17 +264,17 @@ class TestTopology(Tester):
         cluster.flush()
 
         # Move nodes to balance the cluster
-        def move_node(node, token, ip):
+        def move_node(node, token):
             mark = node.mark_log()
             node.move(token)  # can't assume 0 is balanced with m3p
-            node.watch_log_for('{} state jump to NORMAL'.format(ip), from_mark=mark, timeout=180)
+            node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
             time.sleep(3)
 
         balancing_tokens = cluster.balanced_tokens(3)
 
-        move_node(node1, balancing_tokens[0], '127.0.0.1')
-        move_node(node2, balancing_tokens[1], '127.0.0.2')
-        move_node(node3, balancing_tokens[2], '127.0.0.3')
+        move_node(node1, balancing_tokens[0])
+        move_node(node2, balancing_tokens[1])
+        move_node(node3, balancing_tokens[2])
 
         time.sleep(1)
 

http://git-wip-us.apache.org/repos/asf/cassandra-dtest/blob/f4eda3a5/upgrade_tests/upgrade_supercolumns_test.py
----------------------------------------------------------------------
diff --git a/upgrade_tests/upgrade_supercolumns_test.py b/upgrade_tests/upgrade_supercolumns_test.py
index 5b96e64..6ba9db1 100644
--- a/upgrade_tests/upgrade_supercolumns_test.py
+++ b/upgrade_tests/upgrade_supercolumns_test.py
@@ -119,8 +119,8 @@ class TestSCUpgrade(Tester):
         cluster.remove(node=node1)
 
     def upgrade_super_columns_through_all_versions_test(self):
-        self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-2.2', 'git:cassandra-3.0',
-                                                                        'git:cassandra-3.9', 'git:trunk'])
+        self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-2.2', 'git:cassandra-3.X',
+                                                                        'git:trunk'])
 
     def upgrade_super_columns_through_limited_versions_test(self):
         self._upgrade_super_columns_through_versions_test(upgrade_path=['git:cassandra-3.0', 'git:trunk'])


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org
