This is an automated email from the ASF dual-hosted git repository.

bereng pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 58820de9 Extend maximum expiration date
58820de9 is described below

commit 58820de92eef140991a3e45f68f9152ae2fbc490
Author: Bereng <berenguerbl...@gmail.com>
AuthorDate: Mon Sep 5 07:23:04 2022 +0200

    Extend maximum expiration date
    
    patch by Berenguer Blasi; reviewed by Andrés de la Peña for CASSANDRA-14227
---
 ttl_test.py                                    | 21 +++++++++++++++------
 upgrade_tests/upgrade_through_versions_test.py |  7 ++++---
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/ttl_test.py b/ttl_test.py
index 96552de2..c7ee5d9a 100644
--- a/ttl_test.py
+++ b/ttl_test.py
@@ -1,5 +1,7 @@
 import os
 import time
+import datetime
+
 import pytest
 import logging
 
@@ -365,8 +367,11 @@ class TestTTL(Tester):
     def _base_expiration_overflow_policy_test(self, default_ttl, policy):
         """
         Checks that expiration date overflow policy is correctly applied
-        @jira_ticket CASSANDRA-14092
+        @jira_ticket CASSANDRA-14092 and CASSANDRA-14227
         """
+        # Post 5.0, TTL may overflow in 2038 (legacy) or 2106 (CASSANDRA-14227)
+        overflow_policy_applies = "NONE" != self.cluster.nodelist()[0].get_conf_option("storage_compatibility_mode") \
+                                  or datetime.date.today().year >= 2086
         MAX_TTL = 20 * 365 * 24 * 60 * 60  # 20 years in seconds
         default_time_to_live = MAX_TTL if default_ttl else None
         self.prepare(default_time_to_live=default_time_to_live)
@@ -384,9 +389,9 @@ class TestTTL(Tester):
         try:
             result = self.session1.execute_async(query + ";")
             result.result()
-            if policy == 'REJECT':
+            if policy == 'REJECT' and overflow_policy_applies:
                 pytest.fail("should throw InvalidRequest")
-            if self.cluster.version() >= '3.0':  # client warn only on 3.0+
+            if self.cluster.version() >= '3.0' and overflow_policy_applies:  # client warn only on 3.0+
                 if policy == 'CAP':
                     logger.debug("Warning is {}".format(result.warnings[0]))
                     assert 'exceeds maximum supported expiration' in result.warnings[0], 'Warning not found'
@@ -399,10 +404,10 @@ class TestTTL(Tester):
 
         self.cluster.flush()
         # Data should be present unless policy is reject
-        assert_row_count(self.session1, 'ttl_table', 0 if policy == 'REJECT' else 1)
+        assert_row_count(self.session1, 'ttl_table', 0 if (policy == 'REJECT' and overflow_policy_applies) else 1)
 
         # Check that warning is always logged, unless policy is REJECT
-        if policy != 'REJECT':
+        if policy != 'REJECT' and overflow_policy_applies:
             node1 = self.cluster.nodelist()[0]
             prefix = 'default ' if default_ttl else ''
             warning = node1.grep_log("Request on table {}.{} with {}ttl of {} seconds exceeds maximum supported expiration"
@@ -599,7 +604,11 @@ class TestRecoverNegativeExpirationDate(TestHelper):
         node.watch_log_for('Loading new SSTables', timeout=10)
 
         logger.debug("Check that there are no rows present")
-        assert_row_count(session, 'ttl_table', 0)
+        # CASSANDRA-14227: from 5.0 upwards, long TTLs allow reading overflowed rows
+        if self.cluster.version() >= '5.0':
+            assert_row_count(session, 'ttl_table', 1)
+        else:
+            assert_row_count(session, 'ttl_table', 0)
 
         logger.debug("Shutting down node")
         self.cluster.stop()
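
For context, the cutoffs behind the new overflow_policy_applies flag are the legacy 2038 limit (signed 32-bit epoch seconds) and the extended 2106 limit introduced by CASSANDRA-14227, which the test comment references. A minimal sketch under that assumption; would_overflow is a hypothetical helper, not part of the patch, and the test's year-2086 guard is simply 2106 minus the 20-year MAX_TTL:

    import time

    LEGACY_MAX_EXPIRATION = 2 ** 31 - 1    # 2038-01-19T03:14:07Z, signed 32-bit seconds
    EXTENDED_MAX_EXPIRATION = 2 ** 32 - 1  # 2106-02-07T06:28:15Z, unsigned 32-bit seconds

    def would_overflow(ttl_seconds, extended=True):
        # True if "now + ttl" lands past the applicable expiration cap
        cap = EXTENDED_MAX_EXPIRATION if extended else LEGACY_MAX_EXPIRATION
        return int(time.time()) + ttl_seconds > cap

    MAX_TTL = 20 * 365 * 24 * 60 * 60      # the 20-year TTL the test writes with
    print(would_overflow(MAX_TTL, extended=False))  # True: 20 years from now is past 2038
    print(would_overflow(MAX_TTL, extended=True))   # False until the calendar reaches 2086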
diff --git a/upgrade_tests/upgrade_through_versions_test.py b/upgrade_tests/upgrade_through_versions_test.py
index 183f2e87..1fc39d7c 100644
--- a/upgrade_tests/upgrade_through_versions_test.py
+++ b/upgrade_tests/upgrade_through_versions_test.py
@@ -432,6 +432,7 @@ class TestUpgrade(Tester):
 
                     self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
 
+                    logger.debug(str(self.fixture_dtest_setup.subprocs))
                     self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
                     logger.debug('Successfully upgraded %d of %d nodes to %s' %
                           (num + 1, len(self.cluster.nodelist()), version_meta.version))
@@ -488,7 +489,7 @@ class TestUpgrade(Tester):
         if not all(subproc_statuses):
             message = "A subprocess has terminated early. Subprocess statuses: 
"
             for s in subprocs:
-                message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
+                message += "{name} (is_alive: {aliveness}, exitCode: {exitCode}), ".format(name=s.name, aliveness=s.is_alive(), exitCode=s.exitcode)
             message += "attempting to terminate remaining subprocesses now."
             self._terminate_subprocs()
             raise RuntimeError(message)
@@ -654,7 +655,7 @@ class TestUpgrade(Tester):
         # queue of verified writes, which are update candidates
         verification_done_queue = Queue(maxsize=500)
 
-        writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
+        writer = Process(name="data_writer", target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
         # daemon subprocesses are killed automagically when the parent process exits
         writer.daemon = True
         self.fixture_dtest_setup.subprocs.append(writer)
@@ -663,7 +664,7 @@ class TestUpgrade(Tester):
         if wait_for_rowcount > 0:
             self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
 
-        verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
+        verifier = Process(name="data_checker", target=data_checker, args=(self, to_verify_queue, verification_done_queue))
         # daemon subprocesses are killed automagically when the parent process exits
         verifier.daemon = True
         self.fixture_dtest_setup.subprocs.append(verifier)
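
The upgrade-test half of the patch is pure diagnostics: giving each multiprocessing.Process a name and logging Process.exitcode turns the "terminated early" message into something actionable. A standalone sketch of what the improved message reports; the worker and its exit code are invented for illustration:

    from multiprocessing import Process

    def worker():
        raise SystemExit(3)  # stand-in for a data_writer/data_checker dying early

    if __name__ == '__main__':
        p = Process(name="data_writer", target=worker)
        p.daemon = True  # daemon children die with the parent, as in the test
        p.start()
        p.join()
        # exitcode is None while running, 0 on success, N for SystemExit(N),
        # and -N when the child was killed by signal N
        print("{name} (is_alive: {aliveness}, exitCode: {exitCode})".format(
            name=p.name, aliveness=p.is_alive(), exitCode=p.exitcode))
        # -> data_writer (is_alive: False, exitCode: 3)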

