Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2023-03-29 23:28:26
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.31432 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Wed Mar 29 23:28:26 2023 rev:289 rq:1075283 version:4.5.0+20230329.6d95249b Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2023-03-27 18:18:10.683707462 +0200 +++ /work/SRC/openSUSE:Factory/.crmsh.new.31432/crmsh.changes 2023-03-29 23:28:28.963868277 +0200 @@ -1,0 +2,27 @@ +Wed Mar 29 13:58:52 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230329.6d95249b: + * Dev: behave: save stderr when running command on remote + +------------------------------------------------------------------- +Wed Mar 29 10:08:43 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230329.0b20d25b: + * Dev: unittest: Adjust unit test for previous change + * Dev: bootstrap: Generate the public key on the remote if it does not exist + * Dev: behave: Add functional test for missing public key case + +------------------------------------------------------------------- +Wed Mar 29 07:35:40 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230329.34448a8c: + * Dev: unittest: Adjust unit test for previous change + * Dev: bootstrap: Remove /var/lib/crm and ~/.config/crm/crm.conf when removing node + +------------------------------------------------------------------- +Mon Mar 27 15:36:13 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230327.9a683c11: + * Dev: behave: Add more test cases for bug 1209193 + +------------------------------------------------------------------- Old: ---- crmsh-4.5.0+20230327.c76ad5d5.tar.bz2 New: ---- crmsh-4.5.0+20230329.6d95249b.tar.bz2 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.3qLQpy/_old 2023-03-29 23:28:29.471870992 +0200 +++ /var/tmp/diff_new_pack.3qLQpy/_new 2023-03-29 23:28:29.475871013 +0200 @@ -36,7 +36,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 4.5.0+20230327.c76ad5d5 +Version: 4.5.0+20230329.6d95249b Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.3qLQpy/_old 2023-03-29 23:28:29.519871248 +0200 +++ /var/tmp/diff_new_pack.3qLQpy/_new 2023-03-29 23:28:29.523871270 +0200 @@ -9,7 +9,7 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">a108bd873f627434cea01de0805f0ebdc4b63b54</param> + <param name="changesrevision">6d95249b45798e626c6ea395dd3182d54b577219</param> </service> </servicedata> (No newline at EOF) ++++++ crmsh-4.5.0+20230327.c76ad5d5.tar.bz2 -> crmsh-4.5.0+20230329.6d95249b.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/crmsh/bootstrap.py new/crmsh-4.5.0+20230329.6d95249b/crmsh/bootstrap.py --- old/crmsh-4.5.0+20230327.c76ad5d5/crmsh/bootstrap.py 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/crmsh/bootstrap.py 2023-03-29 15:37:54.000000000 +0200 @@ -142,7 +142,7 @@ self.rm_list = [SYSCONFIG_SBD, CSYNC2_CFG, corosync.conf(), CSYNC2_KEY, COROSYNC_AUTH, "/var/lib/heartbeat/crm/*", "/var/lib/pacemaker/cib/*", "/var/lib/corosync/*", "/var/lib/pacemaker/pengine/*", PCMK_REMOTE_AUTH, - "/var/lib/csync2/*"] + "/var/lib/csync2/*", "~/.config/crm/*"] @classmethod def set_context(cls, options): @@ -1039,6 +1039,7 @@ # which breaks shell expansion used in cmd cmd = ''' [ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C 
"Cluster internal on $(hostname)" -N '' +[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub ''' result = utils.su_subprocess_run( local_sudoer, @@ -1049,6 +1050,7 @@ ) if result.returncode != 0: raise ValueError(codecs.decode(result.stdout, 'utf-8', 'replace')) + cmd = 'cat ~/.ssh/id_rsa.pub' result = utils.su_subprocess_run( local_sudoer, @@ -2606,6 +2608,8 @@ else: utils.fatal("Specified node {} is not configured in cluster! Unable to remove.".format(_context.cluster_node)) + # In case any crm command can re-generate upgrade_seq again + utils.get_stdout_or_raise_error("rm -rf /var/lib/crmsh", _context.cluster_node) bootstrap_finished() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_bugs.feature new/crmsh-4.5.0+20230329.6d95249b/test/features/bootstrap_bugs.feature --- old/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_bugs.feature 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/test/features/bootstrap_bugs.feature 2023-03-29 15:37:54.000000000 +0200 @@ -137,8 +137,8 @@ When Run "crm cluster stop" on "hanode1" Then Service "corosync" is "stopped" on "hanode1" - @clean @skip_non_root + @clean Scenario: Passwordless for root, not for sudoer(bsc#1209193) Given Cluster service is "stopped" on "hanode1" And Cluster service is "stopped" on "hanode2" @@ -156,8 +156,29 @@ And Run "test -f /tmp/1209193" on "hanode1" And Run "test -f /tmp/1209193" on "hanode2" + @skip_non_root @clean + Scenario: Missing public key + Given Cluster service is "stopped" on "hanode1" + And Cluster service is "stopped" on "hanode2" + When Run "crm cluster init -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + When Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Cluster service is "started" on "hanode2" + When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1" + When Run "rm -f /root/.ssh/id_rsa.pub" on "hanode2" + When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1" + When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2" + When Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1" + And Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2" + And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1" + And Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2" + And Run "crm status" on "hanode1" + Then Check user shell for hacluster between "hanode1 hanode2" + Then Check passwordless for hacluster between "hanode1 hanode2" + @skip_non_root + @clean Scenario: Do upgrade job without root passwordless Given Cluster service is "stopped" on "hanode1" And Cluster service is "stopped" on "hanode2" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_sbd_normal.feature new/crmsh-4.5.0+20230329.6d95249b/test/features/bootstrap_sbd_normal.feature --- old/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_sbd_normal.feature 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/test/features/bootstrap_sbd_normal.feature 2023-03-29 15:37:54.000000000 +0200 @@ -178,8 +178,8 @@ Then Node "hanode2" is UNCLEAN Then Wait "60" seconds for "hanode2" successfully fenced - @clean @skip_non_root + @clean Scenario: Setup sbd and test fence node, use hacluster to fence Given Has disk "/dev/sda1" on "hanode1" Given Cluster service is "stopped" on "hanode1" diff -urN '--exclude=CVS' '--exclude=.cvsignore' 
'--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/test/features/qdevice_setup_remove.feature new/crmsh-4.5.0+20230329.6d95249b/test/features/qdevice_setup_remove.feature --- old/crmsh-4.5.0+20230327.c76ad5d5/test/features/qdevice_setup_remove.feature 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/test/features/qdevice_setup_remove.feature 2023-03-29 15:37:54.000000000 +0200 @@ -122,6 +122,36 @@ And Service "corosync-qdevice" is "started" on "hanode1" And Show status from qnetd + @skip_non_root + @clean + Scenario: Passwordless for root, not for sudoer (bsc#1209193) + When Run "crm cluster init -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + When Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Cluster service is "started" on "hanode2" + When Run "useradd -m -s /bin/bash xin" on "hanode1" + When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode1" + When Run "rm -f /root/.config/crm/crm.conf" on "hanode1" + When Run "useradd -m -s /bin/bash xin" on "hanode2" + When Run "echo "xin ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/xin" on "hanode2" + When Run "rm -f /root/.config/crm/crm.conf" on "hanode2" + When Run "su xin -c "sudo crm cluster init qdevice --qnetd-hostname=qnetd-node -y"" on "hanode1" + Then Service "corosync-qdevice" is "started" on "hanode1" + And Service "corosync-qdevice" is "started" on "hanode2" + + @skip_non_root + @clean + Scenario: Missing crm/crm.conf (bsc#1209193) + When Run "crm cluster init -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + When Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Cluster service is "started" on "hanode2" + When Run "rm -f /root/.config/crm/crm.conf" on "hanode1" + When Run "rm -f /root/.config/crm/crm.conf" on "hanode2" + When Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1" + Then Service "corosync-qdevice" is "started" on "hanode1" + And Service "corosync-qdevice" is "started" on "hanode2" + @clean Scenario: One qnetd for multi cluster, add in parallel When Run "crm cluster init -n cluster1 -y" on "hanode1" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/test/features/steps/utils.py new/crmsh-4.5.0+20230329.6d95249b/test/features/steps/utils.py --- old/crmsh-4.5.0+20230327.c76ad5d5/test/features/steps/utils.py 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/test/features/steps/utils.py 2023-03-29 15:37:54.000000000 +0200 @@ -78,9 +78,11 @@ context.failed = True else: out = utils.to_ascii(results[0][1][1]) + err = utils.to_ascii(results[0][1][2]) context.stdout = out + context.stderr = err context.return_code = 0 - return 0, out, None + return 0, out, err def check_service_state(context, service_name, state, addr): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_bootstrap.py new/crmsh-4.5.0+20230329.6d95249b/test/unittests/test_bootstrap.py --- old/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_bootstrap.py 2023-03-27 16:00:29.000000000 +0200 +++ new/crmsh-4.5.0+20230329.6d95249b/test/unittests/test_bootstrap.py 2023-03-29 15:37:54.000000000 +0200 @@ -385,6 +385,7 @@ 'ssh -o StrictHostKeyChecking=no remote_sudoer@remote_host sudo -H -u remote_user /bin/sh', input=''' [ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -C "Cluster internal on $(hostname)" -N '' +[ -f ~/.ssh/id_rsa.pub ] 
|| ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub '''.encode('utf-8'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -1428,6 +1429,7 @@ mock_this_node.assert_called_once_with() mock_error.assert_called_once_with("Removing self requires --force") + @mock.patch('crmsh.utils.get_stdout_or_raise_error') @mock.patch('crmsh.bootstrap.remove_self') @mock.patch('crmsh.utils.this_node') @mock.patch('crmsh.bootstrap.confirm') @@ -1438,7 +1440,7 @@ @mock.patch('crmsh.bootstrap.init') @mock.patch('crmsh.bootstrap.Context') def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active, - mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self): + mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run): mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None) mock_context.return_value = mock_context_inst mock_active.return_value = [True, True] @@ -1458,6 +1460,7 @@ mock_this_node.assert_called_once_with() mock_error.assert_not_called() mock_self.assert_called_once_with(True) + mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node1') @mock.patch('crmsh.xmlutil.listnodes') @mock.patch('crmsh.utils.this_node') @@ -1492,6 +1495,7 @@ mock_this_node.assert_called_once_with() mock_error.assert_called_once_with("Specified node node2 is not configured in cluster! Unable to remove.") + @mock.patch('crmsh.utils.get_stdout_or_raise_error') @mock.patch('crmsh.utils.fetch_cluster_node_list_from_node') @mock.patch('crmsh.bootstrap.remove_node_from_cluster') @mock.patch('crmsh.xmlutil.listnodes') @@ -1505,7 +1509,7 @@ @mock.patch('crmsh.bootstrap.Context') def test_bootstrap_remove(self, mock_context, mock_init, mock_active, mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, - mock_list, mock_remove, mock_fetch): + mock_list, mock_remove, mock_fetch, mock_run): mock_context_inst = mock.Mock(cluster_node="node2", qdevice_rm_flag=None, force=True) mock_context.return_value = mock_context_inst mock_active.side_effect = [True, False] @@ -1526,6 +1530,7 @@ mock_confirm.assert_not_called() mock_error.assert_not_called() mock_remove.assert_called_once_with() + mock_run.assert_called_once_with('rm -rf /var/lib/crmsh', 'node2') @mock.patch('crmsh.utils.fatal') @mock.patch('crmsh.utils.get_stdout_stderr_auto_ssh_no_input')
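For readers following the bootstrap.py change above: the added heredoc line recreates a missing ~/.ssh/id_rsa.pub from the existing private key with 'ssh-keygen -y'. The short Python sketch below mirrors that logic on a local machine; it is illustrative only and not part of crmsh (crmsh runs the equivalent shell snippet on the remote node through utils.su_subprocess_run), and the helper name ensure_rsa_public_key is hypothetical.

#!/usr/bin/env python3
# Illustrative sketch only; not part of crmsh. It mirrors the shell snippet added
# in bootstrap.py: create the RSA key pair if it is missing, and if only the
# public half is missing, derive it again from the private key (ssh-keygen -y).
import os
import subprocess


def ensure_rsa_public_key(ssh_dir: str = os.path.expanduser("~/.ssh")) -> None:
    os.makedirs(ssh_dir, mode=0o700, exist_ok=True)
    priv = os.path.join(ssh_dir, "id_rsa")
    pub = priv + ".pub"
    if not os.path.exists(priv):
        # Equivalent of: [ -f ~/.ssh/id_rsa ] || ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ''
        subprocess.run(
            ["ssh-keygen", "-q", "-t", "rsa", "-f", priv, "-N", ""],
            check=True,
        )
    if not os.path.exists(pub):
        # Equivalent of the added line:
        # [ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -y -f ~/.ssh/id_rsa > ~/.ssh/id_rsa.pub
        derived = subprocess.run(
            ["ssh-keygen", "-y", "-f", priv],
            check=True, capture_output=True, text=True,
        ).stdout
        with open(pub, "w") as f:
            f.write(derived)


if __name__ == "__main__":
    ensure_rsa_public_key()

Deriving the public key from the existing private key, instead of regenerating the whole key pair, keeps previously distributed authorized_keys entries valid, which is why the scenario "Missing public key" only deletes id_rsa.pub and then expects passwordless access to still work.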