Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory 
checked in at 2018-06-15 14:46:36
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Fri Jun 15 14:46:36 2018 rev:26 rq:616508 version:4.0.0+git_r837_d08a652

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-06-08 23:16:54.876923710 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-06-15 14:46:38.635532904 +0200
@@ -1,0 +2,68 @@
+Wed Jun 13 09:24:18 UTC 2018 - containers-bui...@suse.de
+
+- Commit a4480ed by Alvaro Saurin alvaro.sau...@gmail.com
+ Do not set the `bootstrap_complete` flag on all the nodes: set it only on the
+ nodes that had some role assigned. Remove the `bootstrap_in_progress` flag
+ even if the orchestration fails. Fixed a typo in a target.
+ 
+ bsc#1094078
+
+
+-------------------------------------------------------------------
+Wed Jun 13 09:22:07 UTC 2018 - containers-bui...@suse.de
+
+- Commit cf5b83b by Rafael Fernández López eresli...@ereslibre.es
+ Remove mine information when removing a node
+ 
+ This avoids rendering stale information about critical components, such as
+ the `etcd` endpoints in the `etcd` configuration.
+ 
+ `etcd` is very sensitive to this kind of misleading (stale) information: if
+ `ETCD_INITIAL_CLUSTER` lists more endpoints than actually exist in the
+ cluster, a new etcd instance will refuse to start.
+ 
+ Fixes: bsc#1097001 Fixes: bsc#1097147
+
+
+-------------------------------------------------------------------
+Mon Jun 11 14:17:04 UTC 2018 - containers-bui...@suse.de
+
+- Commit 23ce1f2 by Rafael Fernández López eresli...@ereslibre.es
+ Force `etc-hosts` sls to be run before `etcd`
+ 
+ Before the real update orchestration happens we update the etcd
+ certificates, so the machine isn't left isolated. However, in this process
+ the etcd configuration might refer to the new machine names if this happens
+ during the upgrade from 2.0 to 3.0. This might leave the etcd instances in a
+ state in which they cannot resolve other etcd peer names (because their
+ `/etc/hosts` file is outdated).
+ 
+ In order to prevent this, force the `etc-hosts` sls to be run before we
+ execute the `etcd` sls, so we are sure that `/etc/hosts` will contain both
+ the old and the new names during the upgrade, and etcd will be able to refer
+ to other peers using the new hostnames.
+ 
+ Fixes: bsc#1096750
+
+
+-------------------------------------------------------------------
+Mon Jun 11 11:57:40 UTC 2018 - containers-bui...@suse.de
+
+- Commit ec6238c by Rafael Fernández López eresli...@ereslibre.es
+ Also stop `kubelet` on masters when performing an upgrade
+ 
+ If some important change lands between Kubernetes updates, and since we
+ don't disable the `kubelet` service on the master nodes, `systemd` will try
+ to start the `kubelet` service when the machine gets rebooted and it will
+ fail repeatedly, hitting its start burst limit.
+ 
+ That failed state would then prevent our salt states from starting it again.
+ Stop the service and disable it on the masters too when we are performing an
+ upgrade; this way we are sure that we'll only try to start and enable it
+ once we have performed the changes required for it to succeed.
+ 
+ Fixes: bsc#1096768
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.6eD1eR/_old  2018-06-15 14:46:39.323507738 +0200
+++ /var/tmp/diff_new_pack.6eD1eR/_new  2018-06-15 14:46:39.331507445 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        4.0.0+git_r829_b3f572e
+Version:        4.0.0+git_r837_d08a652
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/force-removal.sls new/salt-master/salt/orch/force-removal.sls
--- old/salt-master/salt/orch/force-removal.sls 2018-06-06 15:44:49.000000000 +0200
+++ new/salt-master/salt/orch/force-removal.sls 2018-06-13 11:25:22.000000000 +0200
@@ -39,6 +39,12 @@
     - kwarg:
         destructive: True
 
+remove-target-mine:
+  salt.function:
+    - tgt: {{ target }}
+    - name: mine.flush
+    - fail_minions: {{ target }}
+
 remove-target-salt-key:
   salt.wheel:
     - name: key.reject
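
(Editor's note on the new `remove-target-mine` step: other nodes render their configuration from the Salt mine, so leaving the removed node's mine data behind would keep feeding it into files such as the etcd configuration. The Jinja fragment below is a hypothetical sketch only, not taken from this package, showing how stale mine entries would otherwise leak into `ETCD_INITIAL_CLUSTER`:

  {# hypothetical sketch: build the etcd member list from mine data #}
  {%- set peers = salt.mine.get('roles:etcd', 'network.get_hostname', tgt_type='grain') %}
  ETCD_INITIAL_CLUSTER="{% for id, name in peers.items() %}{{ name }}=https://{{ name }}:2380{% if not loop.last %},{% endif %}{% endfor %}"

Flushing the target's mine while it can still be reached removes its entries from that lookup, so freshly rendered configuration only lists members that still exist.)
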
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/kubernetes.sls new/salt-master/salt/orch/kubernetes.sls
--- old/salt-master/salt/orch/kubernetes.sls    2018-06-06 15:44:49.000000000 +0200
+++ new/salt-master/salt/orch/kubernetes.sls    2018-06-13 11:25:22.000000000 +0200
@@ -12,10 +12,11 @@
 {%- set additional_etcd_members = salt.caasp_etcd.get_additional_etcd_members(num_wanted=num_etcd_members,
                                                                               etcd_members=etcd_members) %}
 
-# Ensure the node is marked as bootstrapping
+# Ensure all the nodes are marked with a 'bootstrap_in_progress' flag
 set-bootstrap-in-progress-flag:
   salt.function:
-    - tgt: '*'
+    - tgt: 'roles:(ca|admin|kube-master|kube-minion|etcd)'
+    - tgt_type: grain_pcre
     - name: grains.setval
     - arg:
       - bootstrap_in_progress
@@ -77,7 +78,7 @@
 
 disable-rebootmgr:
   salt.state:
-    - tgt: 'roles:(admin|kube-master|minion|etcd)'
+    - tgt: 'roles:(admin|kube-master|kube-minion|etcd)'
     - tgt_type: grain_pcre
     - sls:
       - rebootmgr
@@ -232,11 +233,12 @@
     - require:
       - super-master-wait-for-services
 
-# This flag indicates at least one bootstrap has completed at some
-# point in time on this node.
+# Set the bootstrap complete in all the nodes where we really succeeded
+# (if `admin-wait-for-services` fails, we will not set the flag)
 set-bootstrap-complete-flag:
   salt.function:
-    - tgt: '*'
+    - tgt: 'bootstrap_in_progress:true'
+    - tgt_type: grain
     - name: grains.setval
     - arg:
       - bootstrap_complete
@@ -244,14 +246,14 @@
     - require:
       - admin-wait-for-services
 
-# Ensure the node is marked as finished bootstrapping
+# Ensure we remove the bootstrap_in_progress in all the nodes where it was set
+# NOTE: we must remove this flag even if the orchestration fails
 clear-bootstrap-in-progress-flag:
   salt.function:
-    - tgt: '*'
+    - tgt: 'bootstrap_in_progress:true'
+    - tgt_type: grain
     - name: grains.delval
     - arg:
       - bootstrap_in_progress
     - kwarg:
         destructive: True
-    - require:
-      - set-bootstrap-complete-flag
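
(Editor's note: the kubernetes.sls hunks above follow a set-then-clear grain pattern: mark only role-bearing nodes with a grain, then later target exactly the nodes that carry it instead of '*'. A minimal, generic sketch of that pattern; the state ids and the `my_marker` grain are made up for illustration:

  mark-nodes:
    salt.function:
      - tgt: 'roles:(kube-master|kube-minion)'
      - tgt_type: grain_pcre
      - name: grains.setval
      - arg:
        - my_marker
        - true

  clear-marked-nodes:
    salt.function:
      - tgt: 'my_marker:true'
      - tgt_type: grain
      - name: grains.delval
      - arg:
        - my_marker
      - kwarg:
          destructive: True

Because the clearing step targets the grain rather than '*', it only touches nodes where the flag was actually set, and it can run even when a later step of the orchestration has failed.)
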
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/removal.sls new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-06-06 15:44:49.000000000 +0200
+++ new/salt-master/salt/orch/removal.sls       2018-06-13 11:25:22.000000000 +0200
@@ -271,6 +271,14 @@
     - require:
       - shutdown-target
 
+# remove target information from the mine
+remove-target-mine:
+  salt.function:
+    - tgt: '{{ target }}'
+    - name: mine.flush
+    - require:
+        - remove-from-cluster-in-super-master
+
 # remove the Salt key and the mine for the target
 remove-target-salt-key:
   salt.wheel:
@@ -278,10 +286,10 @@
     - include_accepted: True
     - match: {{ target }}
     - require:
-      - remove-from-cluster-in-super-master
+      - remove-target-mine
 
 # remove target's data in the Salt Master's cache
-remove-target-mine:
+remove-target-mine-cache:
   salt.runner:
     - name: cache.clear_all
     - tgt: '{{ target }}'
@@ -316,7 +324,7 @@
       - saltutil.clear_cache
       - mine.update
     - require:
-      - remove-target-mine
+      - remove-target-mine-cache
 
 update-modules-after-removal:
   salt.function:
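
(Editor's note on the rename above: `mine.flush` is an execution module call that runs on the target minion itself and clears what it publishes to the mine, whereas `cache.clear_all` is a runner that drops the master-side cache for that minion. A minimal sketch of the two mechanisms side by side; state ids and the `target` variable follow the orchestration above, and the requisites shown in the diff are omitted for brevity:

  remove-target-mine:             # minion-side: empty the target's mine data
    salt.function:
      - tgt: '{{ target }}'
      - name: mine.flush

  remove-target-mine-cache:       # master-side: drop the cached data for the target
    salt.runner:
      - name: cache.clear_all
      - tgt: '{{ target }}'

Running the flush before `key.reject` matters: once the key is rejected, the master can no longer execute `mine.flush` on that minion.)
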
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-06-06 15:44:49.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-06-13 11:25:22.000000000 +0200
@@ -131,11 +131,16 @@
 #
 # Let's force etcd to refresh certificates on all machines, restarting the etcd service so we can
 # continue with the upgrade, as certificates will be valid for the old and the new SAN.
+#
+# We run the etc-hosts sls to make the machines refresh their references first (including old CaaSP
+# 2.0 and 3.0 naming). This way, etcd will be able to work with both namings during the upgrade
+# process.
 etcd-setup:
   salt.state:
     - tgt: '{{ is_etcd_tgt }}'
     - tgt_type: compound
     - sls:
+      - etc-hosts
       - etcd
     - batch: 1
     - require:
@@ -151,6 +156,7 @@
     - tgt: '{{ master_id }}'
     - sls:
       - container-feeder.stop
+      - kubelet.stop
       - kube-apiserver.stop
       - kube-controller-manager.stop
       - kube-scheduler.stop
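
(Editor's note: the `kubelet.stop` sls added above is not shown in this diff; as a rough sketch only, a stop-and-disable state of that kind typically boils down to something like:

  kubelet:
    service.dead:
      - name: kubelet
      - enable: False

With the service stopped and disabled, a reboot during the update window no longer triggers the systemd start-burst failures described in the changelog, and the later orchestration steps start and enable kubelet again once the updated configuration is in place.)
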

