Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory checked in at 2018-06-08 23:16:50
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Fri Jun  8 23:16:50 2018 rev:25 rq:614578 version:4.0.0+git_r829_b3f572e

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-06-05 12:52:57.857522171 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-06-08 23:16:54.876923710 +0200
@@ -1,0 +2,19 @@
+Wed Jun  6 13:48:35 UTC 2018 - containers-bui...@suse.de
+
+- Commit c77b0ee by Alvaro Saurin alvaro.sau...@gmail.com
+ Use the cache whenever something bad happens when refreshing the Pillar from
+ Velum.
+ 
+ bsc#1093123
+
+
+-------------------------------------------------------------------
+Tue Jun  5 14:14:40 UTC 2018 - containers-bui...@suse.de
+
+- Commit 450cfdb by Alvaro Saurin alvaro.sau...@gmail.com
+ Perform some checks before starting the node removal.
+ 
+ feature#node_removal
+
+
+-------------------------------------------------------------------

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.ErBkO7/_old  2018-06-08 23:16:55.596897705 +0200
+++ /var/tmp/diff_new_pack.ErBkO7/_new  2018-06-08 23:16:55.604897416 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        4.0.0+git_r823_5652fd8
+Version:        4.0.0+git_r829_b3f572e
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/_pillar/velum.py new/salt-master/salt/_pillar/velum.py
--- old/salt-master/salt/_pillar/velum.py       2018-06-01 16:37:28.000000000 +0200
+++ new/salt-master/salt/_pillar/velum.py       2018-06-06 15:44:49.000000000 +0200
@@ -67,20 +67,25 @@
         with open(os.environ['VELUM_INTERNAL_API_PASSWORD_FILE'], 'r') as f:
             password = f.read().strip()
 
-    data = __salt__['http.query'](url=url,
-                                  ca_bundle=ca_bundle,
-                                  username=username,
-                                  password=password,
-                                  decode=True,
-                                  decode_type='json')
+    try:
+        data = __salt__['http.query'](url=url,
+                                      ca_bundle=ca_bundle,
+                                      username=username,
+                                      password=password,
+                                      decode=True,
+                                      decode_type='json')
+    except Exception as e:
+        log.error('Error when getting pillar from Velum: {0}. Will try to use the cache...'.format(e))
+        data = {}
 
-    if 'dict' in data:
+    if data and 'dict' in data:
         try:
             cache.store('caasp/pillar', minion_id, data['dict'])
         except Exception as e:
             log.warning('Error when populating the cache: {0}. Moving on, not critical'.format(e))
         return data['dict']
-    elif cache.contains('caasp/pillar', minion_id):
+
+    if cache.contains('caasp/pillar', minion_id):
         log.warning('Serving pillar from cache for minion {0}, since {1} was not available'.format(minion_id, url))
         return cache.fetch('caasp/pillar', minion_id)
 
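The velum.py change above is a "try the source, fall back to the cache"
pattern: any exception raised while querying Velum is logged and turned into
an empty result, and the previously cached pillar is served whenever no fresh
data is available. As a minimal standalone sketch of that pattern (the
fetch_remote callable and cache object below are hypothetical stand-ins for
__salt__['http.query'] and the Salt minion cache):

  import logging

  log = logging.getLogger(__name__)

  def get_pillar(minion_id, cache, fetch_remote):
      """Return pillar data, preferring the remote source and falling back
      to the local cache when the source is unreachable or returns junk."""
      try:
          data = fetch_remote()  # may raise on network/TLS/decoding errors
      except Exception as e:
          log.error('Error when getting pillar: %s. Will try the cache...', e)
          data = {}

      if data and 'dict' in data:
          try:
              cache.store('caasp/pillar', minion_id, data['dict'])
          except Exception as e:
              log.warning('Error when populating the cache: %s. Not critical', e)
          return data['dict']

      if cache.contains('caasp/pillar', minion_id):
          log.warning('Serving pillar from cache for minion %s', minion_id)
          return cache.fetch('caasp/pillar', minion_id)

      return {}
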
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/etcd/remove-pre-orchestration.sls new/salt-master/salt/etcd/remove-pre-orchestration.sls
--- old/salt-master/salt/etcd/remove-pre-orchestration.sls      1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/etcd/remove-pre-orchestration.sls      2018-06-06 15:44:49.000000000 +0200
@@ -0,0 +1,3 @@
+etcd:
+  # check the etcd cluster is healthy
+  caasp_etcd.healthy
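
The caasp_etcd.healthy state used above is a custom CaaSP Salt state and its
implementation is not part of this diff. Purely as an illustration of the kind
of check it performs, an etcd member can be probed through etcd's standard
/health endpoint (the URL below is a placeholder):

  import json
  import urllib.request

  def etcd_member_healthy(url="http://127.0.0.1:2379", timeout=10):
      """Return True if the etcd member behind `url` reports itself healthy.

      etcd answers GET /health with a small JSON document, e.g. {"health": "true"}.
      """
      with urllib.request.urlopen(url + "/health", timeout=timeout) as resp:
          body = json.load(resp)
      return str(body.get("health")).lower() == "true"
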
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kube-apiserver/remove-pre-orchestration.sls new/salt-master/salt/kube-apiserver/remove-pre-orchestration.sls
--- old/salt-master/salt/kube-apiserver/remove-pre-orchestration.sls    1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/kube-apiserver/remove-pre-orchestration.sls    2018-06-06 15:44:49.000000000 +0200
@@ -0,0 +1,40 @@
+include:
+  - kubectl-config
+
+{%- set target          = salt.caasp_pillar.get('target') %}
+{%- set target_nodename = salt.caasp_net.get_nodename(host=target) %}
+
+# Check the local ("internal") API server is reachable, and
+# then the API-through-haproxy is working fine too.
+
+{%- set api_server = 'api.' + pillar['internal_infra_domain'] %}
+
+{%- for port in ['int_ssl_port', 'ssl_port'] %}
+
+check-kube-apiserver-wait-port-{{ port }}:
+  caasp_retriable.retry:
+    - target:     caasp_http.wait_for_successful_query
+    - name:       {{ 'https://' + api_server + ':' + pillar['api'][port] }}/healthz
+    - wait_for:   300
+    # retry just in case the API server returns a transient error
+    - retry:
+        attempts: 3
+    - ca_bundle:  {{ pillar['ssl']['ca_file'] }}
+    - status:     200
+    - opts:
+        http_request_timeout: 30
+
+{% endfor %}
+
+{%- from '_macros/kubectl.jinja' import kubectl with context %}
+
+# A simple check: we can do a simple query (a `get nodes`)
+# to the API server
+{{ kubectl("check-kubectl-get-nodes", "get nodes") }}
+
+# Try to describe the target.
+# If kubectl cannot describe the node, we should abort before trying
+# to go further and maybe fail and leave the cluster in a unstable state.
+# Users should force-remove the node then...
+{{ kubectl("check-kubectl-describe-target",
+           "describe nodes " + target_nodename) }}
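
caasp_retriable.retry and caasp_http.wait_for_successful_query above are
custom CaaSP Salt modules; the state boils down to "keep querying the API
server's /healthz (on both the internal and the haproxy-facing port) until it
answers 200, with a bounded number of attempts". A simplified standalone
sketch of that check, using only the standard library (URL and CA path are
placeholders):

  import ssl
  import time
  import urllib.request

  def wait_for_healthz(url, ca_file, attempts=3, delay=5, timeout=30):
      """Return True once GET <url> answers HTTP 200, retrying a few times."""
      ctx = ssl.create_default_context(cafile=ca_file)
      for attempt in range(1, attempts + 1):
          try:
              with urllib.request.urlopen(url, timeout=timeout, context=ctx) as resp:
                  if resp.status == 200:
                      return True
          except Exception as e:
              print("healthz attempt {}/{} failed: {}".format(attempt, attempts, e))
          time.sleep(delay)
      return False

  # e.g.: wait_for_healthz("https://api.example.infra:6444/healthz", "/etc/pki/ca.crt")
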
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/init.sls new/salt-master/salt/kubelet/init.sls
--- old/salt-master/salt/kubelet/init.sls       2018-06-01 16:37:28.000000000 +0200
+++ new/salt-master/salt/kubelet/init.sls       2018-06-06 15:44:49.000000000 +0200
@@ -98,25 +98,6 @@
     - require:
       - service: kubelet
 
-{% if salt['grains.get']('kubelet:should_uncordon', false) %}
-uncordon-node:
-  caasp_cmd.run:
-    - name: |
-        kubectl --request-timeout=1m uncordon {{ grains['nodename'] }}
-    - retry:
-        attempts: 10
-        interval: 3
-        until: |
-          test "$(kubectl --request-timeout=1m --kubeconfig={{ pillar['paths']['kubeconfig'] }} get nodes {{ grains['nodename'] }} -o=jsonpath='{.spec.unschedulable}' 2>/dev/null)" != "true"
-    - require:
-      - file: {{ pillar['paths']['kubeconfig'] }}
-  grains.absent:
-    - name: kubelet:should_uncordon
-    - destructive: True
-    - require:
-      - caasp_cmd: uncordon-node
-{% endif %}
-
 #######################
 # config files
 #######################
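
The uncordon-node block removed above does not disappear: it is re-added, with
a kubectl-config include and an explicit no-op branch, in
kubelet/update-post-start-services.sls further down. Functionally it runs
"kubectl uncordon" and retries until the node's .spec.unschedulable field is
no longer "true". A rough standalone equivalent (node name and kubeconfig
path are placeholders; the real state is driven by the kubelet:should_uncordon
grain):

  import subprocess
  import time

  def uncordon(node, kubeconfig, attempts=10, interval=3):
      """Uncordon `node` and wait until the API reports it schedulable again."""
      base = ["kubectl", "--request-timeout=1m", "--kubeconfig", kubeconfig]
      for _ in range(attempts):
          subprocess.run(base + ["uncordon", node], check=False)
          out = subprocess.run(
              base + ["get", "nodes", node, "-o=jsonpath={.spec.unschedulable}"],
              capture_output=True, text=True, check=False).stdout.strip()
          if out != "true":  # empty output means the field is unset, i.e. schedulable
              return True
          time.sleep(interval)
      return False
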
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/update-post-start-services.sls new/salt-master/salt/kubelet/update-post-start-services.sls
--- old/salt-master/salt/kubelet/update-post-start-services.sls 2018-06-01 16:37:28.000000000 +0200
+++ new/salt-master/salt/kubelet/update-post-start-services.sls 2018-06-06 15:44:49.000000000 +0200
@@ -1,6 +1,36 @@
 # invoked by the "update" orchestration after starting
 # all the services after rebooting
 
+include:
+  - kubectl-config
+
+{% if salt['grains.get']('kubelet:should_uncordon', false) %}
+
+uncordon-node:
+  caasp_cmd.run:
+    - name: |
+        kubectl --request-timeout=1m uncordon {{ grains['nodename'] }}
+    - retry:
+        attempts: 10
+        interval: 3
+        until: |
+          test "$(kubectl --request-timeout=1m --kubeconfig={{ pillar['paths']['kubeconfig'] }} get nodes {{ grains['nodename'] }} -o=jsonpath='{.spec.unschedulable}' 2>/dev/null)" != "true"
+    - require:
+      - file: {{ pillar['paths']['kubeconfig'] }}
+  grains.absent:
+    - name: kubelet:should_uncordon
+    - destructive: True
+    - require:
+      - caasp_cmd: uncordon-node
+
+{% else %}
+
+uncordon-node:
+  cmd.run:
+    - name: "echo {{ grains['nodename'] }} should not be uncordoned. Skipping."
+
+{% endif %}
+
 remove-old-node-entry:
   cmd.run:
     - name: kubectl --request-timeout=1m delete node {{ grains['machine_id'] + "." + pillar['internal_infra_domain'] }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/removal.sls new/salt-master/salt/orch/removal.sls
--- old/salt-master/salt/orch/removal.sls       2018-06-01 16:37:28.000000000 +0200
+++ new/salt-master/salt/orch/removal.sls       2018-06-06 15:44:49.000000000 +0200
@@ -40,7 +40,7 @@
                                                                               etcd_members=etcd_members,
                                                                               excluded=nodes_down) %}
 
-# Ensure we mark all nodes with the "as node is being removed" grain.
+# Ensure we mark all nodes with the "a node is being removed" grain.
 # This will ensure the update-etc-hosts orchestration is not run.
 set-cluster-wide-removal-grain:
   salt.function:
@@ -51,8 +51,14 @@
       - removal_in_progress
       - true
 
-# make sure we have a solid ground before starting the removal
+# Make sure we have a solid ground before starting the removal
 # (ie, expired certs produce really funny errors)
+# We could highstate everything, but that would
+# 1) take a significant amount of time
+# 2) restart many services
+# instead of that, we will
+# * update some things, and
+# * do some checks before removing anything
 update-config:
   salt.state:
     - tgt: 'P@roles:(kube-master|kube-minion|etcd) and {{ all_responsive_nodes_tgt }}'
@@ -64,6 +70,17 @@
     - require:
       - set-cluster-wide-removal-grain
 
+pre-removal-checks:
+  salt.state:
+    - tgt: '{{ super_master_tgt }}'
+    - sls:
+      - etcd.remove-pre-orchestration
+      - kube-apiserver.remove-pre-orchestration
+    - pillar:
+        target: {{ target }}
+    - require:
+      - update-config
+
 {##############################
  # set grains
  #############################}
@@ -76,7 +93,7 @@
       - node_removal_in_progress
       - true
     - require:
-      - update-config
+      - pre-removal-checks
 
 {%- if replacement %}
 
@@ -88,7 +105,7 @@
       - node_addition_in_progress
       - true
     - require:
-      - update-config
+      - pre-removal-checks
 
   {#- and then we can assign these (new) roles to the replacement #}
   {% for role in replacement_roles %}
@@ -100,7 +117,7 @@
       - roles
       - {{ role }}
     - require:
-      - update-config
+      - pre-removal-checks
       - assign-addition-grain
   {% endfor %}
 
@@ -115,7 +132,7 @@
       - saltutil.refresh_grains
       - mine.update
     - require:
-      - update-config
+      - pre-removal-checks
       - assign-removal-grain
   {%- for role in replacement_roles %}
       - assign-{{ role }}-role-to-replacement
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-06-01 16:37:28.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-06-06 15:44:49.000000000 +0200
@@ -100,7 +100,7 @@
     - require:
       - admin-apply-haproxy
 
-# Perform any migrations necessary before starting the update orchestration. All services and
+# Perform any necessary migrations before starting the update orchestration. All services and
 # machines should be running and we can migrate some data on the whole cluster and then proceed
 # with the real update.
 pre-orchestration-migration:
@@ -159,7 +159,7 @@
     - require:
         - etcd-setup
 
-# Perform any migratrions necessary before services are shutdown
+# Perform any necessary migrations before services are shutdown
 {{ master_id }}-pre-reboot:
   salt.state:
     - tgt: '{{ master_id }}'
@@ -192,7 +192,7 @@
     - require:
       - {{ master_id }}-reboot
 
-# Perform any migratrions necessary before salt starts doing
+# Perform any necessary migrations before salt starts doing
 # "real work" again
 {{ master_id }}-post-reboot:
   salt.state:
@@ -219,16 +219,6 @@
     - require:
       - {{ master_id }}-apply-haproxy
 
-# Perform any migratrions after services are started
-{{ master_id }}-post-start-services:
-  salt.state:
-    - tgt: '{{ master_id }}'
-    - sls:
-      - cni.update-post-start-services
-      - kubelet.update-post-start-services
-    - require:
-      - {{ master_id }}-start-services
-
 {{ master_id }}-reboot-needed-grain:
   salt.function:
     - tgt: '{{ master_id }}'
@@ -238,10 +228,25 @@
     - kwarg:
         destructive: True
     - require:
-      - {{ master_id }}-post-start-services
+      - {{ master_id }}-start-services
 
 {% endfor %}
 
+# Perform migrations after all masters have been updated
+all-masters-post-start-services:
+  salt.state:
+    - tgt: '{{ is_master_tgt }}'
+    - tgt_type: compound
+    - batch: 3
+    - sls:
+      - cni.update-post-start-services
+      - kubelet.update-post-start-services
+    - require:
+      - etcd-setup
+{%- for master_id in masters.keys() %}
+      - {{ master_id }}-reboot-needed-grain
+{%- endfor %}
+
 {%- set workers = salt.saltutil.runner('mine.get', tgt=is_updateable_worker_tgt, fun='network.interfaces', tgt_type='compound') %}
 {%- for worker_id, ip in workers.items() %}
 
@@ -256,13 +261,13 @@
       - cri.stop
       - etcd.stop
     - require:
-      - pre-orchestration-migration
+      - all-masters-post-start-services
       # wait until all the masters have been updated
 {%- for master_id in masters.keys() %}
       - {{ master_id }}-reboot-needed-grain
 {%- endfor %}
 
-# Perform any migrations necessary before rebooting
+# Perform any necessary migrations before rebooting
 {{ worker_id }}-pre-reboot:
   salt.state:
     - tgt: '{{ worker_id }}'
@@ -294,7 +299,7 @@
     - require:
       - {{ worker_id }}-reboot
 
-# Perform any migratrions necessary before salt starts doing
+# Perform any necessary migrations before salt starts doing
 # "real work" again
 {{ worker_id }}-post-reboot:
   salt.state:
@@ -321,7 +326,7 @@
     - require:
       - salt: {{ worker_id }}-apply-haproxy
 
-# Perform any migratrions after services are started
+# Perform any migrations after services are started
 {{ worker_id }}-update-post-start-services:
   salt.state:
     - tgt: '{{ worker_id }}'
@@ -370,7 +375,7 @@
       - kubelet.configure-taints
       - kubelet.configure-labels
     - require:
-      - pre-orchestration-migration
+      - all-masters-post-start-services
 # wait until all the machines in the cluster have been upgraded
 {%- for master_id in masters.keys() %}
       # We use the last state within the masters loop, which is different

