Hello community,

here is the log from the commit of package kubernetes-salt for openSUSE:Factory checked in at 2018-07-27 10:55:40
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubernetes-salt (Old)
 and      /work/SRC/openSUSE:Factory/.kubernetes-salt.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubernetes-salt"

Fri Jul 27 10:55:40 2018 rev:30 rq:624696 version:4.0.0+git_r867_94b4e90

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubernetes-salt/kubernetes-salt.changes  2018-07-13 10:21:36.354473730 +0200
+++ /work/SRC/openSUSE:Factory/.kubernetes-salt.new/kubernetes-salt.changes     2018-07-27 10:55:44.477589058 +0200
@@ -1,0 +2,16 @@
+Thu Jul 19 10:13:46 UTC 2018 - containers-bugow...@suse.de
+
+- Commit c02c3ec by Michal Jura mj...@suse.com
+ Move deprecated flags to kubelet config.yaml
+
+
+-------------------------------------------------------------------
+Mon Jul 16 07:14:58 UTC 2018 - containers-bugow...@suse.de
+
+- Commit f0a0ac1 by Rafael Fernández López eresli...@ereslibre.es
+ Batch potentially dangerous and massive operations.
+ 
+ Fixes: bsc#1101124
+
+
+-------------------------------------------------------------------
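
The first entry moves command-line flags that upstream kubelet has deprecated into a KubeletConfiguration file that the kubelet loads through its --config flag. A minimal sketch of such a file, with illustrative values (the full template added by this commit appears in the diff below):

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250   # example only; the template below takes this from pillar['kubelet']['port']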

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubernetes-salt.spec ++++++
--- /var/tmp/diff_new_pack.2svNXR/_old  2018-07-27 10:55:44.909589885 +0200
+++ /var/tmp/diff_new_pack.2svNXR/_new  2018-07-27 10:55:44.909589885 +0200
@@ -32,7 +32,7 @@
 
 Name:           kubernetes-salt
 %define gitrepo salt
-Version:        4.0.0+git_r863_5c11a33
+Version:        4.0.0+git_r867_94b4e90
 Release:        0
 BuildArch:      noarch
 Summary:        Production-Grade Container Scheduling and Management

++++++ master.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/init.sls new/salt-master/salt/kubelet/init.sls
--- old/salt-master/salt/kubelet/init.sls       2018-07-12 09:57:10.000000000 +0200
+++ new/salt-master/salt/kubelet/init.sls       2018-07-19 12:15:26.000000000 +0200
@@ -25,7 +25,7 @@
          pillar['ssl']['kubelet_key'],
          o = 'system:nodes') }}
 
-kubelet-config:
+kubeconfig:
   file.managed:
     - name: {{ pillar['paths']['kubelet_config'] }}
     - source: salt://kubeconfig/kubeconfig.jinja
@@ -53,6 +53,14 @@
     - dir_mode: 755
     - makedirs: True
 
+kubelet-config:
+  file.managed:
+    - name:     /etc/kubernetes/kubelet-config.yaml
+    - source:   salt://kubelet/kubelet-config.jinja
+    - template: jinja
+    - require:
+      - sls:    kubernetes-common
+
 kubelet:
   pkg.installed:
     - pkgs:
@@ -70,6 +78,7 @@
     - enable:   True
     - watch:
       - file:   /etc/kubernetes/config
+      - kubeconfig
       - kubelet-config
       - file:   kubelet
 {% if salt.caasp_pillar.get('cloud:provider') == 'openstack' %}
@@ -80,6 +89,7 @@
     - require:
       - file:   /etc/kubernetes/manifests
       - file:   /etc/kubernetes/kubelet-initial
+      - kubeconfig
       - kubelet-config
       - cmd: unmount-swaps
   caasp_retriable.retry:
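
In init.sls the state that writes the kubeconfig is renamed from kubelet-config to kubeconfig, which frees the kubelet-config ID for a new file.managed state that renders /etc/kubernetes/kubelet-config.yaml; the service then watches both, so a change to either file restarts the kubelet. A minimal sketch of that watch pattern, assuming illustrative state IDs:

kubelet-config:
  file.managed:
    - name: /etc/kubernetes/kubelet-config.yaml
    - source: salt://kubelet/kubelet-config.jinja
    - template: jinja

kubelet:
  service.running:
    - enable: True
    - watch:
      - kubelet-config   # a re-rendered config file reports changes and restarts the service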
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/kubelet-config.jinja new/salt-master/salt/kubelet/kubelet-config.jinja
--- old/salt-master/salt/kubelet/kubelet-config.jinja   1970-01-01 01:00:00.000000000 +0100
+++ new/salt-master/salt/kubelet/kubelet-config.jinja   2018-07-19 12:15:26.000000000 +0200
@@ -0,0 +1,59 @@
+kind: KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+staticPodPath: "/etc/kubernetes/manifests"
+syncFrequency: 1m0s
+fileCheckFrequency: 20s
+httpCheckFrequency: 20s
+address: 0.0.0.0
+port: {{ pillar['kubelet']['port'] }}
+tlsCertFile: "{{ pillar['ssl']['kubelet_crt'] }}"
+tlsPrivateKeyFile: "{{ pillar['ssl']['kubelet_key'] }}"
+authentication:
+  x509:
+    clientCAFile: "{{ pillar['ssl']['ca_file'] }}"
+  webhook:
+    enabled: false
+    cacheTTL: 2m0s
+  anonymous:
+    enabled: true
+authorization:
+  mode: AlwaysAllow
+  webhook:
+    cacheAuthorizedTTL: 5m0s
+    cacheUnauthorizedTTL: 30s
+healthzPort: 10248
+healthzBindAddress: 127.0.0.1
+clusterDomain: {{ pillar['dns']['domain'] }}
+clusterDNS:
+  - {{ pillar['dns']['cluster_ip'] }}
+nodeStatusUpdateFrequency: 10s
+cgroupRoot: "/"
+cgroupsPerQOS: true
+cgroupDriver: cgroupfs
+cpuManagerPolicy: none
+cpuManagerReconcilePeriod: 10s
+runtimeRequestTimeout: 2m0s
+maxPods: 110
+podPidsLimit: -1
+resolvConf: "/etc/resolv.conf"
+cpuCFSQuota: true
+maxOpenFiles: 1000000
+contentType: application/vnd.kubernetes.protobuf
+serializeImagePulls: true
+evictionHard:
+  imagefs.available: 15%
+  memory.available: 100Mi
+  nodefs.available: 10%
+  nodefs.inodesFree: 5%
+evictionPressureTransitionPeriod: 5m0s
+enableControllerAttachDetach: true
+makeIPTablesUtilChains: true
+iptablesMasqueradeBit: 14
+iptablesDropBit: 15
+failSwapOn: true
+containerLogMaxSize: 10Mi
+containerLogMaxFiles: 5
+systemReservedCgroup: system
+kubeReservedCgroup: podruntime.slice
+enforceNodeAllocatable:
+  - pods
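
Each {{ pillar[...] }} expression in the template above is resolved at render time from the Salt pillar. Assuming hypothetical pillar values, the templated fields would come out roughly as:

port: 10250
tlsCertFile: "/etc/pki/kubelet.crt"
clusterDomain: cluster.local
clusterDNS:
  - 172.24.0.2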
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/kubelet/kubelet.jinja new/salt-master/salt/kubelet/kubelet.jinja
--- old/salt-master/salt/kubelet/kubelet.jinja  2018-07-12 09:57:10.000000000 +0200
+++ new/salt-master/salt/kubelet/kubelet.jinja  2018-07-19 12:15:26.000000000 +0200
@@ -7,51 +7,46 @@
 # kubernetes kubelet (minion) config
 
 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
-KUBELET_ADDRESS="--address=0.0.0.0"
+# DEPRECATED!!! This option was moved to kubelet-config.yaml
+KUBELET_ADDRESS=""
 
 # The port for the info server to serve on
-KUBELET_PORT="--port={{ pillar['kubelet']['port'] }}"
+# DEPRECATED!!! This option was moved to kubelet-config.yaml
+KUBELET_PORT=""
 
 # Ensure we match the machine hostname
 KUBELET_HOSTNAME="--hostname-override={{ grains['nodename'] }}"
 
 # Add your own!
 KUBELET_ARGS="\
-    --cgroups-per-qos \
-    --cgroup-driver=cgroupfs \
-    --cgroup-root=/ \
-    --kube-reserved-cgroup=podruntime.slice \
-{% if salt.caasp_pillar.get_kubelet_reserved_resources('kube') -%}
+    --config=/etc/kubernetes/kubelet-config.yaml \
+{%- if salt.caasp_pillar.get_kubelet_reserved_resources('kube') %}
     --kube-reserved={{ salt.caasp_pillar.get_kubelet_reserved_resources('kube') }} \
-{% endif -%}
-    --system-reserved-cgroup=system \
-{% if salt.caasp_pillar.get_kubelet_reserved_resources('system') -%}
+{%- endif %}
+{%- if salt.caasp_pillar.get_kubelet_reserved_resources('system') %}
     --system-reserved={{ salt.caasp_pillar.get_kubelet_reserved_resources('system') }} \
-{% endif -%}
-    --enforce-node-allocatable=pods \
-{% if pillar['kubelet']['eviction-hard'] -%}
+{%- endif %}
+{%- if pillar['kubelet']['eviction-hard'] %}
     --eviction-hard={{ pillar['kubelet']['eviction-hard'] }} \
-{% endif -%}
-    --cluster-dns={{ pillar['dns']['cluster_ip'] }} \
-    --cluster-domain={{ pillar['dns']['domain'] }} \
+{%- endif %}
     --node-ip={{ salt.caasp_net.get_primary_ip() }} \
-{% if grains['lsb_distrib_id'] == "CAASP" -%}
-    --pod-manifest-path=/etc/kubernetes/manifests \
+{%- if grains['lsb_distrib_id'] == "CAASP" %}
     --pod-infra-container-image={{ pillar['pod_infra_container_image'] }} \
-{% endif -%}
-{% if cloud_provider -%}
-               --cloud-provider={{ pillar['cloud']['provider'] }} \
-  {% if cloud_provider == 'openstack' -%}
-               --cloud-config=/etc/kubernetes/openstack-config \
-  {% endif -%}
-{% endif -%}
+{%- endif %}
+{%- if cloud_provider %}
+    --cloud-provider={{ pillar['cloud']['provider'] }} \
+  {%- if cloud_provider == 'openstack' %}
+    --cloud-config=/etc/kubernetes/openstack-config \
+  {%- endif %}
+{%- endif %}
+{%- if pillar['components']['kubelet']['args'] %}
     {{ pillar['components']['kubelet']['args'] }} \
-{% if salt.caasp_cri.cri_name() == 'crio' -%}
+{%- endif %}
+{%- if salt.caasp_cri.cri_name() == 'crio' %}
     --container-runtime=remote \
     --container-runtime-endpoint={{ pillar['cri']['crio']['socket'] }} \
     --runtime-request-timeout=10m \
-{% endif -%}
-    --client-ca-file={{ pillar['ssl']['ca_file'] }} \
+{%- endif %}
     --network-plugin=cni \
     --cni-bin-dir={{ pillar['cni']['dirs']['bin'] }} \
     --cni-conf-dir={{ pillar['cni']['dirs']['conf'] }} \
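
Net effect on kubelet.jinja: every flag that now lives in kubelet-config.yaml (--address, --port, the cgroup and reserved-cgroup flags, --enforce-node-allocatable, --cluster-dns, --cluster-domain, --pod-manifest-path, --client-ca-file) drops out of KUBELET_ARGS in favour of a single pointer to the file, and the Jinja whitespace control flips from trailing ({% ... -%}) to leading ({%- ... %}) so the rendered argument list keeps its backslash continuations on contiguous lines. A condensed sketch of the resulting core:

KUBELET_ARGS="\
    --config=/etc/kubernetes/kubelet-config.yaml \
{%- if cloud_provider %}
    --cloud-provider={{ pillar['cloud']['provider'] }} \
{%- endif %}
    --network-plugin=cni \
    ..."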
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/salt-master/salt/orch/update.sls new/salt-master/salt/orch/update.sls
--- old/salt-master/salt/orch/update.sls        2018-07-12 09:57:10.000000000 +0200
+++ new/salt-master/salt/orch/update.sls        2018-07-19 12:15:26.000000000 +0200
@@ -127,7 +127,6 @@
     - require:
       - admin-setup
 
-
 # Before the real orchestration starts cordon all the worker nodes running 2.0. This way we ensure
 # that no pods will be rescheduled on these machines while we upgrade: all rescheduled workloads
 # will be strictly sent to upgraded nodes (the only ones uncordoned).
@@ -135,6 +134,7 @@
   salt.state:
     - tgt: '( {{ is_updateable_worker_tgt }} ) and G@osrelease:2.0'
     - tgt_type: compound
+    - batch: 3
     - sls:
         - migrations.2-3.kubelet.cordon
     - require:
@@ -316,6 +316,7 @@
   salt.state:
     - tgt: '( {{ is_updateable_worker_tgt }} ) and G@osrelease:2.0'
     - tgt_type: compound
+    - batch: 3
     - sls:
         - etc-hosts
         - migrations.2-3.haproxy
@@ -332,6 +333,7 @@
   salt.state:
     - tgt: '( {{ is_updateable_worker_tgt }} ) and G@osrelease:3.0'
     - tgt_type: compound
+    - batch: 3
     - sls:
         - etc-hosts
         - haproxy
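
The three batch: 3 additions implement the second changelog entry (bsc#1101124): with batching, Salt applies the listed states to at most three matched minions at a time instead of hitting the whole compound target at once, so cordoning and migrating the 2.0/3.0 workers proceeds in small waves. Reduced to the relevant keys, each of these orchestration states now has the shape:

cordon-old-workers:                # illustrative ID; the real ones are in update.sls
  salt.state:
    - tgt: '( {{ is_updateable_worker_tgt }} ) and G@osrelease:2.0'
    - tgt_type: compound
    - batch: 3                     # run on at most 3 minions concurrently
    - sls:
        - migrations.2-3.kubelet.cordon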

