Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package k0sctl for openSUSE:Factory
checked in at 2024-12-10 23:43:52
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/k0sctl (Old)
 and      /work/SRC/openSUSE:Factory/.k0sctl.new.29675 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "k0sctl" Tue Dec 10 23:43:52 2024 rev:9 rq:1229681 version:0.20.0 Changes: -------- --- /work/SRC/openSUSE:Factory/k0sctl/k0sctl.changes 2024-11-14 16:10:03.974228314 +0100 +++ /work/SRC/openSUSE:Factory/.k0sctl.new.29675/k0sctl.changes 2024-12-10 23:44:54.435992883 +0100 @@ -1,0 +2,20 @@ +Mon Dec 09 09:42:59 UTC 2024 - opensuse_buildserv...@ojkastl.de + +- Update to version 0.20.0: + * Use the URL from join token to validate connectivity (#799) + * Bump golang.org/x/text from 0.20.0 to 0.21.0 (#805) + * Add zsh completion tip to README (#804) + * Remove system pods running check (#802) + * Fix installFlags change detection for --enable-worker flag + (#803) + * Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 (#800) + * Bump k8s.io/client-go from 0.31.2 to 0.31.3 (#797) + * Go forces the patch version into go.mod + * Do not require a specific go patch version for module use + * Use go1.23.3 (#796) + * Allow setting kubeconfig username (#793) + * Do not wait for scheduling events after reinstall (#794) + * Bump github.com/go-playground/validator/v10 from 10.22.1 to + 10.23.0 (#795) + +------------------------------------------------------------------- Old: ---- k0sctl-0.19.4.obscpio New: ---- k0sctl-0.20.0.obscpio ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ k0sctl.spec ++++++ --- /var/tmp/diff_new_pack.Bm6h8r/_old 2024-12-10 23:44:55.916054583 +0100 +++ /var/tmp/diff_new_pack.Bm6h8r/_new 2024-12-10 23:44:55.916054583 +0100 @@ -18,7 +18,7 @@ Name: k0sctl -Version: 0.19.4 +Version: 0.20.0 Release: 0 Summary: A bootstrapping and management tool for k0s clusters License: Apache-2.0 ++++++ _service ++++++ --- /var/tmp/diff_new_pack.Bm6h8r/_old 2024-12-10 23:44:55.944055750 +0100 +++ /var/tmp/diff_new_pack.Bm6h8r/_new 2024-12-10 23:44:55.948055916 +0100 @@ -2,7 +2,7 @@ <service name="obs_scm" mode="manual"> <param name="url">https://github.com/k0sproject/k0sctl.git</param> <param name="scm">git</param> - <param name="revision">v0.19.4</param> + <param name="revision">v0.20.0</param> <param name="versionformat">@PARENT_TAG@</param> <param name="versionrewrite-pattern">v(.*)</param> <param name="changesgenerate">enable</param> ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.Bm6h8r/_old 2024-12-10 23:44:55.964056583 +0100 +++ /var/tmp/diff_new_pack.Bm6h8r/_new 2024-12-10 23:44:55.968056750 +0100 @@ -1,6 +1,6 @@ <servicedata> <service name="tar_scm"> <param name="url">https://github.com/k0sproject/k0sctl.git</param> - <param name="changesrevision">a06d3f6c227d15c3c7f1b87205ee6b32a2000521</param></service></servicedata> + <param name="changesrevision">b361d94bb457aeb44678015241ee735aed34a335</param></service></servicedata> (No newline at EOF) ++++++ k0sctl-0.19.4.obscpio -> k0sctl-0.20.0.obscpio ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/README.md new/k0sctl-0.20.0/README.md --- old/k0sctl-0.19.4/README.md 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/README.md 2024-12-09 09:34:46.000000000 +0100 @@ -80,6 +80,9 @@ ```sh k0sctl completion > /usr/local/share/zsh/site-functions/_k0sctl + +# For oh my zsh +k0sctl completion > $ZSH_CACHE_DIR/completions/_k0sctl ``` ##### Fish @@ -194,6 +197,7 @@ kind: Cluster metadata: name: my-k0s-cluster + user: admin spec: hosts: - role: controller @@ -256,6 +260,7 @@ ```yaml metadata: name: k0s-cluster-name + user: kubernetes-admin ``` ### Spec Fields diff -urN '--exclude=CVS' 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/action/apply.go new/k0sctl-0.20.0/action/apply.go
--- old/k0sctl-0.19.4/action/apply.go   2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/action/apply.go   2024-12-09 09:34:46.000000000 +0100
@@ -32,6 +32,10 @@
 	KubeconfigOut io.Writer
 	// KubeconfigAPIAddress is the API address to use in the kubeconfig
 	KubeconfigAPIAddress string
+	// KubeconfigUser is the username to use in the kubeconfig
+	KubeconfigUser string
+	// KubeconfigCluster is the cluster name to use in the kubeconfig
+	KubeconfigCluster string
 	// ConfigPath is the path to the configuration file (used for kubeconfig command tip on success)
 	ConfigPath string
 }
@@ -92,7 +96,7 @@
 		},
 	}
 	if opts.KubeconfigOut != nil {
-		apply.Phases.InsertBefore(unlockPhase.Title(), &phase.GetKubeconfig{APIAddress: opts.KubeconfigAPIAddress})
+		apply.Phases.InsertBefore(unlockPhase.Title(), &phase.GetKubeconfig{APIAddress: opts.KubeconfigAPIAddress, User: opts.KubeconfigUser, Cluster: opts.KubeconfigCluster})
 	}
 
 	return apply
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/action/kubeconfig.go new/k0sctl-0.20.0/action/kubeconfig.go
--- old/k0sctl-0.19.4/action/kubeconfig.go      2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/action/kubeconfig.go      2024-12-09 09:34:46.000000000 +0100
@@ -9,6 +9,8 @@
 	// Manager is the phase manager
 	Manager *phase.Manager
 	KubeconfigAPIAddress string
+	KubeconfigUser string
+	KubeconfigCluster string
 
 	Kubeconfig string
 }
@@ -21,7 +23,7 @@
 	k.Manager.AddPhase(
 		&phase.Connect{},
 		&phase.DetectOS{},
-		&phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress},
+		&phase.GetKubeconfig{APIAddress: k.KubeconfigAPIAddress, User: k.KubeconfigUser, Cluster: k.KubeconfigCluster},
 		&phase.Disconnect{},
 	)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/cmd/apply.go new/k0sctl-0.20.0/cmd/apply.go
--- old/k0sctl-0.19.4/cmd/apply.go      2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/cmd/apply.go      2024-12-09 09:34:46.000000000 +0100
@@ -41,6 +41,16 @@
 			Name:  "kubeconfig-api-address",
 			Usage: "Override the API address in the kubeconfig when kubeconfig-out is set",
 		},
+		&cli.StringFlag{
+			Name:        "kubeconfig-user",
+			Usage:       "Set kubernetes username",
+			DefaultText: "admin",
+		},
+		&cli.StringFlag{
+			Name:        "kubeconfig-cluster",
+			Usage:       "Set kubernetes cluster name",
+			DefaultText: "k0s-cluster",
+		},
 		&cli.BoolFlag{
 			Name:  "disable-downgrade-check",
 			Usage: "Skip downgrade check",
@@ -77,6 +87,8 @@
 			Manager:               ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
 			KubeconfigOut:         kubeconfigOut,
 			KubeconfigAPIAddress:  ctx.String("kubeconfig-api-address"),
+			KubeconfigUser:        ctx.String("kubeconfig-user"),
+			KubeconfigCluster:     ctx.String("kubeconfig-cluster"),
 			NoWait:                ctx.Bool("no-wait"),
 			NoDrain:               ctx.Bool("no-drain"),
 			DisableDowngradeCheck: ctx.Bool("disable-downgrade-check"),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/cmd/kubeconfig.go new/k0sctl-0.20.0/cmd/kubeconfig.go
--- old/k0sctl-0.19.4/cmd/kubeconfig.go 2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/cmd/kubeconfig.go 2024-12-09 09:34:46.000000000 +0100
@@ -14,9 +14,21 @@
 	Usage: "Output the admin kubeconfig of the cluster",
 	Flags: []cli.Flag{
 		&cli.StringFlag{
-			Name:  "address",
-			Usage: "Set kubernetes API address (default: auto-detect)",
-			Value: "",
+			Name:        "address",
+			Value:       "",
+			DefaultText: "auto-detect",
+		},
+		&cli.StringFlag{
+			Name:        "user",
+			Usage:       "Set kubernetes cluster username",
+			Aliases:     []string{"u"},
+			DefaultText: "admin",
+		},
+		&cli.StringFlag{
+			Name:        "cluster",
+			Usage:       "Set kubernetes cluster name",
+			Aliases:     []string{"n"},
+			DefaultText: "k0s-cluster",
 		},
 		configFlag,
 		dryRunFlag,
@@ -36,6 +48,8 @@
 		kubeconfigAction := action.Kubeconfig{
 			Manager:              ctx.Context.Value(ctxManagerKey{}).(*phase.Manager),
 			KubeconfigAPIAddress: ctx.String("address"),
+			KubeconfigUser:       ctx.String("user"),
+			KubeconfigCluster:    ctx.String("cluster"),
 		}
 
 		if err := kubeconfigAction.Run(); err != nil {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/go.mod new/k0sctl-0.20.0/go.mod
--- old/k0sctl-0.19.4/go.mod    2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/go.mod    2024-12-09 09:34:46.000000000 +0100
@@ -2,6 +2,8 @@
 
 go 1.23.2
 
+toolchain go1.23.3
+
 require (
 	github.com/AlecAivazis/survey/v2 v2.3.7
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
@@ -23,25 +25,25 @@
 	github.com/segmentio/backo-go v1.1.0 // indirect
 	github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02
 	github.com/sirupsen/logrus v1.9.3
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.27.5
 	github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
 	golang.org/x/crypto v0.29.0 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/sys v0.27.0 // indirect
 	golang.org/x/term v0.26.0 // indirect
-	golang.org/x/text v0.20.0
+	golang.org/x/text v0.21.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
 	github.com/alessio/shellescape v1.4.2
 	github.com/carlmjohnson/versioninfo v0.22.5
-	github.com/go-playground/validator/v10 v10.22.1
+	github.com/go-playground/validator/v10 v10.23.0
 	github.com/jellydator/validation v1.1.0
 	github.com/k0sproject/version v0.6.0
 	github.com/sergi/go-diff v1.3.1
-	k8s.io/client-go v0.31.2
+	k8s.io/client-go v0.31.3
 )
 
 require (
@@ -90,7 +92,7 @@
 	golang.org/x/time v0.7.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.31.2 // indirect
+	k8s.io/apimachinery v0.31.3 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect
 	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/go.sum new/k0sctl-0.20.0/go.sum
--- old/k0sctl-0.19.4/go.sum    2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/go.sum    2024-12-09 09:34:46.000000000 +0100
@@ -62,8 +62,8 @@
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
-github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
+github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
+github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
 github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
 github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -192,8 +192,8 @@
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde h1:AMNpJRc7P+GTwVbl8DkK2I9I8BBUzNiHuH/tlxrpan0=
 github.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde/go.mod h1:MvrEmduDUz4ST5pGZ7CABCnOU5f3ZiOAZzT6b1A6nX8=
 github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
@@ -259,8 +259,8 @@
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
 golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -286,12 +286,12 @@
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
-k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
-k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
-k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
-k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
+k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
+k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
+k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
+k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4=
+k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/get_kubeconfig.go new/k0sctl-0.20.0/phase/get_kubeconfig.go
--- old/k0sctl-0.19.4/phase/get_kubeconfig.go   2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/phase/get_kubeconfig.go   2024-12-09 09:34:46.000000000 +0100
@@ -13,6 +13,8 @@
 type GetKubeconfig struct {
 	GenericPhase
 	APIAddress string
+	User string
+	Cluster string
 }
 
 // Title for the phase
@@ -46,7 +48,15 @@
 		p.APIAddress = p.Config.Spec.KubeAPIURL()
 	}
 
-	cfgString, err := kubeConfig(output, p.Config.Metadata.Name, p.APIAddress)
+	if p.User != "" {
+		p.Config.Metadata.User = p.User
+	}
+
+	if p.Cluster != "" {
+		p.Config.Metadata.Name = p.Cluster
+	}
+
+	cfgString, err := kubeConfig(output, p.Config.Metadata.Name, p.APIAddress, p.Config.Metadata.User)
 	if err != nil {
 		return err
 	}
@@ -58,7 +68,7 @@
 
 // kubeConfig reads in the raw kubeconfig and changes the given address
 // and cluster name into it
-func kubeConfig(raw string, name string, address string) (string, error) {
+func kubeConfig(raw string, name string, address, user string) (string, error) {
 	cfg, err := clientcmd.Load([]byte(raw))
 	if err != nil {
 		return "", err
 	}
@@ -71,11 +81,11 @@
 	cfg.Contexts[name] = cfg.Contexts["Default"]
 	delete(cfg.Contexts, "Default")
 	cfg.Contexts[name].Cluster = name
-	cfg.Contexts[name].AuthInfo = "admin"
+	cfg.Contexts[name].AuthInfo = user
 
 	cfg.CurrentContext = name
 
-	cfg.AuthInfos["admin"] = cfg.AuthInfos["user"]
+	cfg.AuthInfos[user] = cfg.AuthInfos["user"]
 	delete(cfg.AuthInfos, "user")
 
 	out, err := clientcmd.Write(*cfg)
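
(Reviewer's note: the kubeconfig rewrite above is plain client-go map surgery on the config that k0s emits. A minimal standalone sketch of the renaming approach, assuming the input shape k0s produces: a cluster named "local", a context named "Default" and a credentials entry named "user". Only the context/user renames appear in the quoted hunk; the cluster rename is included here for completeness.)

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

// renameKubeconfig loads a raw kubeconfig, moves the default
// cluster/context/user entries under the chosen names, and
// serializes it back - the same approach as kubeConfig above.
func renameKubeconfig(raw []byte, name, user string) ([]byte, error) {
	cfg, err := clientcmd.Load(raw)
	if err != nil {
		return nil, err
	}
	// Rename the cluster entry (assumed to be "local" in k0s output).
	cfg.Clusters[name] = cfg.Clusters["local"]
	delete(cfg.Clusters, "local")
	// Move the "Default" context under the cluster name and point it
	// at the chosen credentials name.
	cfg.Contexts[name] = cfg.Contexts["Default"]
	delete(cfg.Contexts, "Default")
	cfg.Contexts[name].Cluster = name
	cfg.Contexts[name].AuthInfo = user
	cfg.CurrentContext = name
	// Rename the credentials entry from k0s's default "user".
	cfg.AuthInfos[user] = cfg.AuthInfos["user"]
	delete(cfg.AuthInfos, "user")
	return clientcmd.Write(*cfg)
}

func main() {
	// Hypothetical minimal kubeconfig in the shape described above.
	raw := []byte(`apiVersion: v1
kind: Config
clusters:
- cluster: {server: "https://10.0.0.1:6443"}
  name: local
contexts:
- context: {cluster: local, user: user}
  name: Default
current-context: Default
users:
- name: user
  user: {token: abc123}
`)
	out, err := renameKubeconfig(raw, "my-k0s-cluster", "admin")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```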
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/initialize_k0s.go new/k0sctl-0.20.0/phase/initialize_k0s.go
--- old/k0sctl-0.19.4/phase/initialize_k0s.go   2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/phase/initialize_k0s.go   2024-12-09 09:34:46.000000000 +0100
@@ -4,6 +4,7 @@
 	"context"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
@@ -115,11 +116,22 @@
 			return err
 		}
 
-		log.Infof("%s: waiting for kubernetes api to respond", h)
-		if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config)); err != nil {
+		log.Infof("%s: wait for kubernetes to reach ready state", h)
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+		err := retry.Context(ctx, func(_ context.Context) error {
+			out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz'"), exec.Sudo(h))
+			if out != "ok" {
+				return fmt.Errorf("kubernetes api /readyz responded with %q", out)
+			}
 			return err
+		})
+		if err != nil {
+			return fmt.Errorf("kubernetes not ready: %w", err)
 		}
 
+		h.Metadata.Ready = true
+
 		return nil
 	})
 	if err != nil {
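
(Reviewer's note: the readiness wait above swaps the generic HTTP probe for `kubectl get --raw='/readyz'` retried under a 30-second context. A stdlib-only sketch of the same retry-until-deadline pattern follows; the real code uses k0sctl's internal `retry` package, and `checkReady` here is a hypothetical stand-in for the kubectl probe.)

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryUntil polls check until it succeeds or ctx expires, sleeping
// interval between attempts - roughly what retry.Context does.
func retryUntil(ctx context.Context, interval time.Duration, check func(context.Context) error) error {
	var lastErr error
	for {
		if lastErr = check(ctx); lastErr == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			// Surface both the deadline and the last probe failure.
			return errors.Join(ctx.Err(), lastErr)
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// checkReady stands in for running `k0s kubectl get --raw='/readyz'`
	// on the host and comparing the output to "ok".
	checkReady := func(_ context.Context) error { return errors.New("not ready yet") }
	if err := retryUntil(ctx, 2*time.Second, checkReady); err != nil {
		fmt.Println("kubernetes not ready:", err)
	}
}
```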
controller %s", h), func() error { log.Debugf("%s: invalidating join token for controller %d", p.leader, i+1) - return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sJoinTokenID), exec.Sudo(p.leader)) + return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader)) }) if err != nil { log.Warnf("%s: failed to invalidate worker join token: %v", p.leader, err) @@ -88,33 +87,10 @@ // Run the phase func (p *InstallControllers) Run() error { - url := p.Config.Spec.InternalKubeAPIURL() - healthz := fmt.Sprintf("%s/healthz", url) - - err := p.parallelDo(p.hosts, func(h *cluster.Host) error { - if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader { - log.Infof("%s: validating api connection to %s", h, url) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - if err := retry.Context(ctx, node.HTTPStatusFunc(h, healthz, 200, 401)); err != nil { - return fmt.Errorf("failed to connect from controller to kubernetes api at %s - check networking", url) - } - } else { - log.Warnf("%s: dry-run: skipping api connection validation to %s because cluster is not running", h, url) - } - return nil - }) - if err != nil { - return err - } - for _, h := range p.hosts { - var token string - var tokenID string - if p.IsWet() { - log.Infof("%s: generating token", p.leader) - token, err = p.Config.Spec.K0s.GenerateToken( + log.Infof("%s: generate join token for %s", p.leader, h) + token, err := p.Config.Spec.K0s.GenerateToken( p.leader, "controller", time.Duration(10)*time.Minute, @@ -122,20 +98,40 @@ if err != nil { return err } - h.Metadata.K0sJoinToken = token - tokenID, err = cluster.TokenID(token) + tokenData, err := cluster.ParseToken(token) if err != nil { return err } - log.Debugf("%s: join token ID: %s", p.leader, tokenID) - h.Metadata.K0sJoinTokenID = tokenID + h.Metadata.K0sTokenData = tokenData } else { p.DryMsgf(p.leader, "generate a k0s join token for controller %s", h) - h.Metadata.K0sJoinTokenID = "dry-run" + h.Metadata.K0sTokenData.ID = "dry-run" + h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL() } - - log.Infof("%s: writing join token", h) - if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), h.Metadata.K0sJoinToken, "0640"); err != nil { + } + err := p.parallelDo(p.hosts, func(h *cluster.Host) error { + if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader { + log.Infof("%s: validating api connection to %s", h, h.Metadata.K0sTokenData.URL) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := retry.Context(ctx, node.HTTPStatusFunc(h, h.Metadata.K0sTokenData.URL, 200, 401, 404)); err != nil { + return fmt.Errorf("failed to connect from controller to kubernetes api - check networking: %w", err) + } + } else { + log.Warnf("%s: dry-run: skipping api connection validation to because cluster is not actually running", h) + } + return nil + }) + if err != nil { + return err + } + return p.parallelDo(p.hosts, func(h *cluster.Host) error { + tokenPath := h.K0sJoinTokenPath() + log.Infof("%s: writing join token to %s", h, tokenPath) + err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error { + return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600") + }) + if err != nil { return err } @@ -180,17 +176,22 @@ return err } - if err := p.waitJoined(h); err != nil { - return err + ctx, cancel := 
context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + err := retry.Context(ctx, func(_ context.Context) error { + out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h)) + if err != nil { + return fmt.Errorf("readiness endpoint reports %q: %w", out, err) + } + return nil + }) + if err != nil { + return fmt.Errorf("controller did not reach ready state: %w", err) } - } - h.Metadata.Ready = true - } - return nil -} + h.Metadata.Ready = true + } -func (p *InstallControllers) waitJoined(h *cluster.Host) error { - log.Infof("%s: waiting for kubernetes api to respond", h) - return retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config)) + return nil + }) } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/install_workers.go new/k0sctl-0.20.0/phase/install_workers.go --- old/k0sctl-0.19.4/phase/install_workers.go 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/phase/install_workers.go 2024-12-09 09:34:46.000000000 +0100 @@ -66,7 +66,7 @@ func (p *InstallWorkers) After() error { if NoWait { for _, h := range p.hosts { - if h.Metadata.K0sJoinToken != "" { + if h.Metadata.K0sTokenData.Token != "" { log.Warnf("%s: --no-wait given, created join tokens will remain valid for 10 minutes", p.leader) break } @@ -74,19 +74,18 @@ return nil } for i, h := range p.hosts { - if h.Metadata.K0sJoinTokenID == "" { + h.Metadata.K0sTokenData.Token = "" + if h.Metadata.K0sTokenData.ID == "" { continue } - h.Metadata.K0sJoinToken = "" err := p.Wet(p.leader, fmt.Sprintf("invalidate k0s join token for worker %s", h), func() error { log.Debugf("%s: invalidating join token for worker %d", p.leader, i+1) - return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sJoinTokenID), exec.Sudo(p.leader)) + return p.leader.Exec(p.leader.Configurer.K0sCmdf("token invalidate --data-dir=%s %s", p.leader.K0sDataDir(), h.Metadata.K0sTokenData.ID), exec.Sudo(p.leader)) }) if err != nil { log.Warnf("%s: failed to invalidate worker join token: %v", p.leader, err) } _ = p.Wet(h, "overwrite k0s join token file", func() error { - if err := h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), "# overwritten by k0sctl after join\n", "0600"); err != nil { log.Warnf("%s: failed to overwrite the join token file at %s", h, h.K0sJoinTokenPath()) } @@ -98,30 +97,9 @@ // Run the phase func (p *InstallWorkers) Run() error { - url := p.Config.Spec.InternalKubeAPIURL() - healthz := fmt.Sprintf("%s/healthz", url) - - err := p.parallelDo(p.hosts, func(h *cluster.Host) error { - if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader { - log.Infof("%s: validating api connection to %s", h, url) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - if err := retry.Context(ctx, node.HTTPStatusFunc(h, healthz, 200, 401)); err != nil { - return fmt.Errorf("failed to connect from worker to kubernetes api at %s - check networking", url) - } - } else { - log.Warnf("%s: dry-run: skipping api connection validation to %s because cluster is not running", h, url) - } - return nil - }) - - if err != nil { - return err - } - for i, h := range p.hosts { log.Infof("%s: generating a join token for worker %d", p.leader, i+1) - err = p.Wet(p.leader, fmt.Sprintf("generate a k0s join token for worker %s", h), func() error { + err := p.Wet(p.leader, fmt.Sprintf("generate a k0s join token for worker 
%s", h), func() error { t, err := p.Config.Spec.K0s.GenerateToken( p.leader, "worker", @@ -130,18 +108,18 @@ if err != nil { return err } - h.Metadata.K0sJoinToken = t - ti, err := cluster.TokenID(t) + td, err := cluster.ParseToken(t) if err != nil { - return err + return fmt.Errorf("parse k0s token: %w", err) } - h.Metadata.K0sJoinTokenID = ti - log.Debugf("%s: join token ID: %s", h, ti) + h.Metadata.K0sTokenData = td + return nil }, func() error { - h.Metadata.K0sJoinTokenID = "dry-run" + h.Metadata.K0sTokenData.ID = "dry-run" + h.Metadata.K0sTokenData.URL = p.Config.Spec.KubeAPIURL() return nil }) if err != nil { @@ -149,10 +127,35 @@ } } + err := p.parallelDo(p.hosts, func(h *cluster.Host) error { + if p.IsWet() || !p.leader.Metadata.DryRunFakeLeader { + log.Infof("%s: validating api connection to %s using join token", h, h.Metadata.K0sTokenData.URL) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + err := retry.Context(ctx, func(_ context.Context) error { + err := h.Exec(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/version' --kubeconfig=/dev/stdin"), exec.Sudo(h), exec.Stdin(string(h.Metadata.K0sTokenData.Kubeconfig))) + if err != nil { + return fmt.Errorf("failed to connect to kubernetes api using the join token - check networking: %w", err) + } + return nil + }) + if err != nil { + return fmt.Errorf("connectivity check failed: %w", err) + } + } else { + log.Warnf("%s: dry-run: skipping api connection validation because cluster is not actually running", h) + } + return nil + }) + if err != nil { + return err + } + return p.parallelDo(p.hosts, func(h *cluster.Host) error { - err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", h.K0sJoinTokenPath()), func() error { - log.Infof("%s: writing join token", h) - return h.Configurer.WriteFile(h, h.K0sJoinTokenPath(), h.Metadata.K0sJoinToken, "0640") + tokenPath := h.K0sJoinTokenPath() + err := p.Wet(h, fmt.Sprintf("write k0s join token to %s", tokenPath), func() error { + log.Infof("%s: writing join token to %s", h, tokenPath) + return h.Configurer.WriteFile(h, tokenPath, h.Metadata.K0sTokenData.Token, "0600") }) if err != nil { return err diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/reinstall.go new/k0sctl-0.20.0/phase/reinstall.go --- old/k0sctl-0.19.4/phase/reinstall.go 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/phase/reinstall.go 2024-12-09 09:34:46.000000000 +0100 @@ -5,7 +5,6 @@ "fmt" "math" "strings" - "time" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" @@ -108,30 +107,5 @@ return fmt.Errorf("restart after reinstall: %w", err) } - if h != p.Config.Spec.K0sLeader() { - return nil - } - - if NoWait || !p.IsWet() { - log.Warnf("%s: skipping scheduler and system pod checks because --no-wait given", h) - return nil - } - - log.Infof("%s: waiting for the scheduler to become ready", h) - if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.ScheduledEventsAfterFunc(h, time.Now())); err != nil { - if !Force { - return fmt.Errorf("failed to observe scheduling events after api start-up, you can ignore this check by using --force: %w", err) - } - log.Warnf("%s: failed to observe scheduling events after api start-up: %s", h, err) - } - - log.Infof("%s: waiting for system pods to become ready", h) - if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.SystemPodsRunningFunc(h)); 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/reinstall.go new/k0sctl-0.20.0/phase/reinstall.go
--- old/k0sctl-0.19.4/phase/reinstall.go        2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/phase/reinstall.go        2024-12-09 09:34:46.000000000 +0100
@@ -5,7 +5,6 @@
 	"fmt"
 	"math"
 	"strings"
-	"time"
 
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1"
 	"github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster"
@@ -108,30 +107,5 @@
 		return fmt.Errorf("restart after reinstall: %w", err)
 	}
 
-	if h != p.Config.Spec.K0sLeader() {
-		return nil
-	}
-
-	if NoWait || !p.IsWet() {
-		log.Warnf("%s: skipping scheduler and system pod checks because --no-wait given", h)
-		return nil
-	}
-
-	log.Infof("%s: waiting for the scheduler to become ready", h)
-	if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.ScheduledEventsAfterFunc(h, time.Now())); err != nil {
-		if !Force {
-			return fmt.Errorf("failed to observe scheduling events after api start-up, you can ignore this check by using --force: %w", err)
-		}
-		log.Warnf("%s: failed to observe scheduling events after api start-up: %s", h, err)
-	}
-
-	log.Infof("%s: waiting for system pods to become ready", h)
-	if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.SystemPodsRunningFunc(h)); err != nil {
-		if !Force {
-			return fmt.Errorf("all system pods not running after api start-up, you can ignore this check by using --force: %w", err)
-		}
-		log.Warnf("%s: failed to observe system pods running after api start-up: %s", h, err)
-	}
-
 	return nil
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/phase/upgrade_controllers.go new/k0sctl-0.20.0/phase/upgrade_controllers.go
--- old/k0sctl-0.19.4/phase/upgrade_controllers.go      2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/phase/upgrade_controllers.go      2024-12-09 09:34:46.000000000 +0100
@@ -133,8 +133,17 @@
 	}
 
 	if p.IsWet() {
-		if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.KubeAPIReadyFunc(h, p.Config)); err != nil {
-			return fmt.Errorf("kube api did not become ready: %w", err)
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+		err := retry.Context(ctx, func(_ context.Context) error {
+			out, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get --raw='/readyz?verbose=true'"), exec.Sudo(h))
+			if err != nil {
+				return fmt.Errorf("readiness endpoint reports %q: %w", out, err)
+			}
+			return nil
+		})
+		if err != nil {
+			return fmt.Errorf("controller did not reach ready state: %w", err)
 		}
 	}
 
@@ -147,21 +156,5 @@
 		return nil
 	}
 
-	log.Infof("%s: waiting for the scheduler to become ready", leader)
-	if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.ScheduledEventsAfterFunc(leader, time.Now())); err != nil {
-		if !Force {
-			return fmt.Errorf("failed to observe scheduling events after api start-up, you can ignore this check by using --force: %w", err)
-		}
-		log.Warnf("%s: failed to observe scheduling events after api start-up: %s", leader, err)
-	}
-
-	log.Infof("%s: waiting for system pods to become ready", leader)
-	if err := retry.Timeout(context.TODO(), retry.DefaultTimeout, node.SystemPodsRunningFunc(leader)); err != nil {
-		if !Force {
-			return fmt.Errorf("all system pods not running after api start-up, you can ignore this check by using --force: %w", err)
-		}
-		log.Warnf("%s: failed to observe system pods running after api start-up: %s", leader, err)
-	}
-
 	return nil
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go
--- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go   2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags.go   2024-12-09 09:34:46.000000000 +0100
@@ -211,3 +211,20 @@
 	}
 	return true
 }
+
+// NewFlags shell-splits and parses a string and returns new Flags or an error if splitting fails
+func NewFlags(s string) (Flags, error) {
+	var flags Flags
+	unq, err := shell.Unquote(s)
+	if err != nil {
+		return flags, fmt.Errorf("failed to unquote flags %q: %w", s, err)
+	}
+	parts, err := shell.Split(unq)
+	if err != nil {
+		return flags, fmt.Errorf("failed to split flags %q: %w", s, err)
+	}
+	for _, part := range parts {
+		flags.Add(part)
+	}
+	return flags, nil
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags_test.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags_test.go
--- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags_test.go      2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/flags_test.go      2024-12-09 09:34:46.000000000 +0100
@@ -135,3 +135,16 @@
 	flags2 = Flags{"-f", "--flag2=foo", "--flag3=baz"}
 	require.False(t, flags1.Equals(flags2))
 }
+
+func TestNewFlags(t *testing.T) {
+	t.Run("basic", func(t *testing.T) {
+		flags, err := NewFlags("--hello=world --bar=baz")
+		require.NoError(t, err)
+		require.Equal(t, "world", flags.GetValue("--hello"))
+		require.Equal(t, "baz", flags.GetValue("--bar"))
+	})
+	t.Run("empty", func(t *testing.T) {
+		_, err := NewFlags("")
+		require.NoError(t, err)
+	})
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go
--- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go     2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host.go     2024-12-09 09:34:46.000000000 +0100
@@ -13,7 +13,6 @@
 	"github.com/go-playground/validator/v10"
 	"github.com/jellydator/validation"
 	"github.com/jellydator/validation/is"
-	"github.com/k0sproject/k0sctl/internal/shell"
 	"github.com/k0sproject/rig"
 	"github.com/k0sproject/rig/exec"
 	"github.com/k0sproject/rig/os"
@@ -180,8 +179,7 @@
 	K0sInstalled bool
 	K0sExistingConfig string
 	K0sNewConfig string
-	K0sJoinToken string
-	K0sJoinTokenID string
+	K0sTokenData TokenData
 	K0sStatusArgs Flags
 	Arch string
 	IsK0sLeader bool
@@ -289,9 +287,9 @@
 
 	switch h.Role {
 	case "controller+worker":
-		flags.AddUnlessExist("--enable-worker")
+		flags.AddUnlessExist("--enable-worker=true")
 		if h.NoTaints {
-			flags.AddUnlessExist("--no-taints")
+			flags.AddUnlessExist("--no-taints=true")
 		}
 	case "single":
 		flags.AddUnlessExist("--single=true")
@@ -308,13 +306,11 @@
 	if strings.HasSuffix(h.Role, "worker") {
 		var extra Flags
 		if old := flags.GetValue("--kubelet-extra-args"); old != "" {
-			parts, err := shell.Split(old)
+			ex, err := NewFlags(old)
 			if err != nil {
 				return flags, fmt.Errorf("failed to split kubelet-extra-args: %w", err)
 			}
-			for _, part := range parts {
-				extra.Add(part)
-			}
+			extra = ex
 		}
 		// set worker's private address to --node-ip in --extra-kubelet-args if cloud ins't enabled
 		enableCloudProvider, err := h.InstallFlags.GetBoolean("--enable-cloud-provider")
@@ -581,17 +577,41 @@
 
 // FlagsChanged returns true when the flags have changed by comparing the host.Metadata.K0sStatusArgs to what host.InstallFlags would produce
 func (h *Host) FlagsChanged() bool {
-	installFlags, err := h.K0sInstallFlags()
+	our, err := h.K0sInstallFlags()
 	if err != nil {
 		log.Warnf("%s: could not get install flags: %s", h, err)
-		installFlags = Flags{}
+		our = Flags{}
+	}
+	ex := our.GetValue("--kubelet-extra-args")
+	ourExtra, err := NewFlags(ex)
+	if err != nil {
+		log.Warnf("%s: could not parse local --kubelet-extra-args value %q: %s", h, ex, err)
+	}
+
+	var their Flags
+	their = append(their, h.Metadata.K0sStatusArgs...)
+	ex = their.GetValue("--kubelet-extra-args")
+	theirExtra, err := NewFlags(ex)
+	if err != nil {
+		log.Warnf("%s: could not parse remote --kubelet-extra-args value %q: %s", h, ex, err)
+	}
+
+	if !ourExtra.Equals(theirExtra) {
+		log.Debugf("%s: installFlags --kubelet-extra-args seem to have changed: %+v vs %+v", h, theirExtra.Map(), ourExtra.Map())
+		return true
+	}
+
+	// remove flags that are dropped by k0s or are handled specially
+	for _, f := range []string{"--force", "--kubelet-extra-args", "--env", "--data-dir", "--token-file", "--config"} {
+		our.Delete(f)
+		their.Delete(f)
 	}
 
-	if installFlags.Equals(h.Metadata.K0sStatusArgs) {
+	if our.Equals(their) {
 		log.Debugf("%s: installFlags have not changed", h)
 		return false
 	}
 
-	log.Debugf("%s: installFlags seem to have changed. existing: %+v new: %+v", h, h.Metadata.K0sStatusArgs.Map(), installFlags.Map())
+	log.Debugf("%s: installFlags seem to have changed. existing: %+v new: %+v", h, their.Map(), our.Map())
 	return true
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host_test.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host_test.go
--- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host_test.go        2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/host_test.go        2024-12-09 09:34:46.000000000 +0100
@@ -90,12 +90,12 @@
 	h.Metadata.IsK0sLeader = true
 	cmd, err = h.K0sInstallCommand()
 	require.NoError(t, err)
-	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --enable-worker --config=from-configurer`, cmd)
+	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --enable-worker=true --config=from-configurer`, cmd)
 
 	h.Metadata.IsK0sLeader = false
 	cmd, err = h.K0sInstallCommand()
 	require.NoError(t, err)
-	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --enable-worker --token-file=from-configurer --config=from-configurer`, cmd)
+	require.Equal(t, `k0s install controller --data-dir=/tmp/k0s --enable-worker=true --token-file=from-configurer --config=from-configurer`, cmd)
 
 	h.Role = "worker"
 	h.PrivateAddress = "10.0.0.9"
@@ -177,14 +177,16 @@
 	h := Host{
 		Configurer: cfg,
 		DataDir: "/tmp/data",
-		Role: "controller",
+		Role: "controller+worker",
 		PrivateAddress: "10.0.0.1",
 		InstallFlags: []string{"--foo='bar'", "--bar=foo"},
 		Metadata: HostMetadata{
-			K0sStatusArgs: []string{"--foo=bar", `--bar="foo"`, "--data-dir=/tmp/data", "--token-file=/tmp/token", "--config=/tmp/foo.yaml"},
+			K0sStatusArgs: []string{"--foo=bar", `--bar="foo"`, "--enable-worker=true", "--data-dir=/tmp/data", "--token-file=/tmp/token", "--config=/tmp/foo.yaml", "--kubelet-extra-args=--node-ip=10.0.0.1"},
 		},
 	}
-	require.False(t, h.FlagsChanged())
+	newFlags, err := h.K0sInstallFlags()
+	require.NoError(t, err)
+	require.False(t, h.FlagsChanged(), "flags %+v should not be considered different from %+v", newFlags, h.Metadata.K0sStatusArgs)
 	h.InstallFlags = []string{"--foo=bar", `--bar="foo"`}
 	require.False(t, h.FlagsChanged())
 	h.InstallFlags = []string{"--foo=baz", `--bar="foo"`}
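
(Reviewer's note: the reworked `FlagsChanged` normalizes both sides before comparing - `--kubelet-extra-args` is parsed and compared as a flag set of its own, and flags that k0s drops or k0sctl injects (`--data-dir`, `--token-file`, `--config`, ...) are removed from both lists. A simplified sketch of that normalize-then-compare idea, using plain maps instead of k0sctl's `Flags` type; the function and flag names below are illustrative only.)

```go
package main

import (
	"fmt"
	"strings"
)

// toMap splits "--key=value" flags into a map; a bare flag maps to "".
func toMap(flags []string) map[string]string {
	m := make(map[string]string, len(flags))
	for _, f := range flags {
		k, v, _ := strings.Cut(f, "=")
		m[k] = v
	}
	return m
}

// flagsChanged compares two flag lists after dropping keys that are
// handled specially (a subset of the real list, for illustration).
func flagsChanged(ours, theirs []string) bool {
	om, tm := toMap(ours), toMap(theirs)
	for _, k := range []string{"--force", "--data-dir", "--token-file", "--config"} {
		delete(om, k)
		delete(tm, k)
	}
	if len(om) != len(tm) {
		return true
	}
	for k, v := range om {
		if tv, ok := tm[k]; !ok || tv != v {
			return true
		}
	}
	return false
}

func main() {
	ours := []string{"--enable-worker=true", "--labels=env=dev"}
	theirs := []string{"--enable-worker=true", "--labels=env=dev", "--data-dir=/var/lib/k0s"}
	fmt.Println(flagsChanged(ours, theirs)) // false: --data-dir is ignored
}
```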
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go
--- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go      2024-11-12 13:03:52.000000000 +0100
+++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster/k0s.go      2024-12-09 09:34:46.000000000 +0100
@@ -174,50 +174,78 @@
 	return h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "get -n kube-system namespace kube-system -o template={{.metadata.uid}}"), exec.Sudo(h))
 }
 
-// TokenID returns a token id from a token string that can be used to invalidate the token
-func TokenID(s string) (string, error) {
+// TokenData is data collected from a decoded k0s token
+type TokenData struct {
+	ID string
+	URL string
+	Token string
+	Kubeconfig []byte
+}
+
+// ParseToken returns TokenData for a token string
+func ParseToken(s string) (TokenData, error) {
+	data := TokenData{Token: s}
+
 	b64 := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
 	_, err := base64.StdEncoding.Decode(b64, []byte(s))
 	if err != nil {
-		return "", fmt.Errorf("failed to decode token: %w", err)
+		return data, fmt.Errorf("failed to decode token: %w", err)
 	}
 
 	sr := strings.NewReader(s)
 	b64r := base64.NewDecoder(base64.StdEncoding, sr)
 	gzr, err := gzip.NewReader(b64r)
 	if err != nil {
-		return "", fmt.Errorf("failed to create a reader for token: %w", err)
+		return data, fmt.Errorf("failed to create a reader for token: %w", err)
 	}
 	defer gzr.Close()
 
 	c, err := io.ReadAll(gzr)
 	if err != nil {
-		return "", fmt.Errorf("failed to uncompress token: %w", err)
+		return data, fmt.Errorf("failed to uncompress token: %w", err)
 	}
+	data.Kubeconfig = c
 
 	cfg := dig.Mapping{}
 	err = yaml.Unmarshal(c, &cfg)
 	if err != nil {
-		return "", fmt.Errorf("failed to unmarshal token: %w", err)
+		return data, fmt.Errorf("failed to unmarshal token: %w", err)
 	}
 
 	users, ok := cfg.Dig("users").([]interface{})
 	if !ok || len(users) < 1 {
-		return "", fmt.Errorf("failed to find users in token")
+		return data, fmt.Errorf("failed to find users in token")
 	}
 
 	user, ok := users[0].(dig.Mapping)
 	if !ok {
-		return "", fmt.Errorf("failed to find user in token")
+		return data, fmt.Errorf("failed to find user in token")
 	}
 
 	token, ok := user.Dig("user", "token").(string)
 	if !ok {
-		return "", fmt.Errorf("failed to find user token in token")
+		return data, fmt.Errorf("failed to find user token in token")
 	}
 
 	idx := strings.IndexRune(token, '.')
 	if idx < 0 {
-		return "", fmt.Errorf("failed to find separator in token")
+		return data, fmt.Errorf("failed to find separator in token")
+	}
+
+	data.ID = token[0:idx]
+
+	clusters, ok := cfg.Dig("clusters").([]interface{})
+	if !ok || len(clusters) < 1 {
+		return data, fmt.Errorf("failed to find clusters in token")
+	}
+
+	cluster, ok := clusters[0].(dig.Mapping)
+	if !ok {
+		return data, fmt.Errorf("failed to find cluster in token")
 	}
 
-	return token[0:idx], nil
+	url := cluster.DigString("cluster", "server")
+	if url == "" {
+		return data, fmt.Errorf("failed to find cluster url in token")
+	}
+	data.URL = url
+
+	return data, nil
 }
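
(Reviewer's note: a k0s join token is a base64-wrapped, gzipped kubeconfig; `ParseToken` digs the token ID and the API server URL out of it. A self-contained sketch of the same unwrapping, using typed YAML structs instead of k0sctl's `dig.Mapping`.)

```go
package main

import (
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v2"
)

// joinToken mirrors the kubeconfig fields ParseToken extracts: the
// API server URL and the bearer token whose prefix is the token ID.
type joinToken struct {
	Clusters []struct {
		Cluster struct {
			Server string `yaml:"server"`
		} `yaml:"cluster"`
	} `yaml:"clusters"`
	Users []struct {
		User struct {
			Token string `yaml:"token"`
		} `yaml:"user"`
	} `yaml:"users"`
}

// decodeToken unwraps a k0s join token: base64 -> gzip -> kubeconfig YAML.
func decodeToken(s string) (id, url string, err error) {
	gzr, err := gzip.NewReader(base64.NewDecoder(base64.StdEncoding, strings.NewReader(s)))
	if err != nil {
		return "", "", fmt.Errorf("token is not base64-wrapped gzip: %w", err)
	}
	defer gzr.Close()
	raw, err := io.ReadAll(gzr)
	if err != nil {
		return "", "", err
	}
	var cfg joinToken
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		return "", "", err
	}
	if len(cfg.Users) < 1 || len(cfg.Clusters) < 1 {
		return "", "", fmt.Errorf("no users or clusters in token")
	}
	// The bearer token is "<id>.<secret>"; the ID part is what
	// `k0s token invalidate` takes.
	id, _, _ = strings.Cut(cfg.Users[0].User.Token, ".")
	return id, cfg.Clusters[0].Cluster.Server, nil
}

func main() {
	// With a real token this prints the ID and server URL.
	id, url, err := decodeToken("not-a-real-token")
	fmt.Println(id, url, err)
}
```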
"H4sIAAAAAAAC/2xVXY/iOBZ9r1/BH6geO4GeAWkfKiEmGGLKjn1N/BbidAFOgjuk+Frtf18V3SPtSvN2fc/ROdaVfc9L6Q9Q9+fDqZuNLvilaj7PQ92fZy+vo9/17GU0Go3OdX+p+9loPwz+PPvjD/xn8A3/+Q19C2bfx+Pwyanqfjj8OFTlUL+Wn8P+1B+G+6sth3I2WudoWOc4FspSeYjmAqjKlaEcESWeGBpih2muRCQSNucavEEkzBWNDGoApDV1t19W6uNSbJsyRzS1mPc7TVdiDknV0qNFQmjl1zvsaZmao3RECHVd8YZEFtlEgGW8ISmXBIQiY6km+wwbr5v9yoIvVHs71pL81CAio0yYpQ2DJMFSe1InWHEZMZHQveiqa/3hf2Eg+v/FpKJdnZifHCA2aKK5IwwSsbVzYnZgJkWLdUZ8IbfCZA5CE1hSKhxliZ2rkKRxw2hxZIlSEHMgwFWCckUTi8iTmyNy+ZqJUtktO2Y9C8Wpuk8DsTUT7ehnjt9uBTQ0T7yDB9nyw+A4Tlb5wt2NbHgB5LSJpwvR2Ytpp6oKm/lG2ZvUZoDERjs9vubzamxJcZEaX6vDwLKWFeUWIoOqi7z/hWx7c2q77DfcJ5BkQQFAyxYw6xix8BZILAar8Ha3GM7l420ssZ/UZE/rrQtUytSus4ssXGKOissKkdgiOskw1fowPKRqxnFLPy0hj1pPvV6IC0t4AOhGgZDlZjFdGYdXLBVZBozKrUccW6Ra2mQNm5sF9bsHXRVqv8lB7E3XmNyZjKHTSm7Jp82HyxoJDom56HY8zgFa6/xCoOtdIL8qF8t71rDUYBZAI247ZHnpiluZn+9WNu8GsvEusFuOpvNS20J/+GUN1aN2U2kfpFQouVaBj3PsW6VgXwXVeJfSd4DlLdN2JR+gqoAed8hEBcB7OXc4J3Dl2jLuSCQCL0pHo9jhiCU2ygCcSC3hh2moFEQWNTFvfaQS2s nGLJXDMdfFWCiquBKRUh8XqZZXgZIbaJEYTLbcUQnBtLDkY8VbWuzmMAhH97ka1tWWKN1lvQFLICEb3tq+0vu+VNXEPqKvN/gQjkQSsejLv3BsUjTRNk8mpNbMF46d1Ju/SURPRWihBOJtS5eVwp9ZQhvIB8+UCo1ksSXg7IPcS2wNc35cphHKVKNE4rebbSR2ODpxd5uYAA/VfH+JW9Jt1GRv231eJ9mj1uao2+Z7pRrB2ulP4+xF5kOxDtUF3PLKJXmXCb4XgQmzuRFVmmGZnCaA/nrIBdCvuRduvMpVs8lcNi7UcDVhRG0A93JLYpP66yqYgJoLoZumlQ9x2xFD8znIkux77oacdWqSdZSVyjCWnkKmb+9WDz/Nh5+b9O1SIDIUHaC6bW5V4qFsYSnSRmUIloXCuV1MaE7IsQAxBkR5ndqASRZtFDVGm7VszHGzwEfhJqzUzTV2tMi1iG369dfsmjVvkxKKfhMPgjsccEUPLMmCTcJCsTDrfGHGdXsOJcBpo4ezQd7sQroC3EQrdLtVD+Z16lZCY58rEO8SrX7vZiId/+AIckiaRa5YBIl67uU1P/3rZTTqyraejRw6v1Snbqhvw6+U+FX/Som/I+PJ+mp8np+nz13d1MPr7nQazkNf+v9X++z7uhte/1Z6Nt2hs7NRfOp+HD5efF//qPu6q+rzbPTv/7x8qT7Nf4v8g/zT+HmF4eTqbjY6fD+E949vVzeZ7vHx8mM6uPCATi//DQAA//+MVAsnAgcAAA==" - id, err := TokenID(token) + tokendata, err := ParseToken(token) require.NoError(t, err) - require.Equal(t, "i6i3yg", id) + require.Equal(t, "i6i3yg", tokendata.ID) + require.Equal(t, "https://172.17.0.2:6443", tokendata.URL) } func TestUnmarshal(t *testing.T) { diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster.go new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster.go --- old/k0sctl-0.19.4/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster.go 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster.go 2024-12-09 09:34:46.000000000 +0100 @@ -15,6 +15,7 @@ // ClusterMetadata defines cluster metadata type ClusterMetadata struct { Name string `yaml:"name" validate:"required" default:"k0s-cluster"` + User string `yaml:"user" default:"admin"` Kubeconfig string `yaml:"-"` EtcdMembers []string `yaml:"-"` } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/pkg/node/statusfunc.go new/k0sctl-0.20.0/pkg/node/statusfunc.go --- old/k0sctl-0.19.4/pkg/node/statusfunc.go 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/pkg/node/statusfunc.go 2024-12-09 09:34:46.000000000 +0100 @@ -7,7 +7,6 @@ "strings" "time" - "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1" "github.com/k0sproject/k0sctl/pkg/apis/k0sctl.k0sproject.io/v1beta1/cluster" "github.com/k0sproject/rig/exec" @@ -42,20 +41,6 @@ } `json:"items"` } -// kubectl get pods -o json -type podStatusList struct { - Items []struct { - Status struct { - ContainerStatuses []struct { - ContainerID string `json:"containerID"` - Name string `json:"name"` - Ready bool `json:"ready"` - } 
`json:"containerStatuses"` - Phase string `json:"phase"` - } `json:"status"` - } `json:"items"` -} - // KubeNodeReady returns a function that returns an error unless the node is ready according to "kubectl get node" func KubeNodeReadyFunc(h *cluster.Host) retryFunc { return func(_ context.Context) error { @@ -125,49 +110,6 @@ } } -// SystemPodsRunningFunc returns a function that returns an error unless all kube-system pods are running -func SystemPodsRunningFunc(h *cluster.Host) retryFunc { - return func(_ context.Context) error { - output, err := h.ExecOutput(h.Configurer.KubectlCmdf(h, h.K0sDataDir(), "-n kube-system get pods -o json"), exec.HideOutput(), exec.Sudo(h)) - if err != nil { - return fmt.Errorf("failed to get kube-system pods: %w", err) - } - pods := &podStatusList{} - if err := json.Unmarshal([]byte(output), &pods); err != nil { - return fmt.Errorf("failed to decode kubectl output for get kube-system pods: %w", err) - } - - var running int - var notReady int - - for _, p := range pods.Items { - if p.Status.Phase != "Running" { - log.Tracef("%s: pod phase '%s' - container statuses: %+v", h, p.Status.Phase, p.Status.ContainerStatuses) - continue - } - running++ - for _, c := range p.Status.ContainerStatuses { - if !c.Ready { - log.Debugf("%s: container %s not ready", h, c.Name) - notReady++ - } - } - } - - if running == 0 { - return fmt.Errorf("no kube-system pods running") - } - - if notReady > 0 { - return fmt.Errorf("%d kube-system containers not ready", notReady) - } - - log.Debugf("%s: all (%d) system pods running", h, running) - - return nil - } -} - // HTTPStatus returns a function that returns an error unless the expected status code is returned for a HTTP get to the url func HTTPStatusFunc(h *cluster.Host, url string, expected ...int) retryFunc { return func(_ context.Context) error { @@ -194,10 +136,3 @@ return nil } } - -// KubeAPIReadyFunc returns a function that returns an error unless the host's local kube api responds to /version -func KubeAPIReadyFunc(h *cluster.Host, config *v1beta1.Cluster) retryFunc { - // If the anon-auth is disabled on kube api the version endpoint will give 401 - // thus we need to accept both 200 and 401 as valid statuses when checking kube api - return HTTPStatusFunc(h, fmt.Sprintf("%s/version", config.Spec.NodeInternalKubeAPIURL(h)), 200, 401) -} diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/smoke-test/k0sctl-controller-swap.yaml new/k0sctl-0.20.0/smoke-test/k0sctl-controller-swap.yaml --- old/k0sctl-0.19.4/smoke-test/k0sctl-controller-swap.yaml 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/smoke-test/k0sctl-controller-swap.yaml 2024-12-09 09:34:46.000000000 +0100 @@ -14,7 +14,7 @@ address: "127.0.0.1" port: 9023 keyPath: ./id_rsa_k0s - - role: controller + - role: controller+worker uploadBinary: true ssh: address: "127.0.0.1" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/smoke-test/k0sctl-installflags.yaml.tpl new/k0sctl-0.20.0/smoke-test/k0sctl-installflags.yaml.tpl --- old/k0sctl-0.19.4/smoke-test/k0sctl-installflags.yaml.tpl 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/smoke-test/k0sctl-installflags.yaml.tpl 2024-12-09 09:34:46.000000000 +0100 @@ -2,7 +2,7 @@ kind: cluster spec: hosts: - - role: controller + - role: controller+worker uploadBinary: true installFlags: - "${K0S_CONTROLLER_FLAG}" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' 
old/k0sctl-0.19.4/smoke-test/smoke-basic.sh new/k0sctl-0.20.0/smoke-test/smoke-basic.sh --- old/k0sctl-0.19.4/smoke-test/smoke-basic.sh 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/smoke-test/smoke-basic.sh 2024-12-09 09:34:46.000000000 +0100 @@ -31,11 +31,11 @@ ./kubectl --kubeconfig applykubeconfig get nodes echo "* Using k0sctl kubecofig locally" -../k0sctl kubeconfig --config k0sctl.yaml > kubeconfig +../k0sctl kubeconfig --config k0sctl.yaml --user smoke --cluster test > kubeconfig echo "* Output:" grep -v -- -data kubeconfig echo "* Running kubectl" -./kubectl --kubeconfig kubeconfig get nodes +./kubectl --kubeconfig kubeconfig --user smoke --cluster test get nodes echo "* Done" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/k0sctl-0.19.4/smoke-test/smoke-reinstall.sh new/k0sctl-0.20.0/smoke-test/smoke-reinstall.sh --- old/k0sctl-0.19.4/smoke-test/smoke-reinstall.sh 2024-11-12 13:03:52.000000000 +0100 +++ new/k0sctl-0.20.0/smoke-test/smoke-reinstall.sh 2024-12-09 09:34:46.000000000 +0100 @@ -22,15 +22,31 @@ } echo "Installing ${K0S_VERSION}" -../k0sctl apply --config "${K0SCTL_CONFIG}" --debug +../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log +echo "Initial apply should not perform a re-install" +grep -ivq "reinstalling" apply.log + +echo "Install flags should contain the expected flag on a controller" remoteCommand "root@manager0" "k0s status -o json | grep -q -- ${K0S_CONTROLLER_FLAG}" + +echo "Install flags should contain the expected flag on a worker" remoteCommand "root@worker0" "k0s status -o json | grep -q -- ${K0S_WORKER_FLAG}" +echo "A re-apply should not re-install if there are no changes" +../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log +grep -ivq "reinstalling" apply.log + export K0S_CONTROLLER_FLAG="--labels=smoke-stage=2" export K0S_WORKER_FLAG="--labels=smoke-stage=2" envsubst < "k0sctl-installflags.yaml.tpl" > "${K0SCTL_CONFIG}" echo "Re-applying ${K0S_VERSION} with modified installFlags" -../k0sctl apply --config "${K0SCTL_CONFIG}" --debug +../k0sctl apply --config "${K0SCTL_CONFIG}" --debug | tee apply.log +echo "A re-apply should perform a re-install if there are changes" +grep -iq "reinstalling" apply.log + +echo "Install flags should change for controller" remoteCommand "root@manager0" "k0s status -o json | grep -q -- ${K0S_CONTROLLER_FLAG}" + +echo "Install flags should change for worker" remoteCommand "root@worker0" "k0s status -o json | grep -q -- ${K0S_WORKER_FLAG}" ++++++ k0sctl.obsinfo ++++++ --- /var/tmp/diff_new_pack.Bm6h8r/_old 2024-12-10 23:44:56.092061920 +0100 +++ /var/tmp/diff_new_pack.Bm6h8r/_new 2024-12-10 23:44:56.096062087 +0100 @@ -1,5 +1,5 @@ name: k0sctl -version: 0.19.4 -mtime: 1731413032 -commit: a06d3f6c227d15c3c7f1b87205ee6b32a2000521 +version: 0.20.0 +mtime: 1733733286 +commit: b361d94bb457aeb44678015241ee735aed34a335 ++++++ vendor.tar.gz ++++++ ++++ 2279 lines of diff (skipped)