This is an automated email from the ASF dual-hosted git repository.

chenyulin0719 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new 878d3854 [YUNIKORN-3074] Upgrade Golangci-lint to version 2.1.6 (#973)
878d3854 is described below

commit 878d3854914ac653d858167219bf84e4f35318df
Author: Dennis Huang <[email protected]>
AuthorDate: Fri May 23 13:58:35 2025 +0000

    [YUNIKORN-3074] Upgrade Golangci-lint to version 2.1.6 (#973)
    
    Closes: #973
    
    Signed-off-by: Yu-Lin Chen <[email protected]>
---
 .golangci.yml                                      | 112 ++++++++++-----------
 Makefile                                           |   2 +-
 pkg/admission/admission_controller_test.go         |   2 +-
 pkg/admission/util_test.go                         |  10 +-
 pkg/admission/webhook_manager.go                   |  12 +--
 pkg/admission/webhook_manager_test.go              |   6 +-
 pkg/cache/application_test.go                      |   2 +-
 pkg/cache/external/scheduler_cache_test.go         |  22 ++--
 pkg/cache/gang_utils.go                            |   5 +-
 pkg/plugin/predicates/predicate_manager.go         |   2 +-
 pkg/shim/scheduler_mock_test.go                    |   5 +-
 .../admission_controller_test.go                   |  10 +-
 test/e2e/basic_scheduling/basic_scheduling_test.go |  16 +--
 test/e2e/framework/helpers/k8s/k8s_utils.go        |  11 +-
 test/e2e/framework/helpers/k8s/pod_conf.go         |   2 +-
 .../framework/helpers/yunikorn/rest_api_utils.go   |   2 +-
 .../pod_resource_scaling_test.go                   |  25 ++---
 test/e2e/predicates/predicates_test.go             |  16 +--
 test/e2e/preemption/preemption_test.go             |  22 ++--
 test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go |   2 +-
 .../recovery_and_restart_test.go                   |  10 +-
 .../restart_changed_config_test.go                 |   8 +-
 test/e2e/simple_preemptor/simple_preemptor_test.go |   9 +-
 test/e2e/user_group_limit/user_group_limit_test.go |   7 +-
 24 files changed, 159 insertions(+), 161 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index c00bca80..2df04d8a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -15,58 +15,46 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+version: "2"
 # options for analysis running
 run:
   issues-exit-code: 1
   modules-download-mode: readonly
-  timeout: 10m
-
-# settings of specific linters
-linters-settings:
-  errcheck:
-    check-type-assertions: true
-    check-blank: true
-  revive:
-    confidence: 0.8
-  gofmt:
-    simplify: true
-  goimports:
-    local-prefixes: github.com/apache/yunikorn
-  govet:
-    shadow: true
-  goconst:
-    min-occurrences: 5
-  funlen:
-    lines: 120
-    statements: 80
-  ginkgolinter:
-    enabled: true
-    path : ./test
-  depguard:
-    rules:
-      main:
-        files:
-          - $all
-        deny:
-          - pkg: "github.com/sirupsen/logrus"
-            desc: "logging is standardised via yunikorn logger and zap"
-          - pkg: "github.com/stretchr/testify"
-            desc: "test assertions must use gotest.tools/v3/assert"
-
-# linters to use
+  tests: true
 linters:
-  disable-all: true
-  fast: false
+  settings:
+    errcheck:
+      check-type-assertions: true
+      check-blank: true
+    revive:
+      confidence: 0.8
+    govet:
+      enable:
+        - shadow
+    goconst:
+      min-occurrences: 5
+    funlen:
+      lines: 120
+      statements: 80
+    depguard:
+      rules:
+        main:
+          files:
+            - $all
+          deny:
+            - pkg: github.com/sirupsen/logrus
+              desc: logging is standardised via yunikorn logger and zap
+            - pkg: github.com/stretchr/testify
+              desc: test assertions must use gotest.tools/v3/assert
+  default: none
+# linters to use
   enable:
     - errcheck
     - unused
     - staticcheck
-    - gosimple
     - ineffassign
     - funlen
     - revive
-    - gofmt
-    - goimports
     - govet
     - goconst
     - depguard
@@ -77,26 +65,36 @@ linters:
     - dogsled
     - whitespace
     - ginkgolinter
-
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 issues:
-  exclude-use-default: true
-
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-issues-per-linter: 0
-
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
-
-  # Show only new issues: if there are unstaged changes or untracked files,
-  # only those changes are analyzed, else only changes in HEAD~ are analyzed.
-  # It's a super-useful option for integration of golangci-lint into existing
-  # large codebase. It's not practical to fix all existing issues at the moment
-  # of integration: much better don't allow issues in new code.
-  # Default is false.
   new: false
-
-  # Show only new issues created after git revision `REV`
-  # new-from-rev: REV
-
-  # Show only new issues created in git patch with set file path.
-  # new-from-patch: path/to/patch/file
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    gofmt:
+      simplify: true
+    goimports:
+      local-prefixes:
+        - github.com/apache/yunikorn
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/Makefile b/Makefile
index 6fde1a02..03ae4c4f 100644
--- a/Makefile
+++ b/Makefile
@@ -163,7 +163,7 @@ endif
 export PATH := $(BASE_DIR)/$(SHELLCHECK_PATH):$(PATH)
 
 # golangci-lint
-GOLANGCI_LINT_VERSION=1.63.4
+GOLANGCI_LINT_VERSION=2.1.6
 GOLANGCI_LINT_PATH=$(TOOLS_DIR)/golangci-lint-v$(GOLANGCI_LINT_VERSION)
 GOLANGCI_LINT_BIN=$(GOLANGCI_LINT_PATH)/golangci-lint
 
GOLANGCI_LINT_ARCHIVE=golangci-lint-$(GOLANGCI_LINT_VERSION)-$(OS)-$(EXEC_ARCH).tar.gz
diff --git a/pkg/admission/admission_controller_test.go b/pkg/admission/admission_controller_test.go
index ec064c7f..3eee9c69 100644
--- a/pkg/admission/admission_controller_test.go
+++ b/pkg/admission/admission_controller_test.go
@@ -472,7 +472,7 @@ func TestMutate(t *testing.T) {
        assert.Equal(t, labels(t, resp.Patch)[constants.LabelApplicationID], 
"yunikorn-test-ns-autogen", "wrong applicationId label")
 
        // pod with applicationId
-       pod.ObjectMeta.Labels = map[string]string{constants.LabelApplicationID: 
"test-app"}
+       pod.Labels = map[string]string{constants.LabelApplicationID: "test-app"}
        podJSON, err = json.Marshal(pod)
        assert.NilError(t, err, "failed to marshal pod")
        req.Object = runtime.RawExtension{Raw: podJSON}
diff --git a/pkg/admission/util_test.go b/pkg/admission/util_test.go
index bcfe41b2..2a162d76 100644
--- a/pkg/admission/util_test.go
+++ b/pkg/admission/util_test.go
@@ -71,23 +71,23 @@ func createTestingPodWithMeta() *v1.Pod {
 
 func createTestingPodWithLabels(appId string, queue string) *v1.Pod {
        pod := createTestingPodWithMeta()
-       pod.ObjectMeta.Labels[constants.CanonicalLabelApplicationID] = appId
-       pod.ObjectMeta.Labels[constants.CanonicalLabelQueueName] = queue
+       pod.Labels[constants.CanonicalLabelApplicationID] = appId
+       pod.Labels[constants.CanonicalLabelQueueName] = queue
 
        return pod
 }
 
 func createTestingPodWithGenerateName() *v1.Pod {
        pod := createMinimalTestingPod()
-       pod.ObjectMeta.GenerateName = "some-pod-"
+       pod.GenerateName = "some-pod-"
 
        return pod
 }
 
 func createTestingPodWithAnnotations(appId string, queue string) *v1.Pod {
        pod := createTestingPodWithMeta()
-       pod.ObjectMeta.Annotations[constants.AnnotationApplicationID] = appId
-       pod.ObjectMeta.Annotations[constants.AnnotationQueueName] = queue
+       pod.Annotations[constants.AnnotationApplicationID] = appId
+       pod.Annotations[constants.AnnotationQueueName] = queue
 
        return pod
 }
diff --git a/pkg/admission/webhook_manager.go b/pkg/admission/webhook_manager.go
index 2d50404a..8eeba399 100644
--- a/pkg/admission/webhook_manager.go
+++ b/pkg/admission/webhook_manager.go
@@ -382,7 +382,7 @@ func (wm *webhookManagerImpl) 
checkValidatingWebhook(webhook *v1.ValidatingWebho
        none := v1.SideEffectClassNone
        path := "/validate-conf"
 
-       value, ok := webhook.ObjectMeta.GetLabels()["app"]
+       value, ok := webhook.GetLabels()["app"]
        if !ok || value != webhookLabel {
                return errors.New("webhook: missing label app=yunikorn")
        }
@@ -457,7 +457,7 @@ func (wm *webhookManagerImpl) checkMutatingWebhook(webhook 
*v1.MutatingWebhookCo
        none := v1.SideEffectClassNone
        path := "/mutate"
 
-       value, ok := webhook.ObjectMeta.GetLabels()["app"]
+       value, ok := webhook.GetLabels()["app"]
        if !ok || value != "yunikorn" {
                return errors.New("webhook: missing label app=yunikorn")
        }
@@ -584,8 +584,8 @@ func (wm *webhookManagerImpl) 
populateValidatingWebhook(webhook *v1.ValidatingWe
        namespace := wm.conf.GetNamespace()
        serviceName := wm.conf.GetAmServiceName()
 
-       webhook.ObjectMeta.Name = validatingWebhook
-       webhook.ObjectMeta.Labels = map[string]string{"app": "yunikorn"}
+       webhook.Name = validatingWebhook
+       webhook.Labels = map[string]string{"app": "yunikorn"}
        webhook.Webhooks = []v1.ValidatingWebhook{
                {
                        Name: validateConfHook,
@@ -619,8 +619,8 @@ func (wm *webhookManagerImpl) 
populateMutatingWebhook(webhook *v1.MutatingWebhoo
        namespace := wm.conf.GetNamespace()
        serviceName := wm.conf.GetAmServiceName()
 
-       webhook.ObjectMeta.Name = mutatingWebhook
-       webhook.ObjectMeta.Labels = map[string]string{"app": "yunikorn"}
+       webhook.Name = mutatingWebhook
+       webhook.Labels = map[string]string{"app": "yunikorn"}
        webhook.Webhooks = []v1.MutatingWebhook{
                {
                        Name: mutatePodsWebhook,
diff --git a/pkg/admission/webhook_manager_test.go b/pkg/admission/webhook_manager_test.go
index 649033ee..62792c54 100644
--- a/pkg/admission/webhook_manager_test.go
+++ b/pkg/admission/webhook_manager_test.go
@@ -114,7 +114,7 @@ func TestLoadCACertificatesWithConflict(t *testing.T) {
        clientset := fake.NewClientset()
 
        spec := createSecret()
-       spec.ObjectMeta.SetAnnotations(map[string]string{"conflict": "true"})
+       spec.SetAnnotations(map[string]string{"conflict": "true"})
        _, err := 
clientset.CoreV1().Secrets(spec.Namespace).Create(context.Background(), spec, 
metav1.CreateOptions{})
        assert.NilError(t, err, "failed to create secret")
 
@@ -244,7 +244,7 @@ func TestInstallWebhooksWithValidationConflict(t 
*testing.T) {
 
        vh := wm.createEmptyValidatingWebhook()
        wm.populateValidatingWebhook(vh, []byte{0})
-       vh.ObjectMeta.SetAnnotations(map[string]string{"conflict": "true"})
+       vh.SetAnnotations(map[string]string{"conflict": "true"})
        _, err := 
clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.Background(),
 vh, metav1.CreateOptions{})
        assert.NilError(t, err, "failed to add validating webhook")
 
@@ -270,7 +270,7 @@ func TestInstallWebhooksWithMutationConflict(t *testing.T) {
 
        mh := wm.createEmptyMutatingWebhook()
        wm.populateMutatingWebhook(mh, []byte{0})
-       mh.ObjectMeta.SetAnnotations(map[string]string{"conflict": "true"})
+       mh.SetAnnotations(map[string]string{"conflict": "true"})
        _, err = 
clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.Background(),
 mh, metav1.CreateOptions{})
        assert.NilError(t, err, "failed to add mutating webhook")
 
diff --git a/pkg/cache/application_test.go b/pkg/cache/application_test.go
index 418b2194..f54bf79c 100644
--- a/pkg/cache/application_test.go
+++ b/pkg/cache/application_test.go
@@ -469,7 +469,7 @@ func newMockSchedulerAPI() *mockSchedulerAPI {
 }
 
 type mockSchedulerAPI struct {
-       callback   api.ResourceManagerCallback //nolint:structcheck,unused
+       callback   api.ResourceManagerCallback //nolint:unused
        registerFn func(request *si.RegisterResourceManagerRequest,
                callback api.ResourceManagerCallback) 
(*si.RegisterResourceManagerResponse, error)
        UpdateAllocationFn    func(request *si.AllocationRequest) error
diff --git a/pkg/cache/external/scheduler_cache_test.go b/pkg/cache/external/scheduler_cache_test.go
index f87464b6..c00660f8 100644
--- a/pkg/cache/external/scheduler_cache_test.go
+++ b/pkg/cache/external/scheduler_cache_test.go
@@ -755,8 +755,8 @@ func TestUpdatePod(t *testing.T) {
        }
 
        pod1 := podTemplate.DeepCopy()
-       pod1.ObjectMeta.Name = podName1
-       pod1.ObjectMeta.UID = podUID1
+       pod1.Name = podName1
+       pod1.UID = podUID1
        cache.UpdatePod(pod1)
        assert.Equal(t, len(cache.podsMap), 1, "wrong pod count after add of 
pod1")
        pod := cache.GetPod(podUID1)
@@ -764,8 +764,8 @@ func TestUpdatePod(t *testing.T) {
 
        // update of non-existent pod should be equivalent to an add
        pod2 := podTemplate.DeepCopy()
-       pod2.ObjectMeta.Name = podName2
-       pod2.ObjectMeta.UID = podUID2
+       pod2.Name = podName2
+       pod2.UID = podUID2
        cache.UpdatePod(pod2)
        assert.Equal(t, len(cache.podsMap), 2, "wrong pod count after add of 
pod2")
        pod = cache.GetPod(podUID2)
@@ -773,7 +773,7 @@ func TestUpdatePod(t *testing.T) {
 
        // normal pod update should succeed
        pod1Copy := pod1.DeepCopy()
-       pod1Copy.ObjectMeta.Annotations["state"] = "updated"
+       pod1Copy.Annotations["state"] = "updated"
        cache.UpdatePod(pod1Copy)
        found := cache.GetPod(podUID1)
        assert.Check(t, found != nil, "pod1 not found")
@@ -792,8 +792,8 @@ func TestUpdatePod(t *testing.T) {
 
        // unassumed pod should survive node changing without crashing
        pod3 := podTemplate.DeepCopy()
-       pod3.ObjectMeta.Name = "pod00003"
-       pod3.ObjectMeta.UID = "Pod-UID-00003"
+       pod3.Name = "pod00003"
+       pod3.UID = "Pod-UID-00003"
        pod3.Spec.NodeName = "orig-node"
        cache.UpdatePod(pod3)
        pod3Copy := pod3.DeepCopy()
@@ -1056,8 +1056,8 @@ func TestUpdatePVCRefCounts(t *testing.T) {
        }
 
        pod1 := podTemplate.DeepCopy()
-       pod1.ObjectMeta.Name = podName1
-       pod1.ObjectMeta.UID = podUID1
+       pod1.Name = podName1
+       pod1.UID = podUID1
        pod1.Spec.NodeName = node1.Name
        pod1.Spec.Volumes = []v1.Volume{
                {
@@ -1070,8 +1070,8 @@ func TestUpdatePVCRefCounts(t *testing.T) {
 
        // add a pod without assigned node can't update pvcRefCounts
        pod2 := podTemplate.DeepCopy()
-       pod2.ObjectMeta.Name = podName2
-       pod2.ObjectMeta.UID = podUID2
+       pod2.Name = podName2
+       pod2.UID = podUID2
        pod2.Spec.Volumes = []v1.Volume{
                {
                        Name:         pvcName2,
diff --git a/pkg/cache/gang_utils.go b/pkg/cache/gang_utils.go
index aed89b93..7d6ebd89 100644
--- a/pkg/cache/gang_utils.go
+++ b/pkg/cache/gang_utils.go
@@ -107,12 +107,13 @@ func GetSchedulingPolicyParam(pod *v1.Pod) 
*SchedulingPolicyParameters {
                        log.Log(log.ShimUtils).Warn("Skipping malformed 
scheduling policy parameter: ", zap.String("namespace", pod.Namespace), 
zap.String("name", pod.Name), zap.String("Scheduling Policy parameters passed 
in annotation: ", p))
                        continue
                }
-               if param[0] == constants.SchedulingPolicyTimeoutParam {
+               switch param[0] {
+               case constants.SchedulingPolicyTimeoutParam:
                        timeout, err = strconv.ParseInt(param[1], 10, 64)
                        if err != nil {
                                log.Log(log.ShimUtils).Warn("Failed to parse 
timeout value from annotation", zap.String("namespace", pod.Namespace), 
zap.String("name", pod.Name), zap.Int64("Using Placeholder timeout: ", 
timeout), zap.String("Placeholder timeout passed in annotation: ", p))
                        }
-               } else if param[0] == constants.SchedulingPolicyStyleParam {
+               case constants.SchedulingPolicyStyleParam:
                        style = 
constants.SchedulingPolicyStyleParamValues[param[1]]
                        if style == "" {
                                style = 
constants.SchedulingPolicyStyleParamDefault
diff --git a/pkg/plugin/predicates/predicate_manager.go b/pkg/plugin/predicates/predicate_manager.go
index 6d9d3f07..b8db248d 100644
--- a/pkg/plugin/predicates/predicate_manager.go
+++ b/pkg/plugin/predicates/predicate_manager.go
@@ -450,7 +450,7 @@ func defaultConfig() 
(*apiConfig.KubeSchedulerConfiguration, error) {
        // because the field will be cleared later by API machinery during
        // conversion. See KubeSchedulerConfiguration internal type definition 
for
        // more details.
-       cfg.TypeMeta.APIVersion = schedConfig.SchemeGroupVersion.String()
+       cfg.APIVersion = schedConfig.SchemeGroupVersion.String()
 
        // Disable some plugins we don't want for YuniKorn
        removePlugin(&cfg, names.DefaultPreemption) // we do our own preemption 
algorithm
diff --git a/pkg/shim/scheduler_mock_test.go b/pkg/shim/scheduler_mock_test.go
index 22b4b8f2..44ee5869 100644
--- a/pkg/shim/scheduler_mock_test.go
+++ b/pkg/shim/scheduler_mock_test.go
@@ -169,10 +169,7 @@ func (fc *MockScheduler) waitAndAssertTaskState(t 
*testing.T, appID, taskID, exp
 
        task := app.GetTask(taskID)
        deadline := time.Now().Add(10 * time.Second)
-       for {
-               if task.GetTaskState() == expectedState {
-                       break
-               }
+       for task.GetTaskState() != expectedState {
                log.Log(log.Test).Info("waiting for task state",
                        zap.String("expected", expectedState),
                        zap.String("actual", task.GetTaskState()))
diff --git a/test/e2e/admission_controller/admission_controller_test.go b/test/e2e/admission_controller/admission_controller_test.go
index 620a9f19..a07dd5d1 100644
--- a/test/e2e/admission_controller/admission_controller_test.go
+++ b/test/e2e/admission_controller/admission_controller_test.go
@@ -306,7 +306,7 @@ var _ = ginkgo.Describe("AdmissionController", func() {
 
                // pod is not expected to appear
                ginkgo.By("Check for sleep pods (should time out)")
-               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.ObjectMeta.Labels["app"]),
+               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.Labels["app"]),
                        10*time.Second)
                fmt.Fprintf(ginkgo.GinkgoWriter, "Error: %v\n", err)
                gomega.Ω(err).Should(gomega.HaveOccurred())
@@ -332,7 +332,7 @@ var _ = ginkgo.Describe("AdmissionController", func() {
 
                // pod is expected to appear
                ginkgo.By("Check for sleep pod")
-               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.ObjectMeta.Labels["app"]),
+               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.Labels["app"]),
                        60*time.Second)
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
        })
@@ -381,7 +381,7 @@ var _ = ginkgo.Describe("AdmissionController", func() {
 
                // pod is expected to appear
                ginkgo.By("Check for sleep pod")
-               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.ObjectMeta.Labels["app"]),
+               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
testDeployment.Labels["app"]),
                        60*time.Second)
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
        })
@@ -407,7 +407,7 @@ var _ = ginkgo.Describe("AdmissionController", func() {
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
                defer kubeClient.DeleteWorkloadAndPods(deployment.Name, 
k8s.Deployment, ns)
                ginkgo.By("Check for sleep pod")
-               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
deployment.ObjectMeta.Labels["app"]),
+               err = kubeClient.WaitForPodBySelector(ns, fmt.Sprintf("app=%s", 
deployment.Labels["app"]),
                        60*time.Second)
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
 
@@ -431,7 +431,7 @@ var _ = ginkgo.Describe("AdmissionController", func() {
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
 
                ginkgo.By("Wait for sleep pod")
-               err = kubeClient.WaitForPodBySelectorRunning(ns, 
fmt.Sprintf("app=%s", testDeployment.ObjectMeta.Labels["app"]),
+               err = kubeClient.WaitForPodBySelectorRunning(ns, 
fmt.Sprintf("app=%s", testDeployment.Labels["app"]),
                        60)
                gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
 
diff --git a/test/e2e/basic_scheduling/basic_scheduling_test.go b/test/e2e/basic_scheduling/basic_scheduling_test.go
index e4a7d990..7ee27ed7 100644
--- a/test/e2e/basic_scheduling/basic_scheduling_test.go
+++ b/test/e2e/basic_scheduling/basic_scheduling_test.go
@@ -57,15 +57,15 @@ var _ = ginkgo.Describe("", func() {
                // Wait for pod to move to running state
                err = kClient.WaitForPodRunning(dev, sleepPodConfigs.Name, 
30*time.Second)
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
-               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
sleepRespPod.ObjectMeta.Labels["applicationId"])
+               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
sleepRespPod.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
        })
 
        ginkgo.It("Verify_App_Queue_Info", func() {
                ginkgo.By("Verify that the sleep pod is mapped to development 
queue")
-               
gomega.Ω(appsInfo.ApplicationID).To(gomega.Equal(sleepRespPod.ObjectMeta.Labels["applicationId"]))
-               
gomega.Ω(appsInfo.QueueName).To(gomega.ContainSubstring(sleepRespPod.ObjectMeta.Namespace))
+               
gomega.Ω(appsInfo.ApplicationID).To(gomega.Equal(sleepRespPod.Labels["applicationId"]))
+               
gomega.Ω(appsInfo.QueueName).To(gomega.ContainSubstring(sleepRespPod.Namespace))
        })
 
        ginkgo.It("Verify_Job_State", func() {
@@ -81,7 +81,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(allocation).NotTo(gomega.BeNil())
                gomega.Ω(allocation.AllocationKey).NotTo(gomega.BeNil())
                gomega.Ω(allocation.NodeID).NotTo(gomega.BeNil())
-               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(sleepRespPod.ObjectMeta.Labels["applicationId"]))
+               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(sleepRespPod.Labels["applicationId"]))
                core := 
sleepRespPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
                mem := 
sleepRespPod.Spec.Containers[0].Resources.Requests.Memory().Value()
                resMap := allocation.ResourcePerAlloc
@@ -103,7 +103,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
 
                ginkgo.By("Verify that the pod is scheduled and running")
-               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
bestEffortPod.ObjectMeta.Labels["applicationId"])
+               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
bestEffortPod.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
                gomega.Ω(appsInfo.State).To(gomega.Equal("Running"))
@@ -117,7 +117,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(allocation).NotTo(gomega.BeNil())
                gomega.Ω(allocation.AllocationKey).NotTo(gomega.BeNil())
                gomega.Ω(allocation.NodeID).NotTo(gomega.BeNil())
-               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(bestEffortPod.ObjectMeta.Labels["applicationId"]))
+               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(bestEffortPod.Labels["applicationId"]))
                core := 
bestEffortPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
                mem := 
bestEffortPod.Spec.Containers[0].Resources.Requests.Memory().Value()
                resMap := allocation.ResourcePerAlloc
@@ -139,7 +139,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
 
                ginkgo.By("Verify that the pod is scheduled and running")
-               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
burstablePod.ObjectMeta.Labels["applicationId"])
+               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
burstablePod.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
                gomega.Ω(appsInfo.State).To(gomega.Equal("Running"))
@@ -153,7 +153,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(allocation).NotTo(gomega.BeNil())
                gomega.Ω(allocation.AllocationKey).NotTo(gomega.BeNil())
                gomega.Ω(allocation.NodeID).NotTo(gomega.BeNil())
-               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(burstablePod.ObjectMeta.Labels["applicationId"]))
+               
gomega.Ω(allocation.ApplicationID).To(gomega.Equal(burstablePod.Labels["applicationId"]))
                core := 
burstablePod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
                mem := 
burstablePod.Spec.Containers[0].Resources.Requests.Memory().Value()
                resMap := allocation.ResourcePerAlloc
diff --git a/test/e2e/framework/helpers/k8s/k8s_utils.go b/test/e2e/framework/helpers/k8s/k8s_utils.go
index e63f7ad7..f53ebe46 100644
--- a/test/e2e/framework/helpers/k8s/k8s_utils.go
+++ b/test/e2e/framework/helpers/k8s/k8s_utils.go
@@ -37,7 +37,6 @@ import (
        appsv1 "k8s.io/api/apps/v1"
        batchv1 "k8s.io/api/batch/v1"
        v1 "k8s.io/api/core/v1"
-       authv1 "k8s.io/api/rbac/v1"
        rbacv1 "k8s.io/api/rbac/v1"
        schedulingv1 "k8s.io/api/scheduling/v1"
        storagev1 "k8s.io/api/storage/v1"
@@ -1160,18 +1159,18 @@ func (k *KubeCtl) CreateClusterRoleBinding(
        roleName string,
        role string,
        namespace string,
-       serviceAccount string) (*authv1.ClusterRoleBinding, error) {
-       return 
k.clientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), 
&authv1.ClusterRoleBinding{
+       serviceAccount string) (*rbacv1.ClusterRoleBinding, error) {
+       return 
k.clientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), 
&rbacv1.ClusterRoleBinding{
                TypeMeta:   metav1.TypeMeta{},
                ObjectMeta: metav1.ObjectMeta{Name: roleName},
-               Subjects: []authv1.Subject{
+               Subjects: []rbacv1.Subject{
                        {
                                Kind:      "ServiceAccount",
                                Name:      serviceAccount,
                                Namespace: namespace,
                        },
                },
-               RoleRef: authv1.RoleRef{Name: role, Kind: "ClusterRole"},
+               RoleRef: rbacv1.RoleRef{Name: role, Kind: "ClusterRole"},
        }, metav1.CreateOptions{})
 }
 
@@ -1839,7 +1838,7 @@ func (k *KubeCtl) GetSecret(namespace, secretName string) 
(*v1.Secret, error) {
 }
 
 func (k *KubeCtl) WaitForSecret(namespace, secretName string, timeout 
time.Duration) error {
-       var cond wait.ConditionFunc // nolint:gosimple
+       var cond wait.ConditionFunc // nolint:staticcheck
        cond = func() (done bool, err error) {
                secret, err := k.GetSecret(namespace, secretName)
                if err != nil {
diff --git a/test/e2e/framework/helpers/k8s/pod_conf.go b/test/e2e/framework/helpers/k8s/pod_conf.go
index 6f54060d..d0873b96 100644
--- a/test/e2e/framework/helpers/k8s/pod_conf.go
+++ b/test/e2e/framework/helpers/k8s/pod_conf.go
@@ -217,7 +217,7 @@ func InitTestPod(conf TestPodConfig) (*v1.Pod, error) { 
//nolint:funlen
                pod.Spec.Containers[0].Resources = *conf.Resources
        }
        if conf.DeletionGracePeriodSeconds != nil {
-               pod.ObjectMeta.DeletionGracePeriodSeconds = 
conf.DeletionGracePeriodSeconds
+               pod.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
        }
        if conf.InitContainerSleepSecs > 0 {
                containerReqs := v1.ResourceRequirements{
diff --git a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
index f57c0ab0..5be30f2f 100644
--- a/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
+++ b/test/e2e/framework/helpers/yunikorn/rest_api_utils.go
@@ -258,7 +258,7 @@ func (c *RClient) GetCompletedAppInfo(partition string, 
appID string) (*dao.Appl
                return latestApp, nil
        }
 
-       return nil, fmt.Errorf("No application found with ID %s in 'Failed', 
'Expired', 'Completed' state", appID)
+       return nil, fmt.Errorf("no application found with ID %s in 'Failed', 
'Expired', 'Completed' state", appID)
 }
 
 func (c *RClient) GetAllocationLog(partition string, queueName string, appID 
string, podName string) ([]*dao.AllocationAskLogDAOInfo, error) {
diff --git a/test/e2e/pod_resource_scaling/pod_resource_scaling_test.go b/test/e2e/pod_resource_scaling/pod_resource_scaling_test.go
index 95d0fd53..743d2c24 100644
--- a/test/e2e/pod_resource_scaling/pod_resource_scaling_test.go
+++ b/test/e2e/pod_resource_scaling/pod_resource_scaling_test.go
@@ -33,7 +33,6 @@ import (
 
 var kClient k8s.KubeCtl
 var restClient yunikorn.RClient
-var err error
 var ns string
 var oldConfigMap = new(v1.ConfigMap)
 var suiteName string
@@ -61,7 +60,7 @@ var _ = ginkgo.AfterEach(func() {
 })
 
 func verifyYunikornResourceUsage(appID, resourceName string, value int64) {
-       err = utils.WaitForCondition(func() bool {
+       err := utils.WaitForCondition(func() bool {
                app, err := restClient.GetAppInfo("default", "root."+ns, appID)
                if err != nil || app == nil {
                        fmt.Println(err)
@@ -105,7 +104,7 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                Ω(err).NotTo(HaveOccurred())
 
                // Check if pod is scheduled by YuniKorn and verify CPU 
allocation is 100m
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
100)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 100)
 
                // Get initial pod restart count
                pod, err = kClient.GetPod(pod.Name, ns)
@@ -116,13 +115,13 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", 
func() {
                Ω(err).NotTo(HaveOccurred())
 
                
Ω(pod.Status.ContainerStatuses[0].RestartCount).To(Equal(initialRestartCount), 
"Container should not have restarted")
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
200)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 200)
 
                pod, err = kClient.ModifyResourceUsage(pod, ns, 200, 200)
                Ω(err).NotTo(HaveOccurred())
 
                
Ω(pod.Status.ContainerStatuses[0].RestartCount).To(Equal(initialRestartCount), 
"Container should not have restarted")
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "memory", 
200*1024*1024)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"memory", 200*1024*1024)
        })
 
        ginkgo.It("Pod resources(cpu/memory) resize down", func() {
@@ -140,7 +139,7 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                Ω(err).NotTo(HaveOccurred())
 
                // Check if pod is scheduled by YuniKorn and verify CPU 
allocation is 100m
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
200)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 200)
 
                // Get initial pod state
                pod, err = kClient.GetPod(pod.Name, ns)
@@ -153,7 +152,8 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
 
                // Wait for resource update to be reflected
                err = utils.WaitForCondition(func() bool {
-                       currentPod, err := kClient.GetPod(pod.Name, ns)
+                       var currentPod *v1.Pod
+                       currentPod, err = kClient.GetPod(pod.Name, ns)
                        if err != nil {
                                return false
                        }
@@ -164,7 +164,7 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                Ω(err).NotTo(HaveOccurred())
                Ω(pod.Status.StartTime).To(Equal(initialStartTime), "Pod should 
not have restarted")
                
Ω(pod.Status.ContainerStatuses[0].RestartCount).To(Equal(initialRestartCount), 
"Container should not have restarted")
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
100)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 100)
 
                pod, err = kClient.ModifyResourceUsage(pod, ns, 100, 100)
                Ω(err).NotTo(HaveOccurred()) // NOTE(review): the assertion expects NO error, but the original comment claimed an error was expected because memory cannot be decreased — confirm which is intended
@@ -172,7 +172,7 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                Ω(err).NotTo(HaveOccurred())
                Ω(pod.Status.StartTime).To(Equal(initialStartTime), "Pod should 
not have restarted")
                
Ω(pod.Status.ContainerStatuses[0].RestartCount).To(Equal(initialRestartCount), 
"Container should not have restarted")
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "memory", 
100*1024*1024)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"memory", 100*1024*1024)
        })
 
        ginkgo.It("Pod resources(cpu/memory) resize to excessive values should 
fail", func() {
@@ -190,7 +190,7 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                Ω(err).NotTo(HaveOccurred())
 
                // Check if pod is scheduled by YuniKorn and verify CPU 
allocation is 100m
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
100)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 100)
 
                // Get initial pod state
                pod, err = kClient.GetPod(pod.Name, ns)
@@ -204,7 +204,8 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
 
                // Wait for resource update to be reflected
                err = utils.WaitForCondition(func() bool {
-                       currentPod, err := kClient.GetPod(pod.Name, ns)
+                       var currentPod *v1.Pod
+                       currentPod, err = kClient.GetPod(pod.Name, ns)
                        if err != nil {
                                return false
                        }
@@ -217,6 +218,6 @@ var _ = ginkgo.Describe("InPlacePodVerticalScaling", func() 
{
                
Ω(pod.Status.ContainerStatuses[0].RestartCount).To(Equal(initialRestartCount), 
"Container should not have restarted")
 
                // Verify pod resource usage is unchanged after set an 
excessive value
-               
verifyYunikornResourceUsage(pod.ObjectMeta.Labels["applicationId"], "vcore", 
100)
+               verifyYunikornResourceUsage(pod.Labels["applicationId"], 
"vcore", 100)
        })
 })
diff --git a/test/e2e/predicates/predicates_test.go 
b/test/e2e/predicates/predicates_test.go
index 6ffb1b00..fcda188a 100644
--- a/test/e2e/predicates/predicates_test.go
+++ b/test/e2e/predicates/predicates_test.go
@@ -153,9 +153,9 @@ var _ = Describe("Predicates", func() {
 
                By("Verify the YuniKorn request failed scheduling")
 
-               podErr = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.ObjectMeta.Labels["applicationId"], podName, 60)
+               podErr = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.Labels["applicationId"], podName, 60)
                Ω(podErr).NotTo(HaveOccurred())
-               log, podErr := restClient.GetAllocationLog("default", 
"root."+ns, initPod.ObjectMeta.Labels["applicationId"], podName)
+               log, podErr := restClient.GetAllocationLog("default", 
"root."+ns, initPod.Labels["applicationId"], podName)
                Ω(podErr).NotTo(HaveOccurred())
                Ω(log).NotTo(BeNil(), "Log can't be empty")
                logEntries := yunikorn.AllocLogToStrings(log)
@@ -259,9 +259,9 @@ var _ = Describe("Predicates", func() {
 
                By("Verify the YuniKorn request failed scheduling")
 
-               podErr = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.ObjectMeta.Labels["applicationId"], podName, 60)
+               podErr = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.Labels["applicationId"], podName, 60)
                Ω(podErr).NotTo(HaveOccurred())
-               log, podErr := restClient.GetAllocationLog("default", 
"root."+ns, initPod.ObjectMeta.Labels["applicationId"], podName)
+               log, podErr := restClient.GetAllocationLog("default", 
"root."+ns, initPod.Labels["applicationId"], podName)
                Ω(podErr).NotTo(HaveOccurred())
                Ω(log).NotTo(BeNil(), "Log can't be empty")
                logEntries := yunikorn.AllocLogToStrings(log)
@@ -422,9 +422,9 @@ var _ = Describe("Predicates", func() {
 
                By("Verify the YuniKorn request failed scheduling")
 
-               err = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.ObjectMeta.Labels["applicationId"], podNameNoTolerations, 60)
+               err = restClient.WaitForAllocationLog("default", "root."+ns, 
initPod.Labels["applicationId"], podNameNoTolerations, 60)
                Ω(err).NotTo(HaveOccurred())
-               log, err := restClient.GetAllocationLog("default", "root."+ns, 
initPod.ObjectMeta.Labels["applicationId"], podNameNoTolerations)
+               log, err := restClient.GetAllocationLog("default", "root."+ns, 
initPod.Labels["applicationId"], podNameNoTolerations)
                Ω(err).NotTo(HaveOccurred())
                Ω(log).NotTo(BeNil(), "Log can't be empty")
                logEntries := yunikorn.AllocLogToStrings(log)
@@ -1081,9 +1081,9 @@ var _ = Describe("Predicates", func() {
 
                By("Verify the YuniKorn request failed scheduling")
 
-               err = restClient.WaitForAllocationLog("default", 
"root."+anotherNS, initPod.ObjectMeta.Labels["applicationId"], labelPodName2, 
60)
+               err = restClient.WaitForAllocationLog("default", 
"root."+anotherNS, initPod.Labels["applicationId"], labelPodName2, 60)
                Ω(err).NotTo(HaveOccurred())
-               log, err := restClient.GetAllocationLog("default", 
"root."+anotherNS, initPod.ObjectMeta.Labels["applicationId"], labelPodName2)
+               log, err := restClient.GetAllocationLog("default", 
"root."+anotherNS, initPod.Labels["applicationId"], labelPodName2)
                Ω(err).NotTo(HaveOccurred())
                Ω(log).NotTo(BeNil(), "Log can't be empty")
                logEntries := yunikorn.AllocLogToStrings(log)
diff --git a/test/e2e/preemption/preemption_test.go 
b/test/e2e/preemption/preemption_test.go
index 9325d377..23c2f270 100644
--- a/test/e2e/preemption/preemption_test.go
+++ b/test/e2e/preemption/preemption_test.go
@@ -104,7 +104,7 @@ var _ = ginkgo.Describe("Preemption", func() {
 
                        // Wait for pod to move to running state
                        podErr = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                120)
                        gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
                }
@@ -166,7 +166,7 @@ var _ = ginkgo.Describe("Preemption", func() {
 
                        // Wait for pod to move to running state
                        podErr = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                30)
                        gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
                }
@@ -232,7 +232,7 @@ var _ = ginkgo.Describe("Preemption", func() {
 
                        // Wait for pod to move to running state
                        podErr = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                30)
                        gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
                }
@@ -311,7 +311,7 @@ var _ = ginkgo.Describe("Preemption", func() {
 
                        // Wait for pod to move to running state
                        podErr = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                60)
                        gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
                }
@@ -336,9 +336,9 @@ var _ = ginkgo.Describe("Preemption", func() {
                Ω(podErr).NotTo(HaveOccurred())
 
                ginkgo.By("Verify the sleep pod " + sleepPod4Config.Name + " 
request failed scheduling")
-               podErr = restClient.WaitForAllocationLog("default", 
"root.parent.low-priority", sleepRespPod4.ObjectMeta.Labels["applicationId"], 
sleepPod4Config.Name, 60)
+               podErr = restClient.WaitForAllocationLog("default", 
"root.parent.low-priority", sleepRespPod4.Labels["applicationId"], 
sleepPod4Config.Name, 60)
                Ω(podErr).NotTo(HaveOccurred())
-               log, podErr := restClient.GetAllocationLog("default", 
"root.parent.low-priority", sleepRespPod4.ObjectMeta.Labels["applicationId"], 
sleepPod4Config.Name)
+               log, podErr := restClient.GetAllocationLog("default", 
"root.parent.low-priority", sleepRespPod4.Labels["applicationId"], 
sleepPod4Config.Name)
                Ω(podErr).NotTo(HaveOccurred())
                Ω(log).NotTo(gomega.BeNil(), "Log can't be empty")
                logEntries := yunikorn.AllocLogToStrings(log)
@@ -446,7 +446,7 @@ var _ = ginkgo.Describe("Preemption", func() {
 
                        // Wait for pod to move to running state
                        podErr = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                60*60*2)
                        gomega.Ω(podErr).NotTo(gomega.HaveOccurred())
                }
@@ -478,11 +478,11 @@ var _ = ginkgo.Describe("Preemption", func() {
                }
                Ω(sandbox3RunningPodsCnt).To(gomega.Equal(1), "One of the pods 
in root.sandbox3 should be preempted")
 
-               ginkgo.By(fmt.Sprintf("Removing priority class %s", 
preemptAllowPriorityClass.ObjectMeta.Name))
-               err = 
kClient.DeletePriorityClass(preemptAllowPriorityClass.ObjectMeta.Name)
+               ginkgo.By(fmt.Sprintf("Removing priority class %s", 
preemptAllowPriorityClass.Name))
+               err = 
kClient.DeletePriorityClass(preemptAllowPriorityClass.Name)
                gomega.Ω(err).ShouldNot(HaveOccurred())
-               ginkgo.By(fmt.Sprintf("Removing priority class %s", 
preemptNotAllowPriorityClass.ObjectMeta.Name))
-               err = 
kClient.DeletePriorityClass(preemptNotAllowPriorityClass.ObjectMeta.Name)
+               ginkgo.By(fmt.Sprintf("Removing priority class %s", 
preemptNotAllowPriorityClass.Name))
+               err = 
kClient.DeletePriorityClass(preemptNotAllowPriorityClass.Name)
                gomega.Ω(err).ShouldNot(HaveOccurred())
        })
 
diff --git a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go 
b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
index a19796c9..d7220e44 100644
--- a/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
+++ b/test/e2e/queue_quota_mgmt/queue_quota_mgmt_test.go
@@ -174,7 +174,7 @@ var _ = Describe("", func() {
 
                By(fmt.Sprintf("App-%d: Verify app:%s in accepted state", 
nextPod, sleepObj.Name))
                // Wait for pod to move to accepted state
-               err = restClient.WaitForAppStateTransition("default", 
"root."+ns, sleepRespPod.ObjectMeta.Labels["applicationId"],
+               err = restClient.WaitForAppStateTransition("default", 
"root."+ns, sleepRespPod.Labels["applicationId"],
                        yunikorn.States().Application.Accepted,
                        240)
                Ω(err).NotTo(HaveOccurred())
diff --git a/test/e2e/recovery_and_restart/recovery_and_restart_test.go 
b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
index eb8ba73f..09d89b17 100644
--- a/test/e2e/recovery_and_restart/recovery_and_restart_test.go
+++ b/test/e2e/recovery_and_restart/recovery_and_restart_test.go
@@ -70,7 +70,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                // Wait for pod to move to running state
                err = kClient.WaitForPodBySelectorRunning(dev,
-                       fmt.Sprintf("applicationId=%s", 
sleepRespPod.ObjectMeta.Labels["applicationId"]),
+                       fmt.Sprintf("applicationId=%s", 
sleepRespPod.Labels["applicationId"]),
                        60)
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                ginkgo.By("Deploy 2nd sleep pod to the development namespace")
@@ -80,15 +80,15 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                // Wait for pod to move to running state
                err = kClient.WaitForPodBySelectorRunning(dev,
-                       fmt.Sprintf("applicationId=%s", 
sleepRespPod2.ObjectMeta.Labels["applicationId"]),
+                       fmt.Sprintf("applicationId=%s", 
sleepRespPod2.Labels["applicationId"]),
                        60)
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
        })
 
        ginkgo.It("Verify_Pod_Alloc_Props", func() {
-               err := restClient.WaitForAppStateTransition("default", 
"root."+dev, sleepRespPod.ObjectMeta.Labels["applicationId"], "Running", 30)
+               err := restClient.WaitForAppStateTransition("default", 
"root."+dev, sleepRespPod.Labels["applicationId"], "Running", 30)
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
-               appsInfo, err := restClient.GetAppInfo("default", "root."+dev, 
sleepRespPod.ObjectMeta.Labels["applicationId"])
+               appsInfo, err := restClient.GetAppInfo("default", "root."+dev, 
sleepRespPod.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
                ginkgo.By("Verify the pod allocation properties")
@@ -98,7 +98,7 @@ var _ = ginkgo.Describe("", func() {
                gomega.Ω(allocations).NotTo(gomega.BeNil())
                gomega.Ω(allocations.AllocationKey).NotTo(gomega.BeNil())
                gomega.Ω(allocations.NodeID).NotTo(gomega.BeNil())
-               
gomega.Ω(allocations.ApplicationID).To(gomega.Equal(sleepRespPod.ObjectMeta.Labels["applicationId"]))
+               
gomega.Ω(allocations.ApplicationID).To(gomega.Equal(sleepRespPod.Labels["applicationId"]))
                core := 
sleepRespPod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()
                mem := 
sleepRespPod.Spec.Containers[0].Resources.Requests.Memory().Value()
                resMap := allocations.ResourcePerAlloc
diff --git a/test/e2e/restart_changed_config/restart_changed_config_test.go 
b/test/e2e/restart_changed_config/restart_changed_config_test.go
index 792ad426..a54fe8af 100644
--- a/test/e2e/restart_changed_config/restart_changed_config_test.go
+++ b/test/e2e/restart_changed_config/restart_changed_config_test.go
@@ -131,13 +131,13 @@ var _ = ginkgo.Describe("PodInRecoveryQueue", func() {
 
                ginkgo.By("Check pod in the dev namespace")
                var appsInfo *dao.ApplicationDAOInfo
-               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
podDev.ObjectMeta.Labels["applicationId"])
+               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
podDev.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
 
                ginkgo.By("Check pod in the test namespace")
                var appsInfo2 *dao.ApplicationDAOInfo
-               appsInfo2, err = restClient.GetAppInfo("default", "root."+test, 
podTest.ObjectMeta.Labels["applicationId"])
+               appsInfo2, err = restClient.GetAppInfo("default", "root."+test, 
podTest.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo2).NotTo(gomega.BeNil())
 
@@ -152,12 +152,12 @@ var _ = ginkgo.Describe("PodInRecoveryQueue", func() {
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
 
                ginkgo.By("Check pod in the dev namespace")
-               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
podDev.ObjectMeta.Labels["applicationId"])
+               appsInfo, err = restClient.GetAppInfo("default", "root."+dev, 
podDev.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo).NotTo(gomega.BeNil())
 
                ginkgo.By("Check pod in the test namespace: recovery queue")
-               appsInfo2, err = restClient.GetAppInfo("default", "", 
podTest.ObjectMeta.Labels["applicationId"])
+               appsInfo2, err = restClient.GetAppInfo("default", "", 
podTest.Labels["applicationId"])
                gomega.Ω(err).NotTo(gomega.HaveOccurred())
                gomega.Ω(appsInfo2).NotTo(gomega.BeNil())
                
gomega.Ω(appsInfo2.QueueName).Should(gomega.BeEquivalentTo("root.@recovery@"))
diff --git a/test/e2e/simple_preemptor/simple_preemptor_test.go 
b/test/e2e/simple_preemptor/simple_preemptor_test.go
index 8f902a23..eb7d356c 100644
--- a/test/e2e/simple_preemptor/simple_preemptor_test.go
+++ b/test/e2e/simple_preemptor/simple_preemptor_test.go
@@ -105,9 +105,10 @@ var _ = ginkgo.BeforeSuite(func() {
        if err == nil {
                for _, pod := range pods.Items {
                        for _, c := range pod.Spec.Containers {
-                               if pod.Spec.NodeName == Worker1 {
+                               switch pod.Spec.NodeName {
+                               case Worker1:
                                        
totalPodQuantity1.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
-                               } else if pod.Spec.NodeName == Worker2 {
+                               case Worker2:
                                        
totalPodQuantity2.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
                                }
                        }
@@ -157,7 +158,7 @@ var _ = ginkgo.Describe("SimplePreemptor", func() {
 
                        // Wait for pod to move to running state
                        err = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                60)
                        gomega.Ω(err).NotTo(gomega.HaveOccurred())
                }
@@ -189,7 +190,7 @@ var _ = ginkgo.Describe("SimplePreemptor", func() {
                        gomega.Ω(err).NotTo(gomega.HaveOccurred())
                        // Wait for pod to move to running state
                        err = kClient.WaitForPodBySelectorRunning(dev,
-                               fmt.Sprintf("app=%s", 
sleepRespPod.ObjectMeta.Labels["app"]),
+                               fmt.Sprintf("app=%s", 
sleepRespPod.Labels["app"]),
                                240)
                        gomega.Ω(err).NotTo(gomega.HaveOccurred())
                }
diff --git a/test/e2e/user_group_limit/user_group_limit_test.go 
b/test/e2e/user_group_limit/user_group_limit_test.go
index 4f6b6bc8..39f5f14c 100644
--- a/test/e2e/user_group_limit/user_group_limit_test.go
+++ b/test/e2e/user_group_limit/user_group_limit_test.go
@@ -985,7 +985,7 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
                                },
                        },
                }
-               _, err = 
kClient.CreateClusterRoleBinding(clusterRoleBinding.ObjectMeta.Name, 
clusterRoleBinding.RoleRef.Name, clusterRoleBinding.Subjects[0].Namespace, 
clusterRoleBinding.Subjects[0].Name)
+               _, err = 
kClient.CreateClusterRoleBinding(clusterRoleBinding.Name, 
clusterRoleBinding.RoleRef.Name, clusterRoleBinding.Subjects[0].Namespace, 
clusterRoleBinding.Subjects[0].Name)
                gomega.Ω(err).NotTo(HaveOccurred())
                // Create a Secret for the Service Account
                ginkgo.By("Creating Secret for the Service Account...")
@@ -1120,14 +1120,15 @@ func deploySleepPod(usergroup *si.UserGroupInformation, 
queuePath string, expect
 
 func checkUsage(testType TestType, name string, queuePath string, 
expectedRunningPods []*v1.Pod) {
        var rootQueueResourceUsageDAO *dao.ResourceUsageDAOInfo
-       if testType == userTestType {
+       switch testType {
+       case userTestType:
                ginkgo.By(fmt.Sprintf("Check user resource usage for %s in 
queue %s", name, queuePath))
                userUsageDAOInfo, err := 
restClient.GetUserUsage(constants.DefaultPartition, name)
                Ω(err).NotTo(HaveOccurred())
                Ω(userUsageDAOInfo).NotTo(gomega.BeNil())
 
                rootQueueResourceUsageDAO = userUsageDAOInfo.Queues
-       } else if testType == groupTestType {
+       case groupTestType:
                ginkgo.By(fmt.Sprintf("Check group resource usage for %s in 
queue %s", name, queuePath))
                groupUsageDAOInfo, err := 
restClient.GetGroupUsage(constants.DefaultPartition, name)
                Ω(err).NotTo(HaveOccurred())


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to