This is an automated email from the ASF dual-hosted git repository.

wilfreds pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git


The following commit(s) were added to refs/heads/master by this push:
     new c8e7eca3 [YUNIKORN-2092] Remove spark-operator AppManager (#711)
c8e7eca3 is described below

commit c8e7eca3b19e8c7f6149ba8213936bc7bd79385a
Author: Craig Condit <ccon...@apache.org>
AuthorDate: Thu Nov 2 19:05:05 2023 +1100

    [YUNIKORN-2092] Remove spark-operator AppManager (#711)
    
    Closes: #711
    
    Signed-off-by: Wilfred Spiegelenburg <wilfr...@apache.org>
---
 Makefile                                           |  12 -
 deployments/scheduler/yunikorn-rbac.yaml           |   3 -
 pkg/appmgmt/appmgmt.go                             |   3 -
 pkg/appmgmt/sparkoperator/spark.go                 | 125 ----------
 pkg/common/constants/constants.go                  |   2 -
 pkg/log/logger.go                                  |  57 +++--
 pkg/log/logger_test.go                             |   2 +-
 pkg/sparkclient/clientset/versioned/clientset.go   | 121 ----------
 pkg/sparkclient/clientset/versioned/doc.go         |  21 --
 .../versioned/fake/clientset_generated.go          |  86 -------
 pkg/sparkclient/clientset/versioned/fake/doc.go    |  21 --
 .../clientset/versioned/fake/register.go           |  57 -----
 pkg/sparkclient/clientset/versioned/scheme/doc.go  |  21 --
 .../clientset/versioned/scheme/register.go         |  57 -----
 .../typed/sparkoperator.k8s.io/v1beta2/doc.go      |  21 --
 .../typed/sparkoperator.k8s.io/v1beta2/fake/doc.go |  21 --
 .../v1beta2/fake/fake_scheduledsparkapplication.go | 142 ------------
 .../v1beta2/fake/fake_sparkapplication.go          | 142 ------------
 .../fake/fake_sparkoperator.k8s.io_client.go       |  45 ----
 .../v1beta2/generated_expansion.go                 |  24 --
 .../v1beta2/scheduledsparkapplication.go           | 196 ----------------
 .../v1beta2/sparkapplication.go                    | 196 ----------------
 .../v1beta2/sparkoperator.k8s.io_client.go         | 113 ---------
 .../informers/externalversions/factory.go          | 252 ---------------------
 .../informers/externalversions/generic.go          |  65 ------
 .../internalinterfaces/factory_interfaces.go       |  41 ----
 .../sparkoperator.k8s.io/interface.go              |  47 ----
 .../sparkoperator.k8s.io/v1beta2/interface.go      |  53 -----
 .../v1beta2/scheduledsparkapplication.go           |  91 --------
 .../v1beta2/sparkapplication.go                    |  91 --------
 .../v1beta2/expansion_generated.go                 |  36 ---
 .../v1beta2/scheduledsparkapplication.go           | 100 --------
 .../v1beta2/sparkapplication.go                    | 100 --------
 scripts/generate-groups.sh                         | 116 ----------
 scripts/update-codegen.sh                          |  29 ---
 35 files changed, 29 insertions(+), 2480 deletions(-)

diff --git a/Makefile b/Makefile
index a20122d0..1f2487d3 100644
--- a/Makefile
+++ b/Makefile
@@ -472,18 +472,6 @@ $(RELEASE_BIN_DIR)/$(TEST_SERVER_BINARY): go.mod go.sum 
$(shell find pkg)
        -installsuffix netgo \
        ./pkg/cmd/webtest/
 
-#Generate the CRD code with code-generator (release-1.14)
-
-# If you want to re-run the code-generator to generate code,
-# Please make sure the directory structure matches the example.
-# ex: github.com/apache/yunikorn-k8shim
-# Also you need to set your GOPATH environment variable first.
-# If GOPATH is empty, we will set it to "$HOME/go".
-.PHONY: code_gen
-code_gen:
-       @echo "Generating CRD code"
-       ./scripts/update-codegen.sh
-
 # Run the tests after building
 .PHONY: test
 test:
diff --git a/deployments/scheduler/yunikorn-rbac.yaml 
b/deployments/scheduler/yunikorn-rbac.yaml
index cc9cf0fa..5a047e80 100644
--- a/deployments/scheduler/yunikorn-rbac.yaml
+++ b/deployments/scheduler/yunikorn-rbac.yaml
@@ -38,9 +38,6 @@ rules:
   - apiGroups: [""]
     resources: ["pods"]
     verbs: ["get", "watch", "list", "create", "patch", "update", "delete"]
-  - apiGroups: ["sparkoperator.k8s.io"]
-    resources: ["*"]
-    verbs: ["*"]
 
 ---
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/pkg/appmgmt/appmgmt.go b/pkg/appmgmt/appmgmt.go
index ea7abb94..5d44a75b 100644
--- a/pkg/appmgmt/appmgmt.go
+++ b/pkg/appmgmt/appmgmt.go
@@ -25,7 +25,6 @@ import (
 
        "github.com/apache/yunikorn-k8shim/pkg/appmgmt/general"
        "github.com/apache/yunikorn-k8shim/pkg/appmgmt/interfaces"
-       "github.com/apache/yunikorn-k8shim/pkg/appmgmt/sparkoperator"
        "github.com/apache/yunikorn-k8shim/pkg/client"
        "github.com/apache/yunikorn-k8shim/pkg/conf"
        "github.com/apache/yunikorn-k8shim/pkg/log"
@@ -58,8 +57,6 @@ func NewAMService(amProtocol 
interfaces.ApplicationManagementProtocol,
                // registered app plugins
                // for general apps
                general.NewManager(apiProvider, podEventHandler),
-               // for spark operator - SparkApplication
-               sparkoperator.NewManager(amProtocol, apiProvider),
        )
 
        return appManager
diff --git a/pkg/appmgmt/sparkoperator/spark.go 
b/pkg/appmgmt/sparkoperator/spark.go
deleted file mode 100644
index a1caf655..00000000
--- a/pkg/appmgmt/sparkoperator/spark.go
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package sparkoperator
-
-import (
-       
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       "go.uber.org/zap"
-       k8sCache "k8s.io/client-go/tools/cache"
-
-       "github.com/apache/yunikorn-k8shim/pkg/appmgmt/interfaces"
-       "github.com/apache/yunikorn-k8shim/pkg/client"
-       "github.com/apache/yunikorn-k8shim/pkg/log"
-       crcClientSet 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       crInformers 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions"
-)
-
-// Manager implements interfaces#Recoverable, interfaces#AppManager
-type Manager struct {
-       amProtocol         interfaces.ApplicationManagementProtocol
-       apiProvider        client.APIProvider
-       crdInformer        k8sCache.SharedIndexInformer
-       crdInformerFactory crInformers.SharedInformerFactory
-       stopCh             chan struct{}
-}
-
-func NewManager(amProtocol interfaces.ApplicationManagementProtocol, 
apiProvider client.APIProvider) *Manager {
-       return &Manager{
-               amProtocol:  amProtocol,
-               apiProvider: apiProvider,
-               stopCh:      make(chan struct{}),
-       }
-}
-
-// ServiceInit implements AppManagementService interface
-/*
-It watches for changes to the SparkApplications CRD objects
-Two event handlers are defined to react accordingly when a SparkApplication 
is updated or deleted.
-Note that there's no need for an event handler for AddFunc because when a 
SparkApplication object
-is first created, the application ID has not been generated yet. It will only 
be available after the driver
-pod starts and then the Spark K8s backend will assign a string that starts 
with "spark-" as the app ID
-*/
-func (os *Manager) ServiceInit() error {
-       crClient, err := crcClientSet.NewForConfig(
-               os.apiProvider.GetAPIs().KubeClient.GetConfigs())
-       if err != nil {
-               return err
-       }
-
-       var factoryOpts []crInformers.SharedInformerOption
-       os.crdInformerFactory = crInformers.NewSharedInformerFactoryWithOptions(
-               crClient, 0, factoryOpts...)
-       os.crdInformer = 
os.crdInformerFactory.Sparkoperator().V1beta2().SparkApplications().Informer()
-       os.crdInformer.AddEventHandler(k8sCache.ResourceEventHandlerFuncs{
-               UpdateFunc: os.updateApplication,
-               DeleteFunc: os.deleteApplication,
-       })
-       log.Log(log.ShimAppMgmtSparkOperator).Info("Spark operator AppMgmt 
service initialized")
-
-       return nil
-}
-
-func (os *Manager) Name() string {
-       return "spark-k8s-operator"
-}
-
-func (os *Manager) Start() error {
-       if os.crdInformerFactory != nil {
-               log.Log(log.ShimAppMgmtSparkOperator).Info("starting", 
zap.String("Name", os.Name()))
-               go os.crdInformerFactory.Start(os.stopCh)
-       }
-       return nil
-}
-
-func (os *Manager) Stop() {
-       log.Log(log.ShimAppMgmtSparkOperator).Info("stopping", 
zap.String("Name", os.Name()))
-       os.stopCh <- struct{}{}
-}
-
-/*
-When a SparkApplication's state is updated and the new state is one of
-FailedState or CompletedState, send the ApplicationFail and ApplicationComplete
-message, respectively, through the app mgmt protocol
-*/
-func (os *Manager) updateApplication(old, new interface{}) {
-       appOld := old.(*v1beta2.SparkApplication)
-       appNew := new.(*v1beta2.SparkApplication)
-       currState := appNew.Status.AppState.State
-       log.Log(log.ShimAppMgmtSparkOperator).Debug("spark app updated",
-               zap.Any("old", appOld),
-               zap.Any("new", appNew),
-               zap.Any("new state", string(currState)))
-       if currState == v1beta2.FailedState {
-               log.Log(log.ShimAppMgmtSparkOperator).Debug("SparkApp has 
failed. Ready to initiate app cleanup")
-               
os.amProtocol.NotifyApplicationFail(appNew.Status.SparkApplicationID)
-       } else if currState == v1beta2.CompletedState {
-               log.Log(log.ShimAppMgmtSparkOperator).Debug("SparkApp has 
completed. Ready to initiate app cleanup")
-               
os.amProtocol.NotifyApplicationComplete(appNew.Status.SparkApplicationID)
-       }
-}
-
-/*
-When a request to delete a SparkApplication is detected,
-send an ApplicationComplete message through the app mgmt protocol
-*/
-func (os *Manager) deleteApplication(obj interface{}) {
-       app := obj.(*v1beta2.SparkApplication)
-       log.Log(log.ShimAppMgmtSparkOperator).Info("spark app deleted", 
zap.Any("SparkApplication", app))
-       os.amProtocol.NotifyApplicationComplete(app.Status.SparkApplicationID)
-}
diff --git a/pkg/common/constants/constants.go 
b/pkg/common/constants/constants.go
index e30256d0..c64eda7a 100644
--- a/pkg/common/constants/constants.go
+++ b/pkg/common/constants/constants.go
@@ -53,8 +53,6 @@ const DefaultUser = "nobody"
 
 // Spark
 const SparkLabelAppID = "spark-app-selector"
-const SparkLabelRole = "spark-role"
-const SparkLabelRoleDriver = "driver"
 
 // Configuration
 const ConfigMapName = "yunikorn-configs"
diff --git a/pkg/log/logger.go b/pkg/log/logger.go
index 381cdf24..5cb348f5 100644
--- a/pkg/log/logger.go
+++ b/pkg/log/logger.go
@@ -53,41 +53,40 @@ const (
 
 // Defined loggers: when adding new loggers, ids must be sequential, and all 
must be added to the loggers slice in the same order
 var (
-       Shim                     = &LoggerHandle{id: 0, name: "shim"}
-       Kubernetes               = &LoggerHandle{id: 1, name: "kubernetes"}
-       Test                     = &LoggerHandle{id: 2, name: "test"}
-       Admission                = &LoggerHandle{id: 3, name: "admission"}
-       AdmissionClient          = &LoggerHandle{id: 4, name: 
"admission.client"}
-       AdmissionConf            = &LoggerHandle{id: 5, name: "admission.conf"}
-       AdmissionWebhook         = &LoggerHandle{id: 6, name: 
"admission.webhook"}
-       AdmissionUtils           = &LoggerHandle{id: 7, name: "admission.utils"}
-       ShimAppMgmt              = &LoggerHandle{id: 8, name: "shim.appmgmt"}
-       ShimAppMgmtGeneral       = &LoggerHandle{id: 9, name: 
"shim.appmgmt.general"}
-       ShimAppMgmtSparkOperator = &LoggerHandle{id: 10, name: 
"shim.appmgmt.sparkoperator"}
-       ShimContext              = &LoggerHandle{id: 11, name: "shim.context"}
-       ShimFSM                  = &LoggerHandle{id: 12, name: "shim.fsm"}
-       ShimCacheApplication     = &LoggerHandle{id: 13, name: 
"shim.cache.application"}
-       ShimCacheNode            = &LoggerHandle{id: 14, name: 
"shim.cache.node"}
-       ShimCacheTask            = &LoggerHandle{id: 15, name: 
"shim.cache.task"}
-       ShimCacheExternal        = &LoggerHandle{id: 16, name: 
"shim.cache.external"}
-       ShimCachePlaceholder     = &LoggerHandle{id: 17, name: 
"shim.cache.placeholder"}
-       ShimRMCallback           = &LoggerHandle{id: 18, name: 
"shim.rmcallback"}
-       ShimClient               = &LoggerHandle{id: 19, name: "shim.client"}
-       ShimResources            = &LoggerHandle{id: 20, name: "shim.resources"}
-       ShimUtils                = &LoggerHandle{id: 21, name: "shim.utils"}
-       ShimConfig               = &LoggerHandle{id: 22, name: "shim.config"}
-       ShimDispatcher           = &LoggerHandle{id: 23, name: 
"shim.dispatcher"}
-       ShimScheduler            = &LoggerHandle{id: 24, name: "shim.scheduler"}
-       ShimSchedulerPlugin      = &LoggerHandle{id: 25, name: 
"shim.scheduler.plugin"}
-       ShimPredicates           = &LoggerHandle{id: 26, name: 
"shim.predicates"}
-       ShimFramework            = &LoggerHandle{id: 27, name: "shim.framework"}
+       Shim                 = &LoggerHandle{id: 0, name: "shim"}
+       Kubernetes           = &LoggerHandle{id: 1, name: "kubernetes"}
+       Test                 = &LoggerHandle{id: 2, name: "test"}
+       Admission            = &LoggerHandle{id: 3, name: "admission"}
+       AdmissionClient      = &LoggerHandle{id: 4, name: "admission.client"}
+       AdmissionConf        = &LoggerHandle{id: 5, name: "admission.conf"}
+       AdmissionWebhook     = &LoggerHandle{id: 6, name: "admission.webhook"}
+       AdmissionUtils       = &LoggerHandle{id: 7, name: "admission.utils"}
+       ShimAppMgmt          = &LoggerHandle{id: 8, name: "shim.appmgmt"}
+       ShimAppMgmtGeneral   = &LoggerHandle{id: 9, name: 
"shim.appmgmt.general"}
+       ShimContext          = &LoggerHandle{id: 10, name: "shim.context"}
+       ShimFSM              = &LoggerHandle{id: 11, name: "shim.fsm"}
+       ShimCacheApplication = &LoggerHandle{id: 12, name: 
"shim.cache.application"}
+       ShimCacheNode        = &LoggerHandle{id: 13, name: "shim.cache.node"}
+       ShimCacheTask        = &LoggerHandle{id: 14, name: "shim.cache.task"}
+       ShimCacheExternal    = &LoggerHandle{id: 15, name: 
"shim.cache.external"}
+       ShimCachePlaceholder = &LoggerHandle{id: 16, name: 
"shim.cache.placeholder"}
+       ShimRMCallback       = &LoggerHandle{id: 17, name: "shim.rmcallback"}
+       ShimClient           = &LoggerHandle{id: 18, name: "shim.client"}
+       ShimResources        = &LoggerHandle{id: 19, name: "shim.resources"}
+       ShimUtils            = &LoggerHandle{id: 20, name: "shim.utils"}
+       ShimConfig           = &LoggerHandle{id: 21, name: "shim.config"}
+       ShimDispatcher       = &LoggerHandle{id: 22, name: "shim.dispatcher"}
+       ShimScheduler        = &LoggerHandle{id: 23, name: "shim.scheduler"}
+       ShimSchedulerPlugin  = &LoggerHandle{id: 24, name: 
"shim.scheduler.plugin"}
+       ShimPredicates       = &LoggerHandle{id: 25, name: "shim.predicates"}
+       ShimFramework        = &LoggerHandle{id: 26, name: "shim.framework"}
 )
 
 // this tracks all the known logger handles, used to preallocate the real 
logger instances when configuration changes
 var loggers = []*LoggerHandle{
        Shim, Kubernetes, Test,
        Admission, AdmissionClient, AdmissionConf, AdmissionWebhook, 
AdmissionUtils,
-       ShimAppMgmt, ShimAppMgmtGeneral, ShimAppMgmtSparkOperator, ShimContext, 
ShimFSM,
+       ShimAppMgmt, ShimAppMgmtGeneral, ShimContext, ShimFSM,
        ShimCacheApplication, ShimCacheNode, ShimCacheTask, ShimCacheExternal, 
ShimCachePlaceholder,
        ShimRMCallback, ShimClient, ShimResources, ShimUtils, ShimConfig, 
ShimDispatcher,
        ShimScheduler, ShimSchedulerPlugin, ShimPredicates, ShimFramework,
diff --git a/pkg/log/logger_test.go b/pkg/log/logger_test.go
index a8a9ff3b..e43e0930 100644
--- a/pkg/log/logger_test.go
+++ b/pkg/log/logger_test.go
@@ -38,7 +38,7 @@ func TestLoggerIds(t *testing.T) {
        _ = Log(Test)
 
        // validate logger count
-       assert.Equal(t, 28, len(loggers), "wrong logger count")
+       assert.Equal(t, 27, len(loggers), "wrong logger count")
 
        // validate that all loggers are populated and have sequential ids
        for i := 0; i < len(loggers); i++ {
diff --git a/pkg/sparkclient/clientset/versioned/clientset.go 
b/pkg/sparkclient/clientset/versioned/clientset.go
deleted file mode 100644
index 0d115aae..00000000
--- a/pkg/sparkclient/clientset/versioned/clientset.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package versioned
-
-import (
-       "fmt"
-       "net/http"
-
-       sparkoperatorv1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2"
-       discovery "k8s.io/client-go/discovery"
-       rest "k8s.io/client-go/rest"
-       flowcontrol "k8s.io/client-go/util/flowcontrol"
-)
-
-type Interface interface {
-       Discovery() discovery.DiscoveryInterface
-       SparkoperatorV1beta2() 
sparkoperatorv1beta2.SparkoperatorV1beta2Interface
-}
-
-// Clientset contains the clients for groups.
-type Clientset struct {
-       *discovery.DiscoveryClient
-       sparkoperatorV1beta2 *sparkoperatorv1beta2.SparkoperatorV1beta2Client
-}
-
-// SparkoperatorV1beta2 retrieves the SparkoperatorV1beta2Client
-func (c *Clientset) SparkoperatorV1beta2() 
sparkoperatorv1beta2.SparkoperatorV1beta2Interface {
-       return c.sparkoperatorV1beta2
-}
-
-// Discovery retrieves the DiscoveryClient
-func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-       if c == nil {
-               return nil
-       }
-       return c.DiscoveryClient
-}
-
-// NewForConfig creates a new Clientset for the given config.
-// If config's RateLimiter is not set and QPS and Burst are acceptable,
-// NewForConfig will generate a rate-limiter in configShallowCopy.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*Clientset, error) {
-       configShallowCopy := *c
-
-       if configShallowCopy.UserAgent == "" {
-               configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
-       }
-
-       // share the transport between all clients
-       httpClient, err := rest.HTTPClientFor(&configShallowCopy)
-       if err != nil {
-               return nil, err
-       }
-
-       return NewForConfigAndClient(&configShallowCopy, httpClient)
-}
-
-// NewForConfigAndClient creates a new Clientset for the given config and http 
client.
-// Note the http client provided takes precedence over the configured 
transport values.
-// If config's RateLimiter is not set and QPS and Burst are acceptable,
-// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
-func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) 
(*Clientset, error) {
-       configShallowCopy := *c
-       if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
-               if configShallowCopy.Burst <= 0 {
-                       return nil, fmt.Errorf("burst is required to be greater 
than 0 when RateLimiter is not set and QPS is set to greater than 0")
-               }
-               configShallowCopy.RateLimiter = 
flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, 
configShallowCopy.Burst)
-       }
-
-       var cs Clientset
-       var err error
-       cs.sparkoperatorV1beta2, err = 
sparkoperatorv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
-       if err != nil {
-               return nil, err
-       }
-
-       cs.DiscoveryClient, err = 
discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
-       if err != nil {
-               return nil, err
-       }
-       return &cs, nil
-}
-
-// NewForConfigOrDie creates a new Clientset for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *Clientset {
-       cs, err := NewForConfig(c)
-       if err != nil {
-               panic(err)
-       }
-       return cs
-}
-
-// New creates a new Clientset for the given RESTClient.
-func New(c rest.Interface) *Clientset {
-       var cs Clientset
-       cs.sparkoperatorV1beta2 = sparkoperatorv1beta2.New(c)
-
-       cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
-       return &cs
-}
diff --git a/pkg/sparkclient/clientset/versioned/doc.go 
b/pkg/sparkclient/clientset/versioned/doc.go
deleted file mode 100644
index 33d3ce0e..00000000
--- a/pkg/sparkclient/clientset/versioned/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated clientset.
-package versioned
diff --git a/pkg/sparkclient/clientset/versioned/fake/clientset_generated.go 
b/pkg/sparkclient/clientset/versioned/fake/clientset_generated.go
deleted file mode 100644
index 06760f93..00000000
--- a/pkg/sparkclient/clientset/versioned/fake/clientset_generated.go
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-       clientset 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       sparkoperatorv1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2"
-       fakesparkoperatorv1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake"
-       "k8s.io/apimachinery/pkg/runtime"
-       "k8s.io/apimachinery/pkg/watch"
-       "k8s.io/client-go/discovery"
-       fakediscovery "k8s.io/client-go/discovery/fake"
-       "k8s.io/client-go/testing"
-)
-
-// NewSimpleClientset returns a clientset that will respond with the provided 
objects.
-// It's backed by a very simple object tracker that processes creates, updates 
and deletions as-is,
-// without applying any validations and/or defaults. It shouldn't be 
considered a replacement
-// for a real clientset and is mostly useful in simple unit tests.
-func NewSimpleClientset(objects ...runtime.Object) *Clientset {
-       o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
-       for _, obj := range objects {
-               if err := o.Add(obj); err != nil {
-                       panic(err)
-               }
-       }
-
-       cs := &Clientset{tracker: o}
-       cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
-       cs.AddReactor("*", "*", testing.ObjectReaction(o))
-       cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret 
watch.Interface, err error) {
-               gvr := action.GetResource()
-               ns := action.GetNamespace()
-               watch, err := o.Watch(gvr, ns)
-               if err != nil {
-                       return false, nil, err
-               }
-               return true, watch, nil
-       })
-
-       return cs
-}
-
-// Clientset implements clientset.Interface. Meant to be embedded into a
-// struct to get a default implementation. This makes faking out just the 
method
-// you want to test easier.
-type Clientset struct {
-       testing.Fake
-       discovery *fakediscovery.FakeDiscovery
-       tracker   testing.ObjectTracker
-}
-
-func (c *Clientset) Discovery() discovery.DiscoveryInterface {
-       return c.discovery
-}
-
-func (c *Clientset) Tracker() testing.ObjectTracker {
-       return c.tracker
-}
-
-var (
-       _ clientset.Interface = &Clientset{}
-       _ testing.FakeClient  = &Clientset{}
-)
-
-// SparkoperatorV1beta2 retrieves the SparkoperatorV1beta2Client
-func (c *Clientset) SparkoperatorV1beta2() 
sparkoperatorv1beta2.SparkoperatorV1beta2Interface {
-       return &fakesparkoperatorv1beta2.FakeSparkoperatorV1beta2{Fake: &c.Fake}
-}
diff --git a/pkg/sparkclient/clientset/versioned/fake/doc.go 
b/pkg/sparkclient/clientset/versioned/fake/doc.go
deleted file mode 100644
index 9dcd00f1..00000000
--- a/pkg/sparkclient/clientset/versioned/fake/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated fake clientset.
-package fake
diff --git a/pkg/sparkclient/clientset/versioned/fake/register.go 
b/pkg/sparkclient/clientset/versioned/fake/register.go
deleted file mode 100644
index 153cbc27..00000000
--- a/pkg/sparkclient/clientset/versioned/fake/register.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-       sparkoperatorv1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       schema "k8s.io/apimachinery/pkg/runtime/schema"
-       serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-       utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-var scheme = runtime.NewScheme()
-var codecs = serializer.NewCodecFactory(scheme)
-
-var localSchemeBuilder = runtime.SchemeBuilder{
-       sparkoperatorv1beta2.AddToScheme,
-}
-
-// AddToScheme adds all types of this clientset into the given scheme. This 
allows composition
-// of clientsets, like in:
-//
-//     import (
-//       "k8s.io/client-go/kubernetes"
-//       clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//       aggregatorclientsetscheme 
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//     )
-//
-//     kclientset, _ := kubernetes.NewForConfig(c)
-//     _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
-//
-// After this, RawExtensions in Kubernetes types will serialize 
kube-aggregator types
-// correctly.
-var AddToScheme = localSchemeBuilder.AddToScheme
-
-func init() {
-       v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
-       utilruntime.Must(AddToScheme(scheme))
-}
diff --git a/pkg/sparkclient/clientset/versioned/scheme/doc.go 
b/pkg/sparkclient/clientset/versioned/scheme/doc.go
deleted file mode 100644
index 2b2fbd2b..00000000
--- a/pkg/sparkclient/clientset/versioned/scheme/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package contains the scheme of the automatically generated clientset.
-package scheme
diff --git a/pkg/sparkclient/clientset/versioned/scheme/register.go 
b/pkg/sparkclient/clientset/versioned/scheme/register.go
deleted file mode 100644
index 7c1c39a8..00000000
--- a/pkg/sparkclient/clientset/versioned/scheme/register.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package scheme
-
-import (
-       sparkoperatorv1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       schema "k8s.io/apimachinery/pkg/runtime/schema"
-       serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-       utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-var Scheme = runtime.NewScheme()
-var Codecs = serializer.NewCodecFactory(Scheme)
-var ParameterCodec = runtime.NewParameterCodec(Scheme)
-var localSchemeBuilder = runtime.SchemeBuilder{
-       sparkoperatorv1beta2.AddToScheme,
-}
-
-// AddToScheme adds all types of this clientset into the given scheme. This 
allows composition
-// of clientsets, like in:
-//
-//     import (
-//       "k8s.io/client-go/kubernetes"
-//       clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//       aggregatorclientsetscheme 
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//     )
-//
-//     kclientset, _ := kubernetes.NewForConfig(c)
-//     _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
-//
-// After this, RawExtensions in Kubernetes types will serialize 
kube-aggregator types
-// correctly.
-var AddToScheme = localSchemeBuilder.AddToScheme
-
-func init() {
-       v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
-       utilruntime.Must(AddToScheme(Scheme))
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go
deleted file mode 100644
index 4f224d51..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1beta2
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go
deleted file mode 100644
index 16964a37..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-// Package fake has the automatically generated clients.
-package fake
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
deleted file mode 100644
index 15a42d38..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-       "context"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       labels "k8s.io/apimachinery/pkg/labels"
-       types "k8s.io/apimachinery/pkg/types"
-       watch "k8s.io/apimachinery/pkg/watch"
-       testing "k8s.io/client-go/testing"
-)
-
-// FakeScheduledSparkApplications implements ScheduledSparkApplicationInterface
-type FakeScheduledSparkApplications struct {
-       Fake *FakeSparkoperatorV1beta2
-       ns   string
-}
-
-var scheduledsparkapplicationsResource = 
v1beta2.SchemeGroupVersion.WithResource("scheduledsparkapplications")
-
-var scheduledsparkapplicationsKind = 
v1beta2.SchemeGroupVersion.WithKind("ScheduledSparkApplication")
-
-// Get takes name of the scheduledSparkApplication, and returns the 
corresponding scheduledSparkApplication object, and an error if there is any.
-func (c *FakeScheduledSparkApplications) Get(ctx context.Context, name string, 
options v1.GetOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewGetAction(scheduledsparkapplicationsResource, c.ns, name), 
&v1beta2.ScheduledSparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), err
-}
-
-// List takes label and field selectors, and returns the list of 
ScheduledSparkApplications that match those selectors.
-func (c *FakeScheduledSparkApplications) List(ctx context.Context, opts 
v1.ListOptions) (result *v1beta2.ScheduledSparkApplicationList, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewListAction(scheduledsparkapplicationsResource, 
scheduledsparkapplicationsKind, c.ns, opts), 
&v1beta2.ScheduledSparkApplicationList{})
-
-       if obj == nil {
-               return nil, err
-       }
-
-       label, _, _ := testing.ExtractFromListOptions(opts)
-       if label == nil {
-               label = labels.Everything()
-       }
-       list := &v1beta2.ScheduledSparkApplicationList{ListMeta: 
obj.(*v1beta2.ScheduledSparkApplicationList).ListMeta}
-       for _, item := range obj.(*v1beta2.ScheduledSparkApplicationList).Items 
{
-               if label.Matches(labels.Set(item.Labels)) {
-                       list.Items = append(list.Items, item)
-               }
-       }
-       return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested 
scheduledSparkApplications.
-func (c *FakeScheduledSparkApplications) Watch(ctx context.Context, opts 
v1.ListOptions) (watch.Interface, error) {
-       return c.Fake.
-               
InvokesWatch(testing.NewWatchAction(scheduledsparkapplicationsResource, c.ns, 
opts))
-
-}
-
-// Create takes the representation of a scheduledSparkApplication and creates 
it.  Returns the server's representation of the scheduledSparkApplication, and 
an error, if there is any.
-func (c *FakeScheduledSparkApplications) Create(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.CreateOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewCreateAction(scheduledsparkapplicationsResource, c.ns, 
scheduledSparkApplication), &v1beta2.ScheduledSparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), err
-}
-
-// Update takes the representation of a scheduledSparkApplication and updates 
it. Returns the server's representation of the scheduledSparkApplication, and 
an error, if there is any.
-func (c *FakeScheduledSparkApplications) Update(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.UpdateOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewUpdateAction(scheduledsparkapplicationsResource, c.ns, 
scheduledSparkApplication), &v1beta2.ScheduledSparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating 
UpdateStatus().
-func (c *FakeScheduledSparkApplications) UpdateStatus(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.UpdateOptions) (*v1beta2.ScheduledSparkApplication, error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewUpdateSubresourceAction(scheduledsparkapplicationsResource, 
"status", c.ns, scheduledSparkApplication), 
&v1beta2.ScheduledSparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), err
-}
-
-// Delete takes name of the scheduledSparkApplication and deletes it. Returns 
an error if one occurs.
-func (c *FakeScheduledSparkApplications) Delete(ctx context.Context, name 
string, opts v1.DeleteOptions) error {
-       _, err := c.Fake.
-               
Invokes(testing.NewDeleteActionWithOptions(scheduledsparkapplicationsResource, 
c.ns, name, opts), &v1beta2.ScheduledSparkApplication{})
-
-       return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeScheduledSparkApplications) DeleteCollection(ctx context.Context, 
opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-       action := 
testing.NewDeleteCollectionAction(scheduledsparkapplicationsResource, c.ns, 
listOpts)
-
-       _, err := c.Fake.Invokes(action, 
&v1beta2.ScheduledSparkApplicationList{})
-       return err
-}
-
-// Patch applies the patch and returns the patched scheduledSparkApplication.
-func (c *FakeScheduledSparkApplications) Patch(ctx context.Context, name 
string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources 
...string) (result *v1beta2.ScheduledSparkApplication, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewPatchSubresourceAction(scheduledsparkapplicationsResource, 
c.ns, name, pt, data, subresources...), &v1beta2.ScheduledSparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), err
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
deleted file mode 100644
index e9f88194..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-       "context"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       labels "k8s.io/apimachinery/pkg/labels"
-       types "k8s.io/apimachinery/pkg/types"
-       watch "k8s.io/apimachinery/pkg/watch"
-       testing "k8s.io/client-go/testing"
-)
-
-// FakeSparkApplications implements SparkApplicationInterface
-type FakeSparkApplications struct {
-       Fake *FakeSparkoperatorV1beta2
-       ns   string
-}
-
-var sparkapplicationsResource = 
v1beta2.SchemeGroupVersion.WithResource("sparkapplications")
-
-var sparkapplicationsKind = 
v1beta2.SchemeGroupVersion.WithKind("SparkApplication")
-
-// Get takes name of the sparkApplication, and returns the corresponding 
sparkApplication object, and an error if there is any.
-func (c *FakeSparkApplications) Get(ctx context.Context, name string, options 
v1.GetOptions) (result *v1beta2.SparkApplication, err error) {
-       obj, err := c.Fake.
-               Invokes(testing.NewGetAction(sparkapplicationsResource, c.ns, 
name), &v1beta2.SparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.SparkApplication), err
-}
-
-// List takes label and field selectors, and returns the list of 
SparkApplications that match those selectors.
-func (c *FakeSparkApplications) List(ctx context.Context, opts v1.ListOptions) 
(result *v1beta2.SparkApplicationList, err error) {
-       obj, err := c.Fake.
-               Invokes(testing.NewListAction(sparkapplicationsResource, 
sparkapplicationsKind, c.ns, opts), &v1beta2.SparkApplicationList{})
-
-       if obj == nil {
-               return nil, err
-       }
-
-       label, _, _ := testing.ExtractFromListOptions(opts)
-       if label == nil {
-               label = labels.Everything()
-       }
-       list := &v1beta2.SparkApplicationList{ListMeta: 
obj.(*v1beta2.SparkApplicationList).ListMeta}
-       for _, item := range obj.(*v1beta2.SparkApplicationList).Items {
-               if label.Matches(labels.Set(item.Labels)) {
-                       list.Items = append(list.Items, item)
-               }
-       }
-       return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested 
sparkApplications.
-func (c *FakeSparkApplications) Watch(ctx context.Context, opts 
v1.ListOptions) (watch.Interface, error) {
-       return c.Fake.
-               InvokesWatch(testing.NewWatchAction(sparkapplicationsResource, 
c.ns, opts))
-
-}
-
-// Create takes the representation of a sparkApplication and creates it.  
Returns the server's representation of the sparkApplication, and an error, if 
there is any.
-func (c *FakeSparkApplications) Create(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.CreateOptions) (result 
*v1beta2.SparkApplication, err error) {
-       obj, err := c.Fake.
-               Invokes(testing.NewCreateAction(sparkapplicationsResource, 
c.ns, sparkApplication), &v1beta2.SparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.SparkApplication), err
-}
-
-// Update takes the representation of a sparkApplication and updates it. 
Returns the server's representation of the sparkApplication, and an error, if 
there is any.
-func (c *FakeSparkApplications) Update(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.UpdateOptions) (result 
*v1beta2.SparkApplication, err error) {
-       obj, err := c.Fake.
-               Invokes(testing.NewUpdateAction(sparkapplicationsResource, 
c.ns, sparkApplication), &v1beta2.SparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.SparkApplication), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating 
UpdateStatus().
-func (c *FakeSparkApplications) UpdateStatus(ctx context.Context, 
sparkApplication *v1beta2.SparkApplication, opts v1.UpdateOptions) 
(*v1beta2.SparkApplication, error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewUpdateSubresourceAction(sparkapplicationsResource, "status", 
c.ns, sparkApplication), &v1beta2.SparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.SparkApplication), err
-}
-
-// Delete takes name of the sparkApplication and deletes it. Returns an error 
if one occurs.
-func (c *FakeSparkApplications) Delete(ctx context.Context, name string, opts 
v1.DeleteOptions) error {
-       _, err := c.Fake.
-               
Invokes(testing.NewDeleteActionWithOptions(sparkapplicationsResource, c.ns, 
name, opts), &v1beta2.SparkApplication{})
-
-       return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeSparkApplications) DeleteCollection(ctx context.Context, opts 
v1.DeleteOptions, listOpts v1.ListOptions) error {
-       action := testing.NewDeleteCollectionAction(sparkapplicationsResource, 
c.ns, listOpts)
-
-       _, err := c.Fake.Invokes(action, &v1beta2.SparkApplicationList{})
-       return err
-}
-
-// Patch applies the patch and returns the patched sparkApplication.
-func (c *FakeSparkApplications) Patch(ctx context.Context, name string, pt 
types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) 
(result *v1beta2.SparkApplication, err error) {
-       obj, err := c.Fake.
-               
Invokes(testing.NewPatchSubresourceAction(sparkapplicationsResource, c.ns, 
name, pt, data, subresources...), &v1beta2.SparkApplication{})
-
-       if obj == nil {
-               return nil, err
-       }
-       return obj.(*v1beta2.SparkApplication), err
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
deleted file mode 100644
index f2f8a1fa..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-       v1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2"
-       rest "k8s.io/client-go/rest"
-       testing "k8s.io/client-go/testing"
-)
-
-type FakeSparkoperatorV1beta2 struct {
-       *testing.Fake
-}
-
-func (c *FakeSparkoperatorV1beta2) ScheduledSparkApplications(namespace 
string) v1beta2.ScheduledSparkApplicationInterface {
-       return &FakeScheduledSparkApplications{c, namespace}
-}
-
-func (c *FakeSparkoperatorV1beta2) SparkApplications(namespace string) 
v1beta2.SparkApplicationInterface {
-       return &FakeSparkApplications{c, namespace}
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *FakeSparkoperatorV1beta2) RESTClient() rest.Interface {
-       var ret *rest.RESTClient
-       return ret
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go
deleted file mode 100644
index b3e6c2f1..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/generated_expansion.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta2
-
-type ScheduledSparkApplicationExpansion interface{}
-
-type SparkApplicationExpansion interface{}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
deleted file mode 100644
index 70b6a7b6..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       "context"
-       "time"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       scheme 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/scheme"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       types "k8s.io/apimachinery/pkg/types"
-       watch "k8s.io/apimachinery/pkg/watch"
-       rest "k8s.io/client-go/rest"
-)
-
-// ScheduledSparkApplicationsGetter has a method to return a 
ScheduledSparkApplicationInterface.
-// A group's client should implement this interface.
-type ScheduledSparkApplicationsGetter interface {
-       ScheduledSparkApplications(namespace string) 
ScheduledSparkApplicationInterface
-}
-
-// ScheduledSparkApplicationInterface has methods to work with 
ScheduledSparkApplication resources.
-type ScheduledSparkApplicationInterface interface {
-       Create(ctx context.Context, scheduledSparkApplication 
*v1beta2.ScheduledSparkApplication, opts v1.CreateOptions) 
(*v1beta2.ScheduledSparkApplication, error)
-       Update(ctx context.Context, scheduledSparkApplication 
*v1beta2.ScheduledSparkApplication, opts v1.UpdateOptions) 
(*v1beta2.ScheduledSparkApplication, error)
-       UpdateStatus(ctx context.Context, scheduledSparkApplication 
*v1beta2.ScheduledSparkApplication, opts v1.UpdateOptions) 
(*v1beta2.ScheduledSparkApplication, error)
-       Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-       DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts 
v1.ListOptions) error
-       Get(ctx context.Context, name string, opts v1.GetOptions) 
(*v1beta2.ScheduledSparkApplication, error)
-       List(ctx context.Context, opts v1.ListOptions) 
(*v1beta2.ScheduledSparkApplicationList, error)
-       Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-       Patch(ctx context.Context, name string, pt types.PatchType, data 
[]byte, opts v1.PatchOptions, subresources ...string) (result 
*v1beta2.ScheduledSparkApplication, err error)
-       ScheduledSparkApplicationExpansion
-}
-
-// scheduledSparkApplications implements ScheduledSparkApplicationInterface
-type scheduledSparkApplications struct {
-       client rest.Interface
-       ns     string
-}
-
-// newScheduledSparkApplications returns a ScheduledSparkApplications
-func newScheduledSparkApplications(c *SparkoperatorV1beta2Client, namespace 
string) *scheduledSparkApplications {
-       return &scheduledSparkApplications{
-               client: c.RESTClient(),
-               ns:     namespace,
-       }
-}
-
-// Get takes name of the scheduledSparkApplication, and returns the 
corresponding scheduledSparkApplication object, and an error if there is any.
-func (c *scheduledSparkApplications) Get(ctx context.Context, name string, 
options v1.GetOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       result = &v1beta2.ScheduledSparkApplication{}
-       err = c.client.Get().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               Name(name).
-               VersionedParams(&options, scheme.ParameterCodec).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// List takes label and field selectors, and returns the list of 
ScheduledSparkApplications that match those selectors.
-func (c *scheduledSparkApplications) List(ctx context.Context, opts 
v1.ListOptions) (result *v1beta2.ScheduledSparkApplicationList, err error) {
-       var timeout time.Duration
-       if opts.TimeoutSeconds != nil {
-               timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-       }
-       result = &v1beta2.ScheduledSparkApplicationList{}
-       err = c.client.Get().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Watch returns a watch.Interface that watches the requested 
scheduledSparkApplications.
-func (c *scheduledSparkApplications) Watch(ctx context.Context, opts 
v1.ListOptions) (watch.Interface, error) {
-       var timeout time.Duration
-       if opts.TimeoutSeconds != nil {
-               timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-       }
-       opts.Watch = true
-       return c.client.Get().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Watch(ctx)
-}
-
-// Create takes the representation of a scheduledSparkApplication and creates 
it.  Returns the server's representation of the scheduledSparkApplication, and 
an error, if there is any.
-func (c *scheduledSparkApplications) Create(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.CreateOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       result = &v1beta2.ScheduledSparkApplication{}
-       err = c.client.Post().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(scheduledSparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Update takes the representation of a scheduledSparkApplication and updates 
it. Returns the server's representation of the scheduledSparkApplication, and 
an error, if there is any.
-func (c *scheduledSparkApplications) Update(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.UpdateOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       result = &v1beta2.ScheduledSparkApplication{}
-       err = c.client.Put().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               Name(scheduledSparkApplication.Name).
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(scheduledSparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating 
UpdateStatus().
-func (c *scheduledSparkApplications) UpdateStatus(ctx context.Context, 
scheduledSparkApplication *v1beta2.ScheduledSparkApplication, opts 
v1.UpdateOptions) (result *v1beta2.ScheduledSparkApplication, err error) {
-       result = &v1beta2.ScheduledSparkApplication{}
-       err = c.client.Put().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               Name(scheduledSparkApplication.Name).
-               SubResource("status").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(scheduledSparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Delete takes name of the scheduledSparkApplication and deletes it. Returns 
an error if one occurs.
-func (c *scheduledSparkApplications) Delete(ctx context.Context, name string, 
opts v1.DeleteOptions) error {
-       return c.client.Delete().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               Name(name).
-               Body(&opts).
-               Do(ctx).
-               Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *scheduledSparkApplications) DeleteCollection(ctx context.Context, 
opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-       var timeout time.Duration
-       if listOpts.TimeoutSeconds != nil {
-               timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-       }
-       return c.client.Delete().
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               VersionedParams(&listOpts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Body(&opts).
-               Do(ctx).
-               Error()
-}
-
-// Patch applies the patch and returns the patched scheduledSparkApplication.
-func (c *scheduledSparkApplications) Patch(ctx context.Context, name string, 
pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) 
(result *v1beta2.ScheduledSparkApplication, err error) {
-       result = &v1beta2.ScheduledSparkApplication{}
-       err = c.client.Patch(pt).
-               Namespace(c.ns).
-               Resource("scheduledsparkapplications").
-               Name(name).
-               SubResource(subresources...).
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(data).
-               Do(ctx).
-               Into(result)
-       return
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
deleted file mode 100644
index e5fbfd6a..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       "context"
-       "time"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       scheme 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/scheme"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       types "k8s.io/apimachinery/pkg/types"
-       watch "k8s.io/apimachinery/pkg/watch"
-       rest "k8s.io/client-go/rest"
-)
-
-// SparkApplicationsGetter has a method to return a SparkApplicationInterface.
-// A group's client should implement this interface.
-type SparkApplicationsGetter interface {
-       SparkApplications(namespace string) SparkApplicationInterface
-}
-
-// SparkApplicationInterface has methods to work with SparkApplication 
resources.
-type SparkApplicationInterface interface {
-       Create(ctx context.Context, sparkApplication *v1beta2.SparkApplication, 
opts v1.CreateOptions) (*v1beta2.SparkApplication, error)
-       Update(ctx context.Context, sparkApplication *v1beta2.SparkApplication, 
opts v1.UpdateOptions) (*v1beta2.SparkApplication, error)
-       UpdateStatus(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.UpdateOptions) (*v1beta2.SparkApplication, 
error)
-       Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-       DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts 
v1.ListOptions) error
-       Get(ctx context.Context, name string, opts v1.GetOptions) 
(*v1beta2.SparkApplication, error)
-       List(ctx context.Context, opts v1.ListOptions) 
(*v1beta2.SparkApplicationList, error)
-       Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-       Patch(ctx context.Context, name string, pt types.PatchType, data 
[]byte, opts v1.PatchOptions, subresources ...string) (result 
*v1beta2.SparkApplication, err error)
-       SparkApplicationExpansion
-}
-
-// sparkApplications implements SparkApplicationInterface
-type sparkApplications struct {
-       client rest.Interface
-       ns     string
-}
-
-// newSparkApplications returns a SparkApplications
-func newSparkApplications(c *SparkoperatorV1beta2Client, namespace string) 
*sparkApplications {
-       return &sparkApplications{
-               client: c.RESTClient(),
-               ns:     namespace,
-       }
-}
-
-// Get takes name of the sparkApplication, and returns the corresponding 
sparkApplication object, and an error if there is any.
-func (c *sparkApplications) Get(ctx context.Context, name string, options 
v1.GetOptions) (result *v1beta2.SparkApplication, err error) {
-       result = &v1beta2.SparkApplication{}
-       err = c.client.Get().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               Name(name).
-               VersionedParams(&options, scheme.ParameterCodec).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// List takes label and field selectors, and returns the list of 
SparkApplications that match those selectors.
-func (c *sparkApplications) List(ctx context.Context, opts v1.ListOptions) 
(result *v1beta2.SparkApplicationList, err error) {
-       var timeout time.Duration
-       if opts.TimeoutSeconds != nil {
-               timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-       }
-       result = &v1beta2.SparkApplicationList{}
-       err = c.client.Get().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Watch returns a watch.Interface that watches the requested 
sparkApplications.
-func (c *sparkApplications) Watch(ctx context.Context, opts v1.ListOptions) 
(watch.Interface, error) {
-       var timeout time.Duration
-       if opts.TimeoutSeconds != nil {
-               timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-       }
-       opts.Watch = true
-       return c.client.Get().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Watch(ctx)
-}
-
-// Create takes the representation of a sparkApplication and creates it.  
Returns the server's representation of the sparkApplication, and an error, if 
there is any.
-func (c *sparkApplications) Create(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.CreateOptions) (result 
*v1beta2.SparkApplication, err error) {
-       result = &v1beta2.SparkApplication{}
-       err = c.client.Post().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(sparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Update takes the representation of a sparkApplication and updates it. 
Returns the server's representation of the sparkApplication, and an error, if 
there is any.
-func (c *sparkApplications) Update(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.UpdateOptions) (result 
*v1beta2.SparkApplication, err error) {
-       result = &v1beta2.SparkApplication{}
-       err = c.client.Put().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               Name(sparkApplication.Name).
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(sparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating 
UpdateStatus().
-func (c *sparkApplications) UpdateStatus(ctx context.Context, sparkApplication 
*v1beta2.SparkApplication, opts v1.UpdateOptions) (result 
*v1beta2.SparkApplication, err error) {
-       result = &v1beta2.SparkApplication{}
-       err = c.client.Put().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               Name(sparkApplication.Name).
-               SubResource("status").
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(sparkApplication).
-               Do(ctx).
-               Into(result)
-       return
-}
-
-// Delete takes name of the sparkApplication and deletes it. Returns an error 
if one occurs.
-func (c *sparkApplications) Delete(ctx context.Context, name string, opts 
v1.DeleteOptions) error {
-       return c.client.Delete().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               Name(name).
-               Body(&opts).
-               Do(ctx).
-               Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *sparkApplications) DeleteCollection(ctx context.Context, opts 
v1.DeleteOptions, listOpts v1.ListOptions) error {
-       var timeout time.Duration
-       if listOpts.TimeoutSeconds != nil {
-               timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-       }
-       return c.client.Delete().
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               VersionedParams(&listOpts, scheme.ParameterCodec).
-               Timeout(timeout).
-               Body(&opts).
-               Do(ctx).
-               Error()
-}
-
-// Patch applies the patch and returns the patched sparkApplication.
-func (c *sparkApplications) Patch(ctx context.Context, name string, pt 
types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) 
(result *v1beta2.SparkApplication, err error) {
-       result = &v1beta2.SparkApplication{}
-       err = c.client.Patch(pt).
-               Namespace(c.ns).
-               Resource("sparkapplications").
-               Name(name).
-               SubResource(subresources...).
-               VersionedParams(&opts, scheme.ParameterCodec).
-               Body(data).
-               Do(ctx).
-               Into(result)
-       return
-}
diff --git 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
 
b/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
deleted file mode 100644
index eb1594cd..00000000
--- 
a/pkg/sparkclient/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       "net/http"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned/scheme"
-       rest "k8s.io/client-go/rest"
-)
-
-type SparkoperatorV1beta2Interface interface {
-       RESTClient() rest.Interface
-       ScheduledSparkApplicationsGetter
-       SparkApplicationsGetter
-}
-
-// SparkoperatorV1beta2Client is used to interact with features provided by 
the sparkoperator.k8s.io group.
-type SparkoperatorV1beta2Client struct {
-       restClient rest.Interface
-}
-
-func (c *SparkoperatorV1beta2Client) ScheduledSparkApplications(namespace 
string) ScheduledSparkApplicationInterface {
-       return newScheduledSparkApplications(c, namespace)
-}
-
-func (c *SparkoperatorV1beta2Client) SparkApplications(namespace string) 
SparkApplicationInterface {
-       return newSparkApplications(c, namespace)
-}
-
-// NewForConfig creates a new SparkoperatorV1beta2Client for the given config.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*SparkoperatorV1beta2Client, error) {
-       config := *c
-       if err := setConfigDefaults(&config); err != nil {
-               return nil, err
-       }
-       httpClient, err := rest.HTTPClientFor(&config)
-       if err != nil {
-               return nil, err
-       }
-       return NewForConfigAndClient(&config, httpClient)
-}
-
-// NewForConfigAndClient creates a new SparkoperatorV1beta2Client for the 
given config and http client.
-// Note the http client provided takes precedence over the configured 
transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) 
(*SparkoperatorV1beta2Client, error) {
-       config := *c
-       if err := setConfigDefaults(&config); err != nil {
-               return nil, err
-       }
-       client, err := rest.RESTClientForConfigAndClient(&config, h)
-       if err != nil {
-               return nil, err
-       }
-       return &SparkoperatorV1beta2Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new SparkoperatorV1beta2Client for the given 
config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *SparkoperatorV1beta2Client {
-       client, err := NewForConfig(c)
-       if err != nil {
-               panic(err)
-       }
-       return client
-}
-
-// New creates a new SparkoperatorV1beta2Client for the given RESTClient.
-func New(c rest.Interface) *SparkoperatorV1beta2Client {
-       return &SparkoperatorV1beta2Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
-       gv := v1beta2.SchemeGroupVersion
-       config.GroupVersion = &gv
-       config.APIPath = "/apis"
-       config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
-
-       if config.UserAgent == "" {
-               config.UserAgent = rest.DefaultKubernetesUserAgent()
-       }
-
-       return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *SparkoperatorV1beta2Client) RESTClient() rest.Interface {
-       if c == nil {
-               return nil
-       }
-       return c.restClient
-}
diff --git a/pkg/sparkclient/informers/externalversions/factory.go 
b/pkg/sparkclient/informers/externalversions/factory.go
deleted file mode 100644
index 5a8b493d..00000000
--- a/pkg/sparkclient/informers/externalversions/factory.go
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package externalversions
-
-import (
-       reflect "reflect"
-       sync "sync"
-       time "time"
-
-       versioned 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       internalinterfaces 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/internalinterfaces"
-       sparkoperatork8sio 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       schema "k8s.io/apimachinery/pkg/runtime/schema"
-       cache "k8s.io/client-go/tools/cache"
-)
-
-// SharedInformerOption defines the functional option type for 
SharedInformerFactory.
-type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
-
-type sharedInformerFactory struct {
-       client           versioned.Interface
-       namespace        string
-       tweakListOptions internalinterfaces.TweakListOptionsFunc
-       lock             sync.Mutex
-       defaultResync    time.Duration
-       customResync     map[reflect.Type]time.Duration
-
-       informers map[reflect.Type]cache.SharedIndexInformer
-       // startedInformers is used for tracking which informers have been 
started.
-       // This allows Start() to be called multiple times safely.
-       startedInformers map[reflect.Type]bool
-       // wg tracks how many goroutines were started.
-       wg sync.WaitGroup
-       // shuttingDown is true when Shutdown has been called. It may still be 
running
-       // because it needs to wait for goroutines.
-       shuttingDown bool
-}
-
-// WithCustomResyncConfig sets a custom resync period for the specified 
informer types.
-func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) 
SharedInformerOption {
-       return func(factory *sharedInformerFactory) *sharedInformerFactory {
-               for k, v := range resyncConfig {
-                       factory.customResync[reflect.TypeOf(k)] = v
-               }
-               return factory
-       }
-}
-
-// WithTweakListOptions sets a custom filter on all listers of the configured 
SharedInformerFactory.
-func WithTweakListOptions(tweakListOptions 
internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
-       return func(factory *sharedInformerFactory) *sharedInformerFactory {
-               factory.tweakListOptions = tweakListOptions
-               return factory
-       }
-}
-
-// WithNamespace limits the SharedInformerFactory to the specified namespace.
-func WithNamespace(namespace string) SharedInformerOption {
-       return func(factory *sharedInformerFactory) *sharedInformerFactory {
-               factory.namespace = namespace
-               return factory
-       }
-}
-
-// NewSharedInformerFactory constructs a new instance of sharedInformerFactory 
for all namespaces.
-func NewSharedInformerFactory(client versioned.Interface, defaultResync 
time.Duration) SharedInformerFactory {
-       return NewSharedInformerFactoryWithOptions(client, defaultResync)
-}
-
-// NewFilteredSharedInformerFactory constructs a new instance of 
sharedInformerFactory.
-// Listers obtained via this SharedInformerFactory will be subject to the same 
filters
-// as specified here.
-// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
-func NewFilteredSharedInformerFactory(client versioned.Interface, 
defaultResync time.Duration, namespace string, tweakListOptions 
internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
-       return NewSharedInformerFactoryWithOptions(client, defaultResync, 
WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
-}
-
-// NewSharedInformerFactoryWithOptions constructs a new instance of a 
SharedInformerFactory with additional options.
-func NewSharedInformerFactoryWithOptions(client versioned.Interface, 
defaultResync time.Duration, options ...SharedInformerOption) 
SharedInformerFactory {
-       factory := &sharedInformerFactory{
-               client:           client,
-               namespace:        v1.NamespaceAll,
-               defaultResync:    defaultResync,
-               informers:        
make(map[reflect.Type]cache.SharedIndexInformer),
-               startedInformers: make(map[reflect.Type]bool),
-               customResync:     make(map[reflect.Type]time.Duration),
-       }
-
-       // Apply all options
-       for _, opt := range options {
-               factory = opt(factory)
-       }
-
-       return factory
-}
-
-func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
-       f.lock.Lock()
-       defer f.lock.Unlock()
-
-       if f.shuttingDown {
-               return
-       }
-
-       for informerType, informer := range f.informers {
-               if !f.startedInformers[informerType] {
-                       f.wg.Add(1)
-                       // We need a new variable in each loop iteration,
-                       // otherwise the goroutine would use the loop variable
-                       // and that keeps changing.
-                       informer := informer
-                       go func() {
-                               defer f.wg.Done()
-                               informer.Run(stopCh)
-                       }()
-                       f.startedInformers[informerType] = true
-               }
-       }
-}
-
-func (f *sharedInformerFactory) Shutdown() {
-       f.lock.Lock()
-       f.shuttingDown = true
-       f.lock.Unlock()
-
-       // Will return immediately if there is nothing to wait for.
-       f.wg.Wait()
-}
-
-func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) 
map[reflect.Type]bool {
-       informers := func() map[reflect.Type]cache.SharedIndexInformer {
-               f.lock.Lock()
-               defer f.lock.Unlock()
-
-               informers := map[reflect.Type]cache.SharedIndexInformer{}
-               for informerType, informer := range f.informers {
-                       if f.startedInformers[informerType] {
-                               informers[informerType] = informer
-                       }
-               }
-               return informers
-       }()
-
-       res := map[reflect.Type]bool{}
-       for informType, informer := range informers {
-               res[informType] = cache.WaitForCacheSync(stopCh, 
informer.HasSynced)
-       }
-       return res
-}
-
-// InternalInformerFor returns the SharedIndexInformer for obj using an 
internal
-// client.
-func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc 
internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
-       f.lock.Lock()
-       defer f.lock.Unlock()
-
-       informerType := reflect.TypeOf(obj)
-       informer, exists := f.informers[informerType]
-       if exists {
-               return informer
-       }
-
-       resyncPeriod, exists := f.customResync[informerType]
-       if !exists {
-               resyncPeriod = f.defaultResync
-       }
-
-       informer = newFunc(f.client, resyncPeriod)
-       f.informers[informerType] = informer
-
-       return informer
-}
-
-// SharedInformerFactory provides shared informers for resources in all known
-// API group versions.
-//
-// It is typically used like this:
-//
-//     ctx, cancel := context.Background()
-//     defer cancel()
-//     factory := NewSharedInformerFactory(client, resyncPeriod)
-//     defer factory.WaitForStop()    // Returns immediately if nothing was 
started.
-//     genericInformer := factory.ForResource(resource)
-//     typedInformer := factory.SomeAPIGroup().V1().SomeType()
-//     factory.Start(ctx.Done())          // Start processing these informers.
-//     synced := factory.WaitForCacheSync(ctx.Done())
-//     for v, ok := range synced {
-//         if !ok {
-//             fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
-//             return
-//         }
-//     }
-//
-//     // Creating informers can also be created after Start, but then
-//     // Start must be called again:
-//     anotherGenericInformer := factory.ForResource(resource)
-//     factory.Start(ctx.Done())
-type SharedInformerFactory interface {
-       internalinterfaces.SharedInformerFactory
-
-       // Start initializes all requested informers. They are handled in 
goroutines
-       // which run until the stop channel gets closed.
-       Start(stopCh <-chan struct{})
-
-       // Shutdown marks a factory as shutting down. At that point no new
-       // informers can be started anymore and Start will return without
-       // doing anything.
-       //
-       // In addition, Shutdown blocks until all goroutines have terminated. 
For that
-       // to happen, the close channel(s) that they were started with must be 
closed,
-       // either before Shutdown gets called or while it is waiting.
-       //
-       // Shutdown may be called multiple times, even concurrently. All such 
calls will
-       // block until all goroutines have terminated.
-       Shutdown()
-
-       // WaitForCacheSync blocks until all started informers' caches were 
synced
-       // or the stop channel gets closed.
-       WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
-
-       // ForResource gives generic access to a shared informer of the 
matching type.
-       ForResource(resource schema.GroupVersionResource) (GenericInformer, 
error)
-
-       // InternalInformerFor returns the SharedIndexInformer for obj using an 
internal
-       // client.
-       InformerFor(obj runtime.Object, newFunc 
internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
-
-       Sparkoperator() sparkoperatork8sio.Interface
-}
-
-func (f *sharedInformerFactory) Sparkoperator() sparkoperatork8sio.Interface {
-       return sparkoperatork8sio.New(f, f.namespace, f.tweakListOptions)
-}
diff --git a/pkg/sparkclient/informers/externalversions/generic.go 
b/pkg/sparkclient/informers/externalversions/generic.go
deleted file mode 100644
index bfb440ff..00000000
--- a/pkg/sparkclient/informers/externalversions/generic.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package externalversions
-
-import (
-       "fmt"
-
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       schema "k8s.io/apimachinery/pkg/runtime/schema"
-       cache "k8s.io/client-go/tools/cache"
-)
-
-// GenericInformer is type of SharedIndexInformer which will locate and 
delegate to other
-// sharedInformers based on type
-type GenericInformer interface {
-       Informer() cache.SharedIndexInformer
-       Lister() cache.GenericLister
-}
-
-type genericInformer struct {
-       informer cache.SharedIndexInformer
-       resource schema.GroupResource
-}
-
-// Informer returns the SharedIndexInformer.
-func (f *genericInformer) Informer() cache.SharedIndexInformer {
-       return f.informer
-}
-
-// Lister returns the GenericLister.
-func (f *genericInformer) Lister() cache.GenericLister {
-       return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
-}
-
-// ForResource gives generic access to a shared informer of the matching type
-// TODO extend this to unknown resources with a client pool
-func (f *sharedInformerFactory) ForResource(resource 
schema.GroupVersionResource) (GenericInformer, error) {
-       switch resource {
-       // Group=sparkoperator.k8s.io, Version=v1beta2
-       case 
v1beta2.SchemeGroupVersion.WithResource("scheduledsparkapplications"):
-               return &genericInformer{resource: resource.GroupResource(), 
informer: f.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer()}, 
nil
-       case v1beta2.SchemeGroupVersion.WithResource("sparkapplications"):
-               return &genericInformer{resource: resource.GroupResource(), 
informer: f.Sparkoperator().V1beta2().SparkApplications().Informer()}, nil
-
-       }
-
-       return nil, fmt.Errorf("no informer found for %v", resource)
-}
diff --git 
a/pkg/sparkclient/informers/externalversions/internalinterfaces/factory_interfaces.go
 
b/pkg/sparkclient/informers/externalversions/internalinterfaces/factory_interfaces.go
deleted file mode 100644
index b7397da8..00000000
--- 
a/pkg/sparkclient/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package internalinterfaces
-
-import (
-       time "time"
-
-       versioned 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       cache "k8s.io/client-go/tools/cache"
-)
-
-// NewInformerFunc takes versioned.Interface and time.Duration to return a 
SharedIndexInformer.
-type NewInformerFunc func(versioned.Interface, time.Duration) 
cache.SharedIndexInformer
-
-// SharedInformerFactory a small interface to allow for adding an informer 
without an import cycle
-type SharedInformerFactory interface {
-       Start(stopCh <-chan struct{})
-       InformerFor(obj runtime.Object, newFunc NewInformerFunc) 
cache.SharedIndexInformer
-}
-
-// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
-type TweakListOptionsFunc func(*v1.ListOptions)
diff --git 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/interface.go 
b/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/interface.go
deleted file mode 100644
index 22b60607..00000000
--- 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/interface.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package sparkoperator
-
-import (
-       internalinterfaces 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/internalinterfaces"
-       v1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2"
-)
-
-// Interface provides access to each of this group's versions.
-type Interface interface {
-       // V1beta2 provides access to shared informers for resources in V1beta2.
-       V1beta2() v1beta2.Interface
-}
-
-type group struct {
-       factory          internalinterfaces.SharedInformerFactory
-       namespace        string
-       tweakListOptions internalinterfaces.TweakListOptionsFunc
-}
-
-// New returns a new Interface.
-func New(f internalinterfaces.SharedInformerFactory, namespace string, 
tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
-       return &group{factory: f, namespace: namespace, tweakListOptions: 
tweakListOptions}
-}
-
-// V1beta2 returns a new v1beta2.Interface.
-func (g *group) V1beta2() v1beta2.Interface {
-       return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
-}
diff --git 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
 
b/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
deleted file mode 100644
index 2a436104..00000000
--- 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       internalinterfaces 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/internalinterfaces"
-)
-
-// Interface provides access to all the informers in this group version.
-type Interface interface {
-       // ScheduledSparkApplications returns a 
ScheduledSparkApplicationInformer.
-       ScheduledSparkApplications() ScheduledSparkApplicationInformer
-       // SparkApplications returns a SparkApplicationInformer.
-       SparkApplications() SparkApplicationInformer
-}
-
-type version struct {
-       factory          internalinterfaces.SharedInformerFactory
-       namespace        string
-       tweakListOptions internalinterfaces.TweakListOptionsFunc
-}
-
-// New returns a new Interface.
-func New(f internalinterfaces.SharedInformerFactory, namespace string, 
tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
-       return &version{factory: f, namespace: namespace, tweakListOptions: 
tweakListOptions}
-}
-
-// ScheduledSparkApplications returns a ScheduledSparkApplicationInformer.
-func (v *version) ScheduledSparkApplications() 
ScheduledSparkApplicationInformer {
-       return &scheduledSparkApplicationInformer{factory: v.factory, 
namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
-// SparkApplications returns a SparkApplicationInformer.
-func (v *version) SparkApplications() SparkApplicationInformer {
-       return &sparkApplicationInformer{factory: v.factory, namespace: 
v.namespace, tweakListOptions: v.tweakListOptions}
-}
diff --git 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
 
b/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
deleted file mode 100644
index f804baf1..00000000
--- 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       "context"
-       time "time"
-
-       sparkoperatork8siov1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       versioned 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       internalinterfaces 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/internalinterfaces"
-       v1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       watch "k8s.io/apimachinery/pkg/watch"
-       cache "k8s.io/client-go/tools/cache"
-)
-
-// ScheduledSparkApplicationInformer provides access to a shared informer and 
lister for
-// ScheduledSparkApplications.
-type ScheduledSparkApplicationInformer interface {
-       Informer() cache.SharedIndexInformer
-       Lister() v1beta2.ScheduledSparkApplicationLister
-}
-
-type scheduledSparkApplicationInformer struct {
-       factory          internalinterfaces.SharedInformerFactory
-       tweakListOptions internalinterfaces.TweakListOptionsFunc
-       namespace        string
-}
-
-// NewScheduledSparkApplicationInformer constructs a new informer for 
ScheduledSparkApplication type.
-// Always prefer using an informer factory to get a shared informer instead of 
getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewScheduledSparkApplicationInformer(client versioned.Interface, 
namespace string, resyncPeriod time.Duration, indexers cache.Indexers) 
cache.SharedIndexInformer {
-       return NewFilteredScheduledSparkApplicationInformer(client, namespace, 
resyncPeriod, indexers, nil)
-}
-
-// NewFilteredScheduledSparkApplicationInformer constructs a new informer for 
ScheduledSparkApplication type.
-// Always prefer using an informer factory to get a shared informer instead of 
getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredScheduledSparkApplicationInformer(client versioned.Interface, 
namespace string, resyncPeriod time.Duration, indexers cache.Indexers, 
tweakListOptions internalinterfaces.TweakListOptionsFunc) 
cache.SharedIndexInformer {
-       return cache.NewSharedIndexInformer(
-               &cache.ListWatch{
-                       ListFunc: func(options v1.ListOptions) (runtime.Object, 
error) {
-                               if tweakListOptions != nil {
-                                       tweakListOptions(&options)
-                               }
-                               return 
client.SparkoperatorV1beta2().ScheduledSparkApplications(namespace).List(context.TODO(),
 options)
-                       },
-                       WatchFunc: func(options v1.ListOptions) 
(watch.Interface, error) {
-                               if tweakListOptions != nil {
-                                       tweakListOptions(&options)
-                               }
-                               return 
client.SparkoperatorV1beta2().ScheduledSparkApplications(namespace).Watch(context.TODO(),
 options)
-                       },
-               },
-               &sparkoperatork8siov1beta2.ScheduledSparkApplication{},
-               resyncPeriod,
-               indexers,
-       )
-}
-
-func (f *scheduledSparkApplicationInformer) defaultInformer(client 
versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
-       return NewFilteredScheduledSparkApplicationInformer(client, 
f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: 
cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *scheduledSparkApplicationInformer) Informer() 
cache.SharedIndexInformer {
-       return 
f.factory.InformerFor(&sparkoperatork8siov1beta2.ScheduledSparkApplication{}, 
f.defaultInformer)
-}
-
-func (f *scheduledSparkApplicationInformer) Lister() 
v1beta2.ScheduledSparkApplicationLister {
-       return 
v1beta2.NewScheduledSparkApplicationLister(f.Informer().GetIndexer())
-}
diff --git 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
 
b/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
deleted file mode 100644
index 13556032..00000000
--- 
a/pkg/sparkclient/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       "context"
-       time "time"
-
-       sparkoperatork8siov1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       versioned 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/clientset/versioned"
-       internalinterfaces 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/informers/externalversions/internalinterfaces"
-       v1beta2 
"github.com/apache/yunikorn-k8shim/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       runtime "k8s.io/apimachinery/pkg/runtime"
-       watch "k8s.io/apimachinery/pkg/watch"
-       cache "k8s.io/client-go/tools/cache"
-)
-
-// SparkApplicationInformer provides access to a shared informer and lister for
-// SparkApplications.
-type SparkApplicationInformer interface {
-       Informer() cache.SharedIndexInformer
-       Lister() v1beta2.SparkApplicationLister
-}
-
-type sparkApplicationInformer struct {
-       factory          internalinterfaces.SharedInformerFactory
-       tweakListOptions internalinterfaces.TweakListOptionsFunc
-       namespace        string
-}
-
-// NewSparkApplicationInformer constructs a new informer for SparkApplication 
type.
-// Always prefer using an informer factory to get a shared informer instead of 
getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewSparkApplicationInformer(client versioned.Interface, namespace string, 
resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
-       return NewFilteredSparkApplicationInformer(client, namespace, 
resyncPeriod, indexers, nil)
-}
-
-// NewFilteredSparkApplicationInformer constructs a new informer for 
SparkApplication type.
-// Always prefer using an informer factory to get a shared informer instead of 
getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredSparkApplicationInformer(client versioned.Interface, namespace 
string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions 
internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
-       return cache.NewSharedIndexInformer(
-               &cache.ListWatch{
-                       ListFunc: func(options v1.ListOptions) (runtime.Object, 
error) {
-                               if tweakListOptions != nil {
-                                       tweakListOptions(&options)
-                               }
-                               return 
client.SparkoperatorV1beta2().SparkApplications(namespace).List(context.TODO(), 
options)
-                       },
-                       WatchFunc: func(options v1.ListOptions) 
(watch.Interface, error) {
-                               if tweakListOptions != nil {
-                                       tweakListOptions(&options)
-                               }
-                               return 
client.SparkoperatorV1beta2().SparkApplications(namespace).Watch(context.TODO(),
 options)
-                       },
-               },
-               &sparkoperatork8siov1beta2.SparkApplication{},
-               resyncPeriod,
-               indexers,
-       )
-}
-
-func (f *sparkApplicationInformer) defaultInformer(client versioned.Interface, 
resyncPeriod time.Duration) cache.SharedIndexInformer {
-       return NewFilteredSparkApplicationInformer(client, f.namespace, 
resyncPeriod, cache.Indexers{cache.NamespaceIndex: 
cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *sparkApplicationInformer) Informer() cache.SharedIndexInformer {
-       return 
f.factory.InformerFor(&sparkoperatork8siov1beta2.SparkApplication{}, 
f.defaultInformer)
-}
-
-func (f *sparkApplicationInformer) Lister() v1beta2.SparkApplicationLister {
-       return v1beta2.NewSparkApplicationLister(f.Informer().GetIndexer())
-}
diff --git 
a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go 
b/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go
deleted file mode 100644
index 690c2d62..00000000
--- 
a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/expansion_generated.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1beta2
-
-// ScheduledSparkApplicationListerExpansion allows custom methods to be added 
to
-// ScheduledSparkApplicationLister.
-type ScheduledSparkApplicationListerExpansion interface{}
-
-// ScheduledSparkApplicationNamespaceListerExpansion allows custom methods to 
be added to
-// ScheduledSparkApplicationNamespaceLister.
-type ScheduledSparkApplicationNamespaceListerExpansion interface{}
-
-// SparkApplicationListerExpansion allows custom methods to be added to
-// SparkApplicationLister.
-type SparkApplicationListerExpansion interface{}
-
-// SparkApplicationNamespaceListerExpansion allows custom methods to be added 
to
-// SparkApplicationNamespaceLister.
-type SparkApplicationNamespaceListerExpansion interface{}
diff --git 
a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
 
b/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
deleted file mode 100644
index 8ccd7fe3..00000000
--- 
a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       "k8s.io/apimachinery/pkg/api/errors"
-       "k8s.io/apimachinery/pkg/labels"
-       "k8s.io/client-go/tools/cache"
-)
-
-// ScheduledSparkApplicationLister helps list ScheduledSparkApplications.
-// All objects returned here must be treated as read-only.
-type ScheduledSparkApplicationLister interface {
-       // List lists all ScheduledSparkApplications in the indexer.
-       // Objects returned here must be treated as read-only.
-       List(selector labels.Selector) (ret 
[]*v1beta2.ScheduledSparkApplication, err error)
-       // ScheduledSparkApplications returns an object that can list and get 
ScheduledSparkApplications.
-       ScheduledSparkApplications(namespace string) 
ScheduledSparkApplicationNamespaceLister
-       ScheduledSparkApplicationListerExpansion
-}
-
-// scheduledSparkApplicationLister implements the 
ScheduledSparkApplicationLister interface.
-type scheduledSparkApplicationLister struct {
-       indexer cache.Indexer
-}
-
-// NewScheduledSparkApplicationLister returns a new 
ScheduledSparkApplicationLister.
-func NewScheduledSparkApplicationLister(indexer cache.Indexer) 
ScheduledSparkApplicationLister {
-       return &scheduledSparkApplicationLister{indexer: indexer}
-}
-
-// List lists all ScheduledSparkApplications in the indexer.
-func (s *scheduledSparkApplicationLister) List(selector labels.Selector) (ret 
[]*v1beta2.ScheduledSparkApplication, err error) {
-       err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-               ret = append(ret, m.(*v1beta2.ScheduledSparkApplication))
-       })
-       return ret, err
-}
-
-// ScheduledSparkApplications returns an object that can list and get 
ScheduledSparkApplications.
-func (s *scheduledSparkApplicationLister) ScheduledSparkApplications(namespace 
string) ScheduledSparkApplicationNamespaceLister {
-       return scheduledSparkApplicationNamespaceLister{indexer: s.indexer, 
namespace: namespace}
-}
-
-// ScheduledSparkApplicationNamespaceLister helps list and get 
ScheduledSparkApplications.
-// All objects returned here must be treated as read-only.
-type ScheduledSparkApplicationNamespaceLister interface {
-       // List lists all ScheduledSparkApplications in the indexer for a given 
namespace.
-       // Objects returned here must be treated as read-only.
-       List(selector labels.Selector) (ret 
[]*v1beta2.ScheduledSparkApplication, err error)
-       // Get retrieves the ScheduledSparkApplication from the indexer for a 
given namespace and name.
-       // Objects returned here must be treated as read-only.
-       Get(name string) (*v1beta2.ScheduledSparkApplication, error)
-       ScheduledSparkApplicationNamespaceListerExpansion
-}
-
-// scheduledSparkApplicationNamespaceLister implements the 
ScheduledSparkApplicationNamespaceLister
-// interface.
-type scheduledSparkApplicationNamespaceLister struct {
-       indexer   cache.Indexer
-       namespace string
-}
-
-// List lists all ScheduledSparkApplications in the indexer for a given 
namespace.
-func (s scheduledSparkApplicationNamespaceLister) List(selector 
labels.Selector) (ret []*v1beta2.ScheduledSparkApplication, err error) {
-       err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m 
interface{}) {
-               ret = append(ret, m.(*v1beta2.ScheduledSparkApplication))
-       })
-       return ret, err
-}
-
-// Get retrieves the ScheduledSparkApplication from the indexer for a given 
namespace and name.
-func (s scheduledSparkApplicationNamespaceLister) Get(name string) 
(*v1beta2.ScheduledSparkApplication, error) {
-       obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-       if err != nil {
-               return nil, err
-       }
-       if !exists {
-               return nil, 
errors.NewNotFound(v1beta2.Resource("scheduledsparkapplication"), name)
-       }
-       return obj.(*v1beta2.ScheduledSparkApplication), nil
-}
diff --git 
a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go 
b/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go
deleted file mode 100644
index e7a8692d..00000000
--- a/pkg/sparkclient/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1beta2
-
-import (
-       v1beta2 
"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-       "k8s.io/apimachinery/pkg/api/errors"
-       "k8s.io/apimachinery/pkg/labels"
-       "k8s.io/client-go/tools/cache"
-)
-
-// SparkApplicationLister helps list SparkApplications.
-// All objects returned here must be treated as read-only.
-type SparkApplicationLister interface {
-       // List lists all SparkApplications in the indexer.
-       // Objects returned here must be treated as read-only.
-       List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err 
error)
-       // SparkApplications returns an object that can list and get 
SparkApplications.
-       SparkApplications(namespace string) SparkApplicationNamespaceLister
-       SparkApplicationListerExpansion
-}
-
-// sparkApplicationLister implements the SparkApplicationLister interface.
-type sparkApplicationLister struct {
-       indexer cache.Indexer
-}
-
-// NewSparkApplicationLister returns a new SparkApplicationLister.
-func NewSparkApplicationLister(indexer cache.Indexer) SparkApplicationLister {
-       return &sparkApplicationLister{indexer: indexer}
-}
-
-// List lists all SparkApplications in the indexer.
-func (s *sparkApplicationLister) List(selector labels.Selector) (ret 
[]*v1beta2.SparkApplication, err error) {
-       err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-               ret = append(ret, m.(*v1beta2.SparkApplication))
-       })
-       return ret, err
-}
-
-// SparkApplications returns an object that can list and get SparkApplications.
-func (s *sparkApplicationLister) SparkApplications(namespace string) 
SparkApplicationNamespaceLister {
-       return sparkApplicationNamespaceLister{indexer: s.indexer, namespace: 
namespace}
-}
-
-// SparkApplicationNamespaceLister helps list and get SparkApplications.
-// All objects returned here must be treated as read-only.
-type SparkApplicationNamespaceLister interface {
-       // List lists all SparkApplications in the indexer for a given 
namespace.
-       // Objects returned here must be treated as read-only.
-       List(selector labels.Selector) (ret []*v1beta2.SparkApplication, err 
error)
-       // Get retrieves the SparkApplication from the indexer for a given 
namespace and name.
-       // Objects returned here must be treated as read-only.
-       Get(name string) (*v1beta2.SparkApplication, error)
-       SparkApplicationNamespaceListerExpansion
-}
-
-// sparkApplicationNamespaceLister implements the 
SparkApplicationNamespaceLister
-// interface.
-type sparkApplicationNamespaceLister struct {
-       indexer   cache.Indexer
-       namespace string
-}
-
-// List lists all SparkApplications in the indexer for a given namespace.
-func (s sparkApplicationNamespaceLister) List(selector labels.Selector) (ret 
[]*v1beta2.SparkApplication, err error) {
-       err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m 
interface{}) {
-               ret = append(ret, m.(*v1beta2.SparkApplication))
-       })
-       return ret, err
-}
-
-// Get retrieves the SparkApplication from the indexer for a given namespace 
and name.
-func (s sparkApplicationNamespaceLister) Get(name string) 
(*v1beta2.SparkApplication, error) {
-       obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-       if err != nil {
-               return nil, err
-       }
-       if !exists {
-               return nil, 
errors.NewNotFound(v1beta2.Resource("sparkapplication"), name)
-       }
-       return obj.(*v1beta2.SparkApplication), nil
-}
diff --git a/scripts/generate-groups.sh b/scripts/generate-groups.sh
deleted file mode 100755
index 2fd6eab5..00000000
--- a/scripts/generate-groups.sh
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-#limitations under the License.
-#
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-function check_cmd() {
-  CMD=$1
-  if ! command -v "${CMD}" &> /dev/null
-  then
-    echo "command ${CMD} could not be found"
-    exit 1
-  fi
-}
-
-GO="${GO:-go}"
-check_cmd "${GO}"
-GOPATH="${GOPATH:-$("${GO}" env GOPATH)}"
-
-#Set GOPATH if GOPATH is empty.
-if [ "${GOPATH}" == "" ]; then
-  echo "GOPATH is empty."
-  export GOPATH="${HOME}/go"
-  echo "Set GOPATH to ${GOPATH}"
-fi
-
-# generate-groups generates everything for a project with external types only, 
e.g. a project based
-# on CustomResourceDefinitions.
-
-if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then
-  cat <<EOF
-Usage: $(basename "$0") <generators> <output-package> <apis-package> 
<groups-versions> ...
-
-  <generators>        the generators comma separated to run 
(deepcopy,defaulter,client,lister,informer) or "all".
-  <output-package>    the output package name (e.g. 
github.com/example/project/pkg/generated).
-  <apis-package>      the external types dir (e.g. github.com/example/api or 
github.com/example/project/pkg/apis).
-  <groups-versions>   the groups and their versions in the format 
"groupA:v1,v2 groupB:v1 groupC:v2", relative
-                      to <api-package>.
-  ...                 arbitrary flags passed to all generator binaries.
-
-
-Examples:
-  $(basename "$0") all             github.com/example/project/pkg/client 
github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1"
-  $(basename "$0") deepcopy,client github.com/example/project/pkg/client 
github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1"
-EOF
-  exit 0
-fi
-
-GENS="$1"
-OUTPUT_PKG="$2"
-APIS_PKG="$3"
-GROUPS_WITH_VERSIONS="$4"
-shift 4
-
-(
-  # To support running this script from anywhere, we have to first cd into 
this directory
-  # so we can install the tools.
-  cd "$(dirname "${0}")"
-  go install 
k8s.io/code-generator/cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen}@v0.27.3
-)
-
-function codegen::join() { local IFS="$1"; shift; echo "$*"; }
-
-# enumerate group versions
-FQ_APIS=() # e.g. k8s.io/api/apps/v1
-for GVs in ${GROUPS_WITH_VERSIONS}; do
-  IFS=: read -r G Vs <<<"${GVs}"
-
-  # enumerate versions
-  for V in ${Vs//,/ }; do
-    FQ_APIS+=("${APIS_PKG}/${G}/${V}")
-  done
-done
-
-if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then
-  echo "Generating deepcopy funcs"
-  "${GOPATH}/bin/deepcopy-gen" --input-dirs "$(codegen::join , 
"${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@"
-fi
-
-if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then
-  echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at 
${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
-  "${GOPATH}/bin/client-gen" --clientset-name 
"${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input 
"$(codegen::join , "${FQ_APIS[@]}")" --output-package 
"${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" "$@"
-fi
-
-if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then
-  echo "Generating listers for ${GROUPS_WITH_VERSIONS} at 
${OUTPUT_PKG}/listers"
-  "${GOPATH}/bin/lister-gen" --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" 
--output-package "${OUTPUT_PKG}/listers" "$@"
-fi
-
-if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then
-  echo "Generating informers for ${GROUPS_WITH_VERSIONS} at 
${OUTPUT_PKG}/informers"
-  "${GOPATH}/bin/informer-gen" \
-           --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \
-           --versioned-clientset-package 
"${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}"
 \
-           --listers-package "${OUTPUT_PKG}/listers" \
-           --output-package "${OUTPUT_PKG}/informers" \
-           "$@"
-fi
diff --git a/scripts/update-codegen.sh b/scripts/update-codegen.sh
deleted file mode 100755
index 4eb65787..00000000
--- a/scripts/update-codegen.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-#limitations under the License.
-#
-
-# If you want to re-run the code-generator to generate code,
-# Please make sure the directory structure must be the example.
-# ex: github.com/apache/yunikorn-k8shim  
-
-./scripts/generate-groups.sh "all" \
-  github.com/apache/yunikorn-k8shim/pkg/sparkclient 
github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis \
-  "sparkoperator.k8s.io:v1beta2" \
-  --go-header-file "$(dirname "${BASH_SOURCE[@]}")"/custom-boilerplate.go.txt \
-  --output-base "$(dirname "${BASH_SOURCE[@]}")/../../../.."


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@yunikorn.apache.org
For additional commands, e-mail: issues-h...@yunikorn.apache.org

Reply via email to