Move vGPU workload cache initialization/de-initialization into
intel_vgpu_{setup,clean}_submission(), since it is not specific to
the execlist code.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/execlist.c  | 15 +--------------
 drivers/gpu/drm/i915/gvt/scheduler.c | 24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 9402aa5..bdf78ab 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -857,14 +857,12 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
        struct intel_engine_cs *engine;
 
        clean_workloads(vgpu, ALL_ENGINES);
-       kmem_cache_destroy(vgpu->workloads);
 
        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                kfree(vgpu->reserve_ring_buffer_va[i]);
                vgpu->reserve_ring_buffer_va[i] = NULL;
                vgpu->reserve_ring_buffer_size[i] = 0;
        }
-
 }
 
 #define RESERVE_RING_BUFFER_SIZE               ((1 * PAGE_SIZE)/8)
@@ -873,19 +871,8 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
        enum intel_engine_id i;
        struct intel_engine_cs *engine;
 
-       /* each ring has a virtual execlist engine */
-       for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+       for_each_engine(engine, vgpu->gvt->dev_priv, i)
                init_vgpu_execlist(vgpu, i);
-               INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
-       }
-
-       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-                       sizeof(struct intel_vgpu_workload), 0,
-                       SLAB_HWCACHE_ALIGN,
-                       NULL);
-
-       if (!vgpu->workloads)
-               return -ENOMEM;
 
        /* each ring has a shadow ring buffer until vgpu destroyed */
        for_each_engine(engine, vgpu->gvt->dev_priv, i) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d14d910..c5d7baf 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -718,6 +718,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
        i915_gem_context_put(vgpu->shadow_ctx);
+       kmem_cache_destroy(vgpu->workloads);
 }
 
 /**
@@ -732,7 +733,9 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
-       atomic_set(&vgpu->running_workload_num, 0);
+       enum intel_engine_id i;
+       struct intel_engine_cs *engine;
+       int ret;
 
        vgpu->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
@@ -743,5 +746,24 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
        bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
+       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+                       sizeof(struct intel_vgpu_workload), 0,
+                       SLAB_HWCACHE_ALIGN,
+                       NULL);
+
+       if (!vgpu->workloads) {
+               ret = -ENOMEM;
+               goto out_shadow_ctx;
+       }
+
+       for_each_engine(engine, vgpu->gvt->dev_priv, i)
+               INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+       atomic_set(&vgpu->running_workload_num, 0);
+
        return 0;
+
+out_shadow_ctx:
+       i915_gem_context_put(vgpu->shadow_ctx);
+       return ret;
 }
-- 
2.7.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to