[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-11 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 47b4f359f07c..8c5b0cda9a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2785,7 +2785,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1c65b5bffa6b..b999b67ff57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2744,7 +2744,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2758,19 +2757,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d5613d184e99..100547f094ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -330,8 +330,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct 

[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-11 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 47b4f359f07c..8c5b0cda9a3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2785,7 +2785,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1c65b5bffa6b..b999b67ff57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2744,7 +2744,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2758,19 +2757,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d5613d184e99..100547f094ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -330,8 +330,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct 

[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-10 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7324a5fc5ccb..a7904d1e71f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2785,7 +2785,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1c65b5bffa6b..b999b67ff57a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2744,7 +2744,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2758,19 +2757,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d5613d184e99..100547f094ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -330,8 +330,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct 

[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-10 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f85007382093..cf4953c4e2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5e78db30c722..0e1ed8ef2ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 76fcf853035c..5eaba8645a43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct 

[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-09 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f85007382093..cf4953c4e2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5e78db30c722..0e1ed8ef2ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(>invalidated_lock);
INIT_LIST_HEAD(>freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(>direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(>delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 76fcf853035c..5eaba8645a43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = _sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = >sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   >sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   >sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct 

Re: [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-06 Thread Christian König

Am 06.12.19 um 18:33 schrieb Nirmoy Das:

drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
  drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
  9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f85007382093..cf4953c4e2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5e78db30c722..0e1ed8ef2ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
  {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
  
  	vm->va = RB_ROOT_CACHED;

@@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
  
-	for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)

-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
  
  	/* create scheduler entities for page table updates */

r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
  
  	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,

- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
  
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

index 76fcf853035c..5eaba8645a43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
  
  	/* partial resident texture handling */

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
  
  static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)

  {
-   struct drm_gpu_scheduler *sched;
unsigned i;
  
  	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;

	for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
	}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;

[PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list

2019-12-06 Thread Nirmoy Das
drm_sched_entity_init() takes drm gpu scheduler list instead of
drm_sched_rq list. This makes conversion of drm_sched_rq list
to drm gpu scheduler list unnecessary

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c  |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  8 +++-
 drivers/gpu/drm/amd/amdgpu/si_dma.c|  8 +++-
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f85007382093..cf4953c4e2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
-   adev->vm_manager.vm_pte_num_rqs = 0;
+   adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5e78db30c722..0e1ed8ef2ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
 {
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
-   struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
int r, i;
 
vm->va = RB_ROOT_CACHED;
@@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct 
amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
 
-   for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-   sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
return r;
 
r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- sched_list, adev->vm_manager.vm_pte_num_rqs,
- NULL);
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 76fcf853035c..5eaba8645a43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
u64 vram_base_offset;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs*vm_pte_funcs;
-   struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
-   unsignedvm_pte_num_rqs;
+   struct drm_gpu_scheduler
*vm_pte_scheds[AMDGPU_MAX_RINGS];
+   unsignedvm_pte_num_scheds;
struct amdgpu_ring  *page_fault;
 
/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c 
b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs 
cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-   struct drm_gpu_scheduler *sched;
unsigned i;
 
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
-   sched = &adev->sdma.instance[i].ring.sched;
-   adev->vm_manager.vm_pte_rqs[i] =
-   &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+   adev->vm_manager.vm_pte_scheds[i] =
+   &adev->sdma.instance[i].ring.sched;
}
-   adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+   adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version