Re: [PATCH 5/6] drm/amdgpu: implement grab reserved vmid V3

2017-04-27 Thread Zhang, Jerry (Junwei)

On 04/27/2017 01:54 PM, Zhang, Jerry (Junwei) wrote:

On 04/27/2017 01:00 PM, Chunming Zhou wrote:

v2: move sync waiting only when flush needs
v3: fix racy

Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
Signed-off-by: Chunming Zhou 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 --
  1 file changed, 68 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e6fdfa4..7752279 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -397,6 +397,65 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
   atomic_read(&adev->gpu_reset_counter);
  }

+static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
+{
+   return !!vm->reserved_vmid[vmhub];
+}


It may be better to fold this check into the reserved-vmid allocation function as well;
that would make it easier to maintain in the future.



+
+/* idr_mgr->lock must be held */
+static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
+   struct amdgpu_ring *ring,
+   struct amdgpu_sync *sync,
+   struct fence *fence,
+   struct amdgpu_job *job)
+{
+   struct amdgpu_device *adev = ring->adev;
+   unsigned vmhub = ring->funcs->vmhub;
+   struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+   struct fence *updates = sync->last_vm_update;
+   int r = 0;
+   struct fence *flushed, *tmp;
+   bool needs_flush = false;
+
+   flushed  = id->flushed_updates;
+   if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
+   (atomic64_read(&id->owner) != vm->client_id) ||
+   (job->vm_pd_addr != id->pd_gpu_addr) ||
+   (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed)))) {
+   needs_flush = true;
+   tmp = amdgpu_sync_get_fence(&id->active);
+   if (tmp) {
+   r = amdgpu_sync_fence(adev, sync, tmp);
+   fence_put(tmp);
+   return r;


Sorry, I didn't quite follow this part.
Could you give me some tips?
(In this case, don't we need to update the id info below?)


Got the explanation offline: this path waits for the id to become idle.
That way the reserved id is always used once it is idle.
Anyway, it should work.
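
(For anyone else following along, here is the relevant branch again with my
reading of it spelled out in comments; this is just the hunk above restated,
not a change request:)

	/* As I understand it: when the checks above say the reserved id would
	 * need a flush, we do not take it immediately.  We only add the id's
	 * currently active fences to the job's sync object and return without
	 * assigning job->vm_id; the job then waits on those fences and
	 * grab_id is retried, so the reserved id is only used once it is idle.
	 */
	needs_flush = true;
	tmp = amdgpu_sync_get_fence(&id->active);
	if (tmp) {
		r = amdgpu_sync_fence(adev, sync, tmp);
		fence_put(tmp);
		return r;	/* job->vm_id not set; we come back later */
	}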



Jerry


+   }
+   }
+
+   /* Good we can use this VMID. Remember this submission as
+   * user of the VMID.
+   */
+   r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+   if (r)
+   goto out;
+
+   if (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed))) {
+   fence_put(id->flushed_updates);
+   id->flushed_updates = fence_get(updates);
+   }
+   id->pd_gpu_addr = job->vm_pd_addr;
+   id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+   atomic64_set(&id->owner, vm->client_id);
+   job->vm_needs_flush = needs_flush;
+
+   job->vm_id = id - id_mgr->ids;
+   trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+   return r;
+}
+
  /**
   * amdgpu_vm_grab_id - allocate the next free VMID
   *
@@ -421,12 +480,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
   unsigned i;
   int r = 0;

+   mutex_lock(&id_mgr->lock);
+   if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
+   r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
+   mutex_unlock(&id_mgr->lock);
+   return r;
+   }
   fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-   if (!fences)
+   if (!fences) {
+   mutex_unlock(&id_mgr->lock);
   return -ENOMEM;
-
-   mutex_lock(&id_mgr->lock);
-
+   }
   /* Check if we have an idle VMID */
   i = 0;
   list_for_each_entry(idle, &id_mgr->ids_lru, list) {


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 5/6] drm/amdgpu: implement grab reserved vmid V3

2017-04-26 Thread Zhang, Jerry (Junwei)

On 04/27/2017 01:00 PM, Chunming Zhou wrote:

v2: move sync waiting only when flush needs
v3: fix racy

Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
Signed-off-by: Chunming Zhou 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 --
  1 file changed, 68 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e6fdfa4..7752279 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -397,6 +397,65 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
	atomic_read(&adev->gpu_reset_counter);
  }

+static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
+{
+   return !!vm->reserved_vmid[vmhub];
+}


It may be better to fold this check into the reserved-vmid allocation function as well;
that would make it easier to maintain in the future.
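
(Just to illustrate what I mean, a rough sketch of reusing the helper on the
reserve path. I am assuming the reserve function from the other patch in this
series looks roughly like the below, so take the names and details with a
grain of salt:)

static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *idle;

	mutex_lock(&id_mgr->lock);
	/* reuse the same helper instead of open-coding the NULL check here */
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub))
		goto unlock;		/* already reserved, nothing to do */
	/* take one id off the LRU and park it for this VM's exclusive use */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
unlock:
	mutex_unlock(&id_mgr->lock);
	return 0;
}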




+
+/* idr_mgr->lock must be held */
+static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
+  struct amdgpu_ring *ring,
+  struct amdgpu_sync *sync,
+  struct fence *fence,
+  struct amdgpu_job *job)
+{
+   struct amdgpu_device *adev = ring->adev;
+   unsigned vmhub = ring->funcs->vmhub;
+   struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+   struct fence *updates = sync->last_vm_update;
+   int r = 0;
+   struct fence *flushed, *tmp;
+   bool needs_flush = false;
+
+   flushed  = id->flushed_updates;
+   if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
+   (atomic64_read(&id->owner) != vm->client_id) ||
+   (job->vm_pd_addr != id->pd_gpu_addr) ||
+   (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed)))) {
+   needs_flush = true;
+   tmp = amdgpu_sync_get_fence(&id->active);
+   if (tmp) {
+   r = amdgpu_sync_fence(adev, sync, tmp);
+   fence_put(tmp);
+   return r;


Sorry, I didn't quite follow this part.
Could you give me some tips?
(In this case, don't we need to update the id info below?)

Jerry


+   }
+   }
+
+   /* Good we can use this VMID. Remember this submission as
+   * user of the VMID.
+   */
+   r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+   if (r)
+   goto out;
+
+   if (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed))) {
+   fence_put(id->flushed_updates);
+   id->flushed_updates = fence_get(updates);
+   }
+   id->pd_gpu_addr = job->vm_pd_addr;
+   id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+   atomic64_set(&id->owner, vm->client_id);
+   job->vm_needs_flush = needs_flush;
+
+   job->vm_id = id - id_mgr->ids;
+   trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+   return r;
+}
+
  /**
   * amdgpu_vm_grab_id - allocate the next free VMID
   *
@@ -421,12 +480,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
int r = 0;

+   mutex_lock(&id_mgr->lock);
+   if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
+   r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
+   mutex_unlock(&id_mgr->lock);
+   return r;
+   }
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-   if (!fences)
+   if (!fences) {
+   mutex_unlock(&id_mgr->lock);
return -ENOMEM;
-
-   mutex_lock(_mgr->lock);
-
+   }
/* Check if we have an idle VMID */
i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 5/6] drm/amdgpu: implement grab reserved vmid V3

2017-04-26 Thread Chunming Zhou
v2: move sync waiting only when flush needs
v3: fix racy

Change-Id: I64da2701c9fdcf986afb90ba1492a78d5bef1b6c
Signed-off-by: Chunming Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 --
 1 file changed, 68 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e6fdfa4..7752279 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -397,6 +397,65 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
	atomic_read(&adev->gpu_reset_counter);
 }
 
+static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
+{
+   return !!vm->reserved_vmid[vmhub];
+}
+
+/* idr_mgr->lock must be held */
+static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
+  struct amdgpu_ring *ring,
+  struct amdgpu_sync *sync,
+  struct fence *fence,
+  struct amdgpu_job *job)
+{
+   struct amdgpu_device *adev = ring->adev;
+   unsigned vmhub = ring->funcs->vmhub;
+   struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
+   struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+   struct fence *updates = sync->last_vm_update;
+   int r = 0;
+   struct fence *flushed, *tmp;
+   bool needs_flush = false;
+
+   flushed  = id->flushed_updates;
+   if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
+   (atomic64_read(&id->owner) != vm->client_id) ||
+   (job->vm_pd_addr != id->pd_gpu_addr) ||
+   (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed)))) {
+   needs_flush = true;
+   tmp = amdgpu_sync_get_fence(&id->active);
+   if (tmp) {
+   r = amdgpu_sync_fence(adev, sync, tmp);
+   fence_put(tmp);
+   return r;
+   }
+   }
+
+   /* Good we can use this VMID. Remember this submission as
+   * user of the VMID.
+   */
+   r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+   if (r)
+   goto out;
+
+   if (updates && (!flushed || updates->context != flushed->context ||
+   fence_is_later(updates, flushed))) {
+   fence_put(id->flushed_updates);
+   id->flushed_updates = fence_get(updates);
+   }
+   id->pd_gpu_addr = job->vm_pd_addr;
+   id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+   atomic64_set(&id->owner, vm->client_id);
+   job->vm_needs_flush = needs_flush;
+
+   job->vm_id = id - id_mgr->ids;
+   trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+   return r;
+}
+
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -421,12 +480,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
int r = 0;
 
+   mutex_lock(&id_mgr->lock);
+   if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
+   r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
+   mutex_unlock(&id_mgr->lock);
+   return r;
+   }
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
-   if (!fences)
+   if (!fences) {
+   mutex_unlock(&id_mgr->lock);
return -ENOMEM;
-
-   mutex_lock(_mgr->lock);
-
+   }
/* Check if we have an idle VMID */
i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
-- 
1.9.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx