Re: [PATCH 02/12] drm/amdgpu: send IVs to the KFD only after processing them

2018-09-27 Thread Christian König

Am 26.09.2018 um 20:24 schrieb Jay Cornwall:

On Wed, Sep 26, 2018, at 08:53, Christian König wrote:

This allows us to filter out VM faults in the GMC code.

Signed-off-by: Christian König 

The KFD needs to receive notification of unhandled VM faults; when demand 
paging is disabled or the address is not pageable. It propagates this to the 
UMD (ROC runtime or the ROC debugger).

Does this patch change that behavior?


Potentially yes. We have some code in the GMC handling for this, but I'm 
actually not sure if that is the used path.


Thanks for the comment, I will take that into account and only block 
forwarding to the KFD if the GMC really filtered the request.


Christian.


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 02/12] drm/amdgpu: send IVs to the KFD only after processing them

2018-09-26 Thread Jay Cornwall
On Wed, Sep 26, 2018, at 08:53, Christian König wrote:
> This allows us to filter out VM faults in the GMC code.
> 
> Signed-off-by: Christian König 

The KFD needs to receive notification of unhandled VM faults; when demand 
paging is disabled or the address is not pageable. It propagates this to the 
UMD (ROC runtime or the ROC debugger).

Does this patch change that behavior?
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 02/12] drm/amdgpu: send IVs to the KFD only after processing them

2018-09-26 Thread Christian König
This allows us to filter out VM faults in the GMC code.

Signed-off-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 29 +
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c   |  2 +-
 5 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6219a7..8f0bc3f2e163 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -166,9 +166,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
if (!amdgpu_ih_prescreen_iv(adev))
return;
 
-   /* Before dispatching irq to IP blocks, send it to amdkfd */
-   amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
 
@@ -392,29 +389,31 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+   bool handled = false;
int r;
 
trace_amdgpu_iv(entry);
 
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
-   DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+   DRM_ERROR("Invalid client_id in IV: %d\n", client_id);
return;
}
 
if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
-   DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+   DRM_ERROR("Invalid src_id in IV: %d\n", src_id);
return;
}
 
if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
-   } else {
-   if (!adev->irq.client[client_id].sources) {
-   DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
-   return;
-   }
+   return;
+   }
 
+   if (!adev->irq.client[client_id].sources) {
+   DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+   return;
+   } else {
src = adev->irq.client[client_id].sources[src_id];
if (!src) {
DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
@@ -422,9 +421,15 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
}
 
r = src->funcs->process(adev, src, entry);
-   if (r)
+   if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+   else if (r)
+   handled = true;
}
+
+   /* Send it to amdkfd as well if it isn't already handled */
+   if (!handled)
+   amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e1c2b4e9c7b2..d65bfbe21f56 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1132,7 +1132,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
}
 
-   return 0;
+   return 1;
 }
 
 static int gmc_v6_0_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce19cb3..ca8b34bab261 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1325,7 +1325,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
}
 
-   return 0;
+   return 1;
 }
 
 static int gmc_v7_0_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1d3265c97b70..f7547a7776a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1487,7 +1487,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
}
 
-   return 0;
+   return 1;
 }
 
 static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2420ae90047e..729a2c230f91 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -279,7 +279,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
status);
}
 
-   return 0;
+   return 1;
 }
 
 stati