At this point the ASIC has already been post-reset by the HW/PSP,
so the HW is not in a proper state to be configured for suspension;
some blocks might even be gated, so it is best to avoid touching it.

v2: Rename in_dpc to a more meaningful name

Signed-off-by: Andrey Grodzovsky <andrey.grodzov...@amd.com>
Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 41 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c    |  6 +++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c    |  6 +++++
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c     | 18 ++++++++-----
 drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c     |  3 +++
 6 files changed, 67 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3399242..cac51e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -992,6 +992,7 @@ struct amdgpu_device {
        atomic_t                        throttling_logging_enabled;
        struct ratelimit_state          throttling_logging_rs;
        uint32_t                        ras_features;
+       bool                            in_pci_err_recovery;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 67d61a5..43ce473 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -319,6 +319,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, 
uint32_t reg,
 {
        uint32_t ret;
 
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_rreg(adev, reg);
 
@@ -351,6 +354,9 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, 
uint32_t reg,
  * Returns the 8 bit value from the offset specified.
  */
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
@@ -372,14 +378,21 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, 
uint32_t offset) {
  * Writes the value specified to the offset specified.
  */
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t 
value) {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
 }
 
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t 
reg, uint32_t v, uint32_t acc_flags)
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t 
reg,
+                                      uint32_t v, uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
        if ((reg * 4) < adev->rmmio_size)
@@ -407,6 +420,9 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device 
*adev, uint32_t reg,
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_kiq_wreg(adev, reg, v);
 
@@ -421,6 +437,9 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t 
reg, uint32_t v,
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, 
uint32_t v,
                    uint32_t acc_flags)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (amdgpu_sriov_fullaccess(adev) &&
                adev->gfx.rlc.funcs &&
                adev->gfx.rlc.funcs->is_rlcg_access_range) {
@@ -442,6 +461,9 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, 
uint32_t reg, uint32_t
  */
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
@@ -461,6 +483,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
  */
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
@@ -480,6 +505,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, 
u32 v)
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
@@ -500,6 +528,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 
index)
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
@@ -518,6 +549,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 
index, u32 v)
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + 
index));
        } else {
@@ -538,6 +572,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 
index)
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
+       if (adev->in_pci_err_recovery)
+               return;
+
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
@@ -4775,7 +4812,9 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev 
*pdev)
 
        pci_restore_state(pdev);
 
+       adev->in_pci_err_recovery = true;
        r = amdgpu_device_ip_suspend(adev);
+       adev->in_pci_err_recovery = false;
        if (r)
                goto out;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index d698142..8c9bacf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -693,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, 
uint32_t reg)
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
+       if (adev->in_pci_err_recovery)
+               return 0;
+
        BUG_ON(!ring->funcs->emit_rreg);
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -757,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t 
reg, uint32_t v)
 
        BUG_ON(!ring->funcs->emit_wreg);
 
+       if (adev->in_pci_err_recovery)
+               return;
+
        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d6c38e2..a7771aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -219,6 +219,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t 
reg_index,
        int i;
        struct amdgpu_device *adev = psp->adev;
 
+       if (psp->adev->in_pci_err_recovery)
+               return 0;
+
        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32(reg_index);
                if (check_changed) {
@@ -245,6 +248,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
        bool ras_intr = false;
        bool skip_unsupport = false;
 
+       if (psp->adev->in_pci_err_recovery)
+               return 0;
+
        mutex_lock(&psp->mutex);
 
        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 2db195e..ccf096c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6980,15 +6980,19 @@ static int gfx_v10_0_hw_fini(void *handle)
 
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+       if (!adev->in_pci_err_recovery) {
 #ifndef BRING_UP_DEBUG
-       if (amdgpu_async_gfx_ring) {
-               r = gfx_v10_0_kiq_disable_kgq(adev);
-               if (r)
-                       DRM_ERROR("KGQ disable failed\n");
-       }
+               if (amdgpu_async_gfx_ring) {
+                       r = gfx_v10_0_kiq_disable_kgq(adev);
+                       if (r)
+                               DRM_ERROR("KGQ disable failed\n");
+               }
 #endif
-       if (amdgpu_gfx_disable_kcq(adev))
-               DRM_ERROR("KCQ disable failed\n");
+               if (amdgpu_gfx_disable_kcq(adev))
+                       DRM_ERROR("KCQ disable failed\n");
+       }
+
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index a58ea08..97aa72a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -112,6 +112,9 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;
 
+       if (smu->adev->in_pci_err_recovery)
+               return 0;
+
        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to