On 6/16/23 01:57, Evan Quan wrote:
To protect PMFW from being overloaded.

Signed-off-by: Evan Quan <evan.q...@amd.com>
Reviewed-by: Mario Limonciello <mario.limoncie...@amd.com>
---
  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 28 ++++++++++++++++---
  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  7 +++++
  2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 89f876cc60e6..2619e310ef54 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1272,6 +1272,22 @@ static void smu_wbrf_event_handler(struct amdgpu_device *adev)
 {
 	struct smu_context *smu = adev->powerplay.pp_handle;
 
+	schedule_delayed_work(&smu->wbrf_delayed_work,
+			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+}
+
+/**
+ * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
+ *
+ * @work: struct work_struct pointer
+ *
+ * Flood is over and driver will consume the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+       struct smu_context *smu =
+               container_of(work, struct smu_context, wbrf_delayed_work.work);
+
        smu_wbrf_handle_exclusion_ranges(smu);
  }
@@ -1311,6 +1327,9 @@ static int smu_wbrf_init(struct smu_context *smu)
        if (!smu->wbrf_supported)
                return 0;
 
+	INIT_DELAYED_WORK(&smu->wbrf_delayed_work,
+			  smu_wbrf_delayed_work_handler);
+
        ret = amdgpu_acpi_register_wbrf_notify_handler(adev,
                                                       smu_wbrf_event_handler);
        if (ret)
@@ -1321,11 +1340,10 @@ static int smu_wbrf_init(struct smu_context *smu)
         * before our driver loaded. To make sure our driver
         * is awared of those exclusion ranges.
         */
-       ret = smu_wbrf_handle_exclusion_ranges(smu);
-       if (ret)
-               dev_err(adev->dev, "Failed to handle wbrf exclusion ranges\n");
+       schedule_delayed_work(&smu->wbrf_delayed_work,
+                             msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
 
-	return ret;
+       return 0;
  }
 /**
@@ -1343,6 +1361,8 @@ static void smu_wbrf_fini(struct smu_context *smu)
                return;
 
 	amdgpu_acpi_unregister_wbrf_notify_handler(adev);
+
+       cancel_delayed_work_sync(&smu->wbrf_delayed_work);
  }
 
 static int smu_smc_hw_setup(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ff0af3da0be2..aa63cc43d41c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -478,6 +478,12 @@ struct stb_context {
 #define WORKLOAD_POLICY_MAX 7
 
+/*
+ * Configure wbrf event handling pace as there can be only one
+ * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE   10
+
  struct smu_context
  {
        struct amdgpu_device            *adev;
@@ -576,6 +582,7 @@ struct smu_context
 
 	/* data structures for wbrf feature support */
        bool                            wbrf_supported;
+       struct delayed_work             wbrf_delayed_work;
  };
 
 struct i2c_adapter;

Reply via email to