For the GPU metrics export, caching the metrics makes no sense: it is up to
the user to decide how often the metrics should be retrieved. Add a
bypass_cache parameter to vega12_get_metrics_table() and set it for the
gpu_metrics export so that a fresh table is always fetched from the SMU.

Change-Id: Ic2a27ebc90f0a7cf581d0697c121b6d7df030f3b
Signed-off-by: Evan Quan <evan.q...@amd.com>
---
 .../drm/amd/powerplay/hwmgr/vega12_hwmgr.c    | 29 ++++++++++++-------
 1 file changed, 18 insertions(+), 11 deletions(-)
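
For reviewers, a minimal user-space sketch of the cache-with-bypass behaviour
described above, as vega12_get_metrics_table() implements it after this patch.
fetch_from_hw(), struct metrics and the 500 ms window are hypothetical
stand-ins for smum_smc_table_manager(), SmuMetrics_t and HZ / 2, not the
driver's real interfaces:

/*
 * Hypothetical stand-ins: fetch_from_hw() plays the role of
 * smum_smc_table_manager(), struct metrics the role of SmuMetrics_t,
 * and the 500 ms window the role of HZ / 2.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

struct metrics {
	unsigned int gpu_activity;
	unsigned int temperature;
};

static struct metrics cached;		/* last table read back from "hardware" */
static struct timespec cached_time;	/* when the cache was last refreshed */
static bool cache_valid;

/* Placeholder for the real SMU table read. */
static int fetch_from_hw(struct metrics *out)
{
	out->gpu_activity = 42;
	out->temperature = 65;
	return 0;
}

static long ms_since(const struct timespec *then)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - then->tv_sec) * 1000 +
	       (now.tv_nsec - then->tv_nsec) / 1000000;
}

/*
 * Refresh the cached table when it is stale or when the caller asks to
 * bypass the cache; copy it out only when the caller passed a buffer,
 * mirroring the patched vega12_get_metrics_table().
 */
static int get_metrics(struct metrics *out, bool bypass_cache)
{
	int ret = 0;

	if (bypass_cache || !cache_valid || ms_since(&cached_time) > 500) {
		ret = fetch_from_hw(&cached);
		if (ret)
			return ret;
		clock_gettime(CLOCK_MONOTONIC, &cached_time);
		cache_valid = true;
	}

	if (out)
		memcpy(out, &cached, sizeof(*out));

	return ret;
}

int main(void)
{
	struct metrics m;

	get_metrics(&m, false);	/* sensor read: a <500 ms old value is fine */
	get_metrics(&m, true);	/* gpu_metrics export: always fetch fresh data */
	printf("activity %u%%, temp %u C\n", m.gpu_activity, m.temperature);
	return 0;
}

The sensor paths below keep passing false and so may reuse the cached copy,
while vega12_get_gpu_metrics() passes true so every export returns fresh data.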

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 67e6a0521699..e5aada3b2d4d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1262,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
        return (mem_clk * 100);
 }
 
-static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
+                                   SmuMetrics_t *metrics_table,
+                                   bool bypass_cache)
 {
        struct vega12_hwmgr *data =
                        (struct vega12_hwmgr *)(hwmgr->backend);
        int ret = 0;
 
-       if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
-               ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
-                               TABLE_SMU_METRICS, true);
+       if (bypass_cache ||
+           !data->metrics_time ||
+           time_after(jiffies, data->metrics_time + HZ / 2)) {
+               ret = smum_smc_table_manager(hwmgr,
+                                            (uint8_t *)(&data->metrics_table),
+                                            TABLE_SMU_METRICS,
+                                            true);
                if (ret) {
                        pr_info("Failed to export SMU metrics table!\n");
                        return ret;
                }
-               memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
                data->metrics_time = jiffies;
-       } else
+       }
+
+       if (metrics_table)
                memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
 
        return ret;
@@ -1288,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
        SmuMetrics_t metrics_table;
        int ret = 0;
 
-       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
        if (ret)
                return ret;
 
@@ -1339,7 +1346,7 @@ static int vega12_get_current_activity_percent(
        SmuMetrics_t metrics_table;
        int ret = 0;
 
-       ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+       ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
        if (ret)
                return ret;
 
@@ -1387,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
                if (ret)
                        return ret;
 
@@ -1396,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MEM_TEMP:
-               ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+               ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
                if (ret)
                        return ret;
 
@@ -2750,7 +2757,7 @@ static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
        uint32_t fan_speed_rpm;
        int ret;
 
-       ret = vega12_get_metrics_table(hwmgr, &metrics);
+       ret = vega12_get_metrics_table(hwmgr, &metrics, true);
        if (ret)
                return ret;
 
-- 
2.28.0
