RE: [PATCH] drm/amdgpu: refine amdgpu_fru_get_product_info

2021-05-25 Thread Gui, Jack
Reviewed-by: Jack Gui 

-Original Message-
From: amd-gfx  On Behalf Of Chen, 
Jiansong (Simon)
Sent: Tuesday, May 25, 2021 4:07 PM
To: Chen, Guchun ; amd-gfx@lists.freedesktop.org
Subject: RE: [PATCH] drm/amdgpu: refine amdgpu_fru_get_product_info

I think we still could keep them to be more informative for the moment.

Regards,
Jiansong
-Original Message-
From: Chen, Guchun  
Sent: Tuesday, May 25, 2021 3:47 PM
To: Chen, Jiansong (Simon) ; 
amd-gfx@lists.freedesktop.org
Cc: Chen, Jiansong (Simon) 
Subject: RE: [PATCH] drm/amdgpu: refine amdgpu_fru_get_product_info

[AMD Public Use]

+   len = size;
/* Serial number should only be 16 characters. Any more,
 * and something could be wrong. Cap it at 16 to be safe
 */
-   if (size > 16) {
+   if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This 
is likely a mistake");
-   size = 16;
+   len = sizeof(adev->serial) - 1;
}

The hardcoded '16' in comment/warning printing needs to be dropped as well?

Regards,
Guchun

-Original Message-
From: amd-gfx  On Behalf Of Jiansong Chen
Sent: Tuesday, May 25, 2021 2:42 PM
To: amd-gfx@lists.freedesktop.org
Cc: Chen, Jiansong (Simon) 
Subject: [PATCH] drm/amdgpu: refine amdgpu_fru_get_product_info

1. eliminate potential array index out of bounds.
2. return meaningful value for failure.

Signed-off-by: Jiansong Chen 
Change-Id: I9be36eb2e42ee46cd00464b0f2c35a4e4ea213e3
---
 .../gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c| 42 ++-
 1 file changed, 23 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 8f4a8f8d8146..39b6c6bfab45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device 
*adev, uint32_t addrptr,  int amdgpu_fru_get_product_info(struct amdgpu_device 
*adev)  {
unsigned char buff[34];
-   int addrptr = 0, size = 0;
+   int addrptr, size;
+   int len;
 
if (!is_fru_eeprom_supported(adev))
return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.smu_i2c.algo) {
DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-   return 0;
+   return -ENODEV;
}
 
/* There's a lot of repetition here. This is due to the FRU having @@ 
-128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-   return size;
+   return -EINVAL;
}
 
/* Increment the addrptr by the size of the field, and 1 due to the @@ 
-138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-   return size;
+   return -EINVAL;
}
 
+   len = size;
/* Product name should only be 32 characters. Any more,
 * and something could be wrong. Cap it at 32 to be safe
 */
-   if (size > 32) {
+   if (len >= sizeof(adev->product_name)) {
DRM_WARN("FRU Product Number is larger than 32 characters. This 
is likely a mistake");
-   size = 32;
+   len = sizeof(adev->product_name) - 1;
}
/* Start at 2 due to buff using fields 0 and 1 for the address */
-   memcpy(adev->product_name, &buff[2], size);
-   adev->product_name[size] = '\0';
+   memcpy(adev->product_name, &buff[2], len);
+   adev->product_name[len] = '\0';
 
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-   return size;
+   return -EINVAL;
}
 
+   len = size;
/* Product number should only be 16 characters. Any more,
 * and something could be wrong. Cap it at 16 to be safe
 */
-   if (size > 16) {
+   if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This 
is likely a mistake");
-   size = 16;
+   len = sizeof(adev->product_number) - 1;
}
-   memcpy(adev->product_number, &buff[2], size);
-   adev->product_number[size] = '\0';
+   memcpy(adev->product_number, &buff[2], len);
+   adev->product_number[len] = '\0';
 
addrptr += size + 1;

RE: [PATCH 2/2] drm/amdgpu: add DM block for dimgrey_cavefish

2020-10-10 Thread Gui, Jack
[AMD Official Use Only - Internal Distribution Only]

The series is
Reviewed-by: Jack Gui 

-Original Message-
From: Zhou1, Tao  
Sent: Saturday, October 10, 2020 3:57 PM
To: Chen, Jiansong (Simon) ; Gui, Jack 
; Zhang, Hawking ; 
amd-gfx@lists.freedesktop.org
Cc: Zhou1, Tao 
Subject: [PATCH 2/2] drm/amdgpu: add DM block for dimgrey_cavefish

Add DM block support for dimgrey_cavefish.

Signed-off-by: Tao Zhou 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
 drivers/gpu/drm/amd/amdgpu/nv.c| 4 
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8de930108db4..a2f0ce854160 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3013,6 +3013,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type 
asic_type)  #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+   case CHIP_DIMGREY_CAVEFISH:
 #endif
return amdgpu_dc != 0;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c 
index e6bc7f09ec43..47bd79c9e6ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -637,6 +637,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+else if (amdgpu_device_has_dc_support(adev))
+amdgpu_device_ip_block_add(adev, &dm_ip_block); 
+#endif
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
--
2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/amdgpu: set the default value of noretry to 1 for some dGPUs

2020-10-13 Thread Gui, Jack
[AMD Public Use]

Hi Guchun,

It's fine.
I will address the patch according to your suggestion.

BR,
Jack

-Original Message-
From: Chen, Guchun  
Sent: Tuesday, October 13, 2020 3:45 PM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org; Deucher, 
Alexander 
Cc: Gui, Jack ; Zhou1, Tao ; Huang, Ray 
; Zhang, Hawking ; Kuehling, Felix 

Subject: RE: [PATCH] drm/amd/amdgpu: set the default value of noretry to 1 for 
some dGPUs

[AMD Public Use]

Hi Jack,

How about improving the patch a bit like below? As the code for raven and 
default case is totally the same, maybe we could squash both together with 
adding the comment for RAVEN and other default ASICs for readability.

switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_ARCTURUS:
/*
 * noretry = 0 will cause kfd page fault tests fail
 * for some ASICs, so set default to 1 for these ASICs.
 */
if (amdgpu_noretry == -1)
gmc->noretry = 1;
else
gmc->noretry = amdgpu_noretry;
break;
case CHIP_RAVEN: 
default:
/* Raven currently has issues with noretry
 * regardless of what we decide for other
 * asics, we should leave raven with
 * noretry = 0 until we root cause the
 * issues. The same for other default ASICs.
 */
if (amdgpu_noretry == -1)
gmc->noretry = 0;
else
gmc->noretry = amdgpu_noretry;
break;

Regards,
Guchun

-Original Message-
From: Chengming Gui  
Sent: Tuesday, October 13, 2020 12:35 PM
To: amd-gfx@lists.freedesktop.org; Deucher, Alexander 

Cc: Gui, Jack ; Zhou1, Tao ; 
rui.hu...@amd.com; Chen, Guchun ; Zhang, Hawking 
; Kuehling, Felix 
Subject: [PATCH] drm/amd/amdgpu: set the default value of noretry to 1 for some 
dGPUs

noretry = 0 cause some dGPU's kfd page fault tests fail, so set noretry to 1 
for these special ASICs:
vega20/navi10/navi14/ARCTURUS

Signed-off-by: Chengming Gui 
Change-Id: I3be70f463a49b0cd5c56456431d6c2cb98b13872
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 13 +
 1 file changed, 13 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 36604d751d62..f317bdeffcb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -437,6 +437,19 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
else
gmc->noretry = amdgpu_noretry;
break;
+   case CHIP_VEGA20:
+   case CHIP_NAVI10:
+   case CHIP_NAVI14:
+   case CHIP_ARCTURUS:
+   /*
+* noretry = 0 will cause kfd page fault tests fail
+* for some ASICs, so set default to 1 for these ASICs.
+*/
+   if (amdgpu_noretry == -1)
+   gmc->noretry = 1;
+   else
+   gmc->noretry = amdgpu_noretry;
+   break;
default:
/* default this to 0 for now, but we may want
 * to change this in the future for certain
--
2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 2/2] drm/amd/powerplay: add arcturus_is_dpm_running function for arcturus

2019-08-12 Thread Gui, Jack
Hi Evan,

All supported features can be set there.
If any one of these features is running, we can judge that dpm is running.

BR,
Jack Gui

-Original Message-
From: Quan, Evan  
Sent: Monday, August 12, 2019 5:39 PM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: RE: [PATCH 2/2] drm/amd/powerplay: add arcturus_is_dpm_running 
function for arcturus

Please set FEATURE_DPM_PREFETCHER_MASK | FEATURE_DPM_GFXCLK_MASK only. For now, 
only these two are enabled on arcturus.

With that fixed, the patch is reviewed-by: Evan Quan 
> -Original Message-
> From: amd-gfx  On Behalf Of 
> Chengming Gui
> Sent: Monday, August 12, 2019 4:22 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Gui, Jack 
> Subject: [PATCH 2/2] drm/amd/powerplay: add arcturus_is_dpm_running 
> function for arcturus
> 
> add arcturus_is_dpm_running function
> 
> Signed-off-by: Chengming Gui 
> ---
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 21
> +
>  1 file changed, 21 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index 03ce871..9107beb 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -51,6 +51,15 @@
>  #define SMU_FEATURES_HIGH_MASK   0x
>  #define SMU_FEATURES_HIGH_SHIFT  32
> 
> +#define SMC_DPM_FEATURE ( \
> + FEATURE_DPM_PREFETCHER_MASK | \
> + FEATURE_DPM_GFXCLK_MASK | \
> + FEATURE_DPM_UCLK_MASK | \
> + FEATURE_DPM_SOCCLK_MASK | \
> + FEATURE_DPM_MP0CLK_MASK | \
> + FEATURE_DPM_FCLK_MASK | \
> + FEATURE_DPM_XGMI_MASK)
> +
>  /* possible frequency drift (1Mhz) */
>  #define EPSILON  1
> 
> @@ -1873,6 +1882,17 @@ static void arcturus_dump_pptable(struct 
> smu_context *smu)
> 
>  }
> 
> +static bool arcturus_is_dpm_running(struct smu_context *smu) {
> + int ret = 0;
> + uint32_t feature_mask[2];
> + unsigned long feature_enabled;
> + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
> + feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
> +((uint64_t)feature_mask[1] << 32));
> + return !!(feature_enabled & SMC_DPM_FEATURE); }
> +
>  static const struct pptable_funcs arcturus_ppt_funcs = {
>   /* translate smu index into arcturus specific index */
>   .get_smu_msg_index = arcturus_get_smu_msg_index, @@ -1910,6
> +1930,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
>   /* debug (internal used) */
>   .dump_pptable = arcturus_dump_pptable,
>   .get_power_limit = arcturus_get_power_limit,
> + .is_dpm_running = arcturus_is_dpm_running,
>  };
> 
>  void arcturus_set_ppt_funcs(struct smu_context *smu)
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/powerplay: guard manual mode prerequisite for clock level force

2019-09-01 Thread Gui, Jack
Reviewed-by: Jack Gui 

-Original Message-
From: Evan Quan  
Sent: Friday, August 30, 2019 5:37 PM
To: amd-gfx@lists.freedesktop.org
Cc: Li, Candice ; Gui, Jack ; Quan, Evan 

Subject: [PATCH] drm/amd/powerplay: guard manual mode prerequisite for clock 
level force

Force clock level is for dpm manual mode only.

Change-Id: I3b4caf3fafc72197d65e2b9255c68e40e673e25e
Reported-by: Candice Li 
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 18 ++
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  5 +++--
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c |  6 --
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 317d41331f4b..dd6c1547e523 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1758,6 +1758,24 @@ int smu_set_display_count(struct smu_context *smu, 
uint32_t count)
return ret;
 }
 
+int smu_force_clk_levels(struct smu_context *smu,
+enum smu_clk_type clk_type,
+uint32_t mask)
+{
+   struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+   int ret = 0;
+
+   if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+   pr_debug("force clock level is for dpm manual mode only.\n");
+   return -EINVAL;
+   }
+
+   if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+   ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+   return ret;
+}
+
 const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index e1165d323ea9..b19224cb6d6d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -635,8 +635,6 @@ struct smu_funcs
((smu)->funcs->get_current_clk_freq? 
(smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)  #define 
smu_print_clk_levels(smu, clk_type, buf) \
((smu)->ppt_funcs->print_clk_levels ? 
(smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) -#define 
smu_force_clk_levels(smu, clk_type, level) \
-   ((smu)->ppt_funcs->force_clk_levels ? 
(smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
 #define smu_get_od_percentage(smu, type) \
((smu)->ppt_funcs->get_od_percentage ? 
(smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)  #define 
smu_set_od_percentage(smu, type, value) \ @@ -833,5 +831,8 @@ const char 
*smu_get_message_name(struct smu_context *smu, enum smu_message_type  const 
char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask 
feature);  size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char 
*buf);  int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t 
new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+enum smu_clk_type clk_type,
+uint32_t mask);
 
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 899bf96b23e1..78d77a63e084 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1274,14 +1274,8 @@ static int vega20_force_clk_levels(struct smu_context 
*smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
-   struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
 
-   if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-   pr_info("force clock level is for dpm manual mode only.\n");
-   return -EINVAL;
-   }
-
mutex_lock(&(smu->mutex));
 
soft_min_level = mask ? (ffs(mask) - 1) : 0;
--
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/powerplay: update cached feature enablement status V3

2019-09-02 Thread Gui, Jack
Reviewed-by: Jack Gui 

-Original Message-
From: Quan, Evan  
Sent: Monday, September 2, 2019 4:16 PM
To: Quan, Evan ; amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: RE: [PATCH] drm/amd/powerplay: update cached feature enablement status 
V3

Ping..

> -Original Message-
> From: Evan Quan 
> Sent: 2019年8月23日 12:49
> To: amd-gfx@lists.freedesktop.org
> Cc: Quan, Evan 
> Subject: [PATCH] drm/amd/powerplay: update cached feature enablement 
> status V3
> 
> Need to update in cache feature enablement status after pp_feature 
> settings. Another fix for the commit below:
> drm/amd/powerplay: implment sysfs feature status function in smu
> 
> V2: update smu_feature_update_enable_state() and relates
> V3: use bitmap_or and bitmap_andnot
> 
> Change-Id: I90e29b0d839df26825d5993212f6097c7ad4bebf
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 101 +
> -
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
>  2 files changed, 49 insertions(+), 53 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 4df7fb6eaf3c..c8c00966a621 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -94,6 +94,52 @@ size_t smu_sys_get_pp_feature_mask(struct
> smu_context *smu, char *buf)
>   return size;
>  }
> 
> +static int smu_feature_update_enable_state(struct smu_context *smu,
> +uint64_t feature_mask,
> +bool enabled)
> +{
> + struct smu_feature *feature = &smu->smu_feature;
> + uint32_t feature_low = 0, feature_high = 0;
> + int ret = 0;
> +
> + if (!smu->pm_enabled)
> + return ret;
> +
> + feature_low = (feature_mask >> 0 ) & 0x;
> + feature_high = (feature_mask >> 32) & 0x;
> +
> + if (enabled) {
> + ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_EnableSmuFeaturesLow,
> +   feature_low);
> + if (ret)
> + return ret;
> + ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_EnableSmuFeaturesHigh,
> +   feature_high);
> + if (ret)
> + return ret;
> + } else {
> + ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_DisableSmuFeaturesLow,
> +   feature_low);
> + if (ret)
> + return ret;
> + ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_DisableSmuFeaturesHigh,
> +   feature_high);
> + if (ret)
> + return ret;
> + }
> +
> + mutex_lock(&feature->mutex);
> + if (enabled)
> + bitmap_or(feature->enabled, feature->enabled,
> + (unsigned long *)(&feature_mask),
> SMU_FEATURE_MAX);
> + else
> + bitmap_andnot(feature->enabled, feature->enabled,
> + (unsigned long *)(&feature_mask),
> SMU_FEATURE_MAX);
> + mutex_unlock(&feature->mutex);
> +
> + return ret;
> +}
> +
>  int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t
> new_mask)  {
>   int ret = 0;
> @@ -591,41 +637,7 @@ int smu_feature_init_dpm(struct smu_context
> *smu)
> 
>   return ret;
>  }
> -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t 
> feature_mask, bool enabled) -{
> - uint32_t feature_low = 0, feature_high = 0;
> - int ret = 0;
> -
> - if (!smu->pm_enabled)
> - return ret;
> -
> - feature_low = (feature_mask >> 0 ) & 0x;
> - feature_high = (feature_mask >> 32) & 0x;
> -
> - if (enabled) {
> - ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_EnableSmuFeaturesLow,
> -   feature_low);
> - if (ret)
> - return ret;
> - ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_EnableSmuFeaturesHigh,
> -   feature_high);
> - if (ret)
> - return ret;
> -
> - } else {
> - ret = smu_send_smc_msg_with_param(smu,
> SMU_MSG_DisableSmuFeaturesLow,
> -   feature_low);
> - if (ret)
> - return ret;
&

RE: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini

2019-09-02 Thread Gui, Jack
Reviewed-by: Jack Gui 

-Original Message-
From: Quan, Evan  
Sent: Tuesday, September 3, 2019 11:44 AM
To: amd-gfx@lists.freedesktop.org
Cc: Li, Candice ; Gui, Jack ; Quan, Evan 

Subject: [PATCH] drm/amd/powerplay: do proper cleanups on hw_fini

These are needed for smu_reset support.

Change-Id: If29ede4b99758adb08fd4e16665f44fd893ec99b
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 17 +
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 +++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 10 ++
 3 files changed, 30 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index d5ee13a78eb7..3cf8d944f890 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1286,6 +1286,11 @@ static int smu_hw_init(void *handle)
return ret;
 }
 
+static int smu_stop_dpms(struct smu_context *smu) {
+   return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); }
+
 static int smu_hw_fini(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1298,6 
+1303,18 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
 
+   ret = smu_stop_thermal_control(smu);
+   if (ret) {
+   pr_warn("Fail to stop thermal control!\n");
+   return ret;
+   }
+
+   ret = smu_stop_dpms(smu);
+   if (ret) {
+   pr_warn("Fail to stop Dpms!\n");
+   return ret;
+   }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index b19224cb6d6d..8e4b0ad24712 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -498,6 +498,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type 
clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
+   int (*stop_thermal_control)(struct smu_context *smu);
int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
   void *data, uint32_t *size);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk); 
@@ -647,6 +648,8 @@ struct smu_funcs
((smu)->ppt_funcs->set_thermal_fan_table ? 
(smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)  #define 
smu_start_thermal_control(smu) \
((smu)->funcs->start_thermal_control? 
(smu)->funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+   ((smu)->funcs->stop_thermal_control? 
+(smu)->funcs->stop_thermal_control((smu)) : 0)
 #define smu_read_sensor(smu, sensor, data, size) \
((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), 
(sensor), (data), (size)) : 0)  #define smu_smc_read_sensor(smu, sensor, data, 
size) \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index db5e94ce54af..1a38af84394e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1209,6 +1209,15 @@ static int smu_v11_0_start_thermal_control(struct 
smu_context *smu)
return ret;
 }
 
+static int smu_v11_0_stop_thermal_control(struct smu_context *smu) {
+   struct amdgpu_device *adev = smu->adev;
+
+   WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+   return 0;
+}
+
 static uint16_t convert_to_vddc(uint8_t vid)  {
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@ 
-1783,6 +1792,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
.start_thermal_control = smu_v11_0_start_thermal_control,
+   .stop_thermal_control = smu_v11_0_stop_thermal_control,
.read_sensor = smu_v11_0_read_sensor,
.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
.display_clock_voltage_request = 
smu_v11_0_display_clock_voltage_request,
--
2.23.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH 2/2] drm/amdgpu: Add debugfs interface to set arbitrary sclk for navi14

2020-02-27 Thread Gui, Jack
[AMD Official Use Only - Internal Distribution Only]

Hi Evan,

No lock is created for smu_set_soft_freq_range() in this code path.
But some other sysfs interfaces calling  smu_set_soft_freq_range() indirectly
have created lock in middle function to protect  smu_set_soft_freq_range() as 
critical resource.
So, if we want to lock the "issue message action" in smu_set_soft_freq_range(), 
another patch is needed.

BR,
Jack

-Original Message-
From: Quan, Evan  
Sent: Friday, February 28, 2020 11:37 AM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org
Cc: Feng, Kenneth ; Xu, Feifei ; Gui, 
Jack 
Subject: RE: [PATCH 2/2] drm/amdgpu: Add debugfs interface to set arbitrary 
sclk for navi14

Please confirm whether smu_set_soft_freq_range() is properly lock protected.

-Original Message-
From: Chengming Gui 
Sent: Friday, February 28, 2020 10:37 AM
To: amd-gfx@lists.freedesktop.org
Cc: Quan, Evan ; Feng, Kenneth ; Xu, 
Feifei ; Gui, Jack 
Subject: [PATCH 2/2] drm/amdgpu: Add debugfs interface to set arbitrary sclk 
for navi14

add debugfs interface amdgpu_force_sclk
to set arbitrary sclk for navi14

Signed-off-by: Chengming Gui 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c| 44 ++
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 ++
 2 files changed, 47 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 3bb7405..5ee7e92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1269,9 +1269,43 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
return 0;
 }
 
+static int amdgpu_debugfs_sclk_set(void *data, u64 val) {
+   int ret = 0;
+   uint32_t max_freq, min_freq;
+   struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+   if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+   return -EINVAL;
+
+   ret = pm_runtime_get_sync(adev->ddev->dev);
+   if (ret < 0)
+   return ret;
+
+   if (is_support_sw_smu(adev)) {
+   ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, 
&max_freq, true);
+   if (ret || val > max_freq || val < min_freq)
+   return -EINVAL;
+   ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, 
(uint32_t)val, (uint32_t)val);
+   } else {
+   return 0;
+   }
+
+   pm_runtime_mark_last_busy(adev->ddev->dev);
+   pm_runtime_put_autosuspend(adev->ddev->dev);
+
+   if (ret)
+   return -EINVAL;
+
+   return 0;
+}
+
 DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
 
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+   amdgpu_debugfs_sclk_set, "%llu\n");
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)  {
int r, i;
@@ -1285,6 +1319,15 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO;
}
 
+   adev->smu.debugfs_sclk =
+   debugfs_create_file("amdgpu_force_sclk", 0200,
+   adev->ddev->primary->debugfs_root, adev,
+   &fops_sclk_set);
+   if (!(adev->smu.debugfs_sclk)) {
+   DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
+   return -EIO;
+   }
+
/* Register debugfs entries for amdgpu_ttm */
r = amdgpu_ttm_debugfs_init(adev);
if (r) {
@@ -1353,6 +1396,7 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev)
}
amdgpu_ttm_debugfs_fini(adev);
debugfs_remove(adev->debugfs_preempt);
+   debugfs_remove(adev->smu.debugfs_sclk);
 }
 
 #else
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714..36fe19c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -371,6 +371,9 @@ struct smu_context
struct amd_pp_display_configuration  *display_config;
struct smu_baco_context smu_baco;
void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+   struct dentry   *debugfs_sclk;
+#endif
 
uint32_t pstate_sclk;
uint32_t pstate_mclk;
--
2.7.4
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: update DRIVE_IF_VERSION for beige_goby

2021-07-15 Thread Gui, Jack
[AMD Official Use Only]

Reviewed-by: Jack Gui 

-Original Message-
From: Zhou1, Tao  
Sent: Thursday, July 15, 2021 3:51 PM
To: amd-gfx@lists.freedesktop.org; Chen, Jiansong (Simon) 
; Gui, Jack ; Zhang, Hawking 

Cc: Zhou1, Tao 
Subject: [PATCH] drm/amd/pm: update DRIVE_IF_VERSION for beige_goby

Update the version to 0xD for beige_goby.

Signed-off-by: Tao Zhou 
---
 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index b89e7dca8906..385b2ea5379c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -34,7 +34,7 @@
 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE  #define 
SMU11_DRIVER_IF_VERSION_VANGOGH 0x03  #define 
SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF -#define 
SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
 
 /* MP Apertures */
 #define MP0_Public 0x0380
--
2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: bug fix for the runtime pm BACO

2021-08-05 Thread Gui, Jack
[AMD Official Use Only]

Tested and Reviewed-by: Jack Gui 

-Original Message-
From: Kenneth Feng  
Sent: Friday, August 6, 2021 10:35 AM
To: amd-gfx@lists.freedesktop.org
Cc: Gui, Jack ; Feng, Kenneth 
Subject: [PATCH] drm/amd/pm: bug fix for the runtime pm BACO

In some systems only MACO is supported. This is to fix the problem that runtime 
pm is enabled but BACO is not supported. MACO will be handled separately.

Signed-off-by: Kenneth Feng 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 90e40aacb8f6..261ef8ca862e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct 
smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
 
-   if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-   powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+   if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) 
+{
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
--
2.17.1


RE: [PATCH] drm/amd/pm: fix the issue of uploading powerplay table

2021-09-05 Thread Gui, Jack
[AMD Official Use Only]

Reviewed-by: Jack Gui < jack@amd.com >

-Original Message-
From: amd-gfx  On Behalf Of Kenneth Feng
Sent: Monday, September 6, 2021 7:57 AM
To: amd-gfx@lists.freedesktop.org
Cc: Feng, Kenneth 
Subject: [PATCH] drm/amd/pm: fix the issue of uploading powerplay table

fix the issue of uploading powerplay table due to the dependency of rlc.

Signed-off-by: Kenneth Feng 
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 3ab1ce4d3419..04863a797115 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 */
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
-   (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+   (adev->asic_type <= CHIP_BEIGE_GOBY))
return smu_disable_all_features_with_exception(smu,
   true,
   
SMU_FEATURE_COUNT);
-- 
2.17.1


RE: [PATCH] drm/amdgpu: only init tap_delay ucode when it's included in ucode binary

2022-08-30 Thread Gui, Jack
[AMD Official Use Only - General]

Reviewed-by: Jack Gui 

-Original Message-
From: Zhang, Hawking  
Sent: Tuesday, August 30, 2022 4:10 PM
To: amd-gfx@lists.freedesktop.org; Gui, Jack 
Cc: Zhang, Hawking 
Subject: [PATCH] drm/amdgpu: only init tap_delay ucode when it's included in 
ucode binary

Not all the gfx10 variants need to integrate the global tap_delay and per-SE 
tap_delay firmwares

Only init tap_delay ucode when it is included in the rlc ucode binary, so the driver 
doesn't send a null buffer to psp for firmware loading

Signed-off-by: Hawking Zhang 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 60 +++---
 1 file changed, 35 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1a915edccb92..e4dde41f2f68 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct 
amdgpu_device *adev)
 
}
 
-   info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
-   info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
-   info->fw = adev->gfx.rlc_fw;
-   adev->firmware.fw_size +=
-   ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, 
PAGE_SIZE);
+   if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+   info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+   info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+   info->fw = adev->gfx.rlc_fw;
+   adev->firmware.fw_size +=
+   
ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+   }
 
-   info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
-   info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
-   info->fw = adev->gfx.rlc_fw;
-   adev->firmware.fw_size +=
-   ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, 
PAGE_SIZE);
+   if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+   info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+   info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+   info->fw = adev->gfx.rlc_fw;
+   adev->firmware.fw_size +=
+   
ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+   }
 
-   info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
-   info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
-   info->fw = adev->gfx.rlc_fw;
-   adev->firmware.fw_size +=
-   ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, 
PAGE_SIZE);
+   if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+   info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+   info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+   info->fw = adev->gfx.rlc_fw;
+   adev->firmware.fw_size +=
+   
ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+   }
 
-   info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
-   info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
-   info->fw = adev->gfx.rlc_fw;
-   adev->firmware.fw_size +=
-   ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, 
PAGE_SIZE);
+   if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+   info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+   info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+   info->fw = adev->gfx.rlc_fw;
+   adev->firmware.fw_size +=
+   
ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+   }
 
-   info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
-   info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
-   info->fw = adev->gfx.rlc_fw;
-   adev->firmware.fw_size +=
-   ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, 
PAGE_SIZE);
+   if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+   info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+   info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+   info->fw = adev->gfx.rlc_fw;
+   

RE: [PATCH] drm/amdgpu: drop temp programming for pagefault handling

2023-04-12 Thread Gui, Jack
[AMD Official Use Only - General]

Reviewed-by: Jack Gui 

-Original Message-
From: Zhang, Hawking  
Sent: Wednesday, April 12, 2023 10:02 PM
To: amd-gfx@lists.freedesktop.org; Gui, Jack ; Gao, Likun 

Cc: Zhang, Hawking 
Subject: [PATCH] drm/amdgpu: drop temp programming for pagefault handling

Was introduced as a workaround. Not needed anymore.

Signed-off-by: Hawking Zhang 
---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c | 22 --
 1 file changed, 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index be0d0f47415e..13712640fa46 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -417,34 +417,12 @@ static void gfxhub_v3_0_set_fault_enable_default(struct 
amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1);
WREG32_SOC15(GC, 0, regCP_DEBUG, tmp);
 
-   /**
-* Set GRBM_GFX_INDEX in broad cast mode
-* before programming GL1C_UTCL0_CNTL1 and SQG_CONFIG
-*/
-   WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, regGRBM_GFX_INDEX_DEFAULT);
-
-   /**
-* Retry respond mode: RETRY
-* Error (no retry) respond mode: SUCCESS
-*/
-   tmp = RREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1);
-   tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_MODE, 0);
-   tmp = REG_SET_FIELD(tmp, GL1C_UTCL0_CNTL1, RESP_FAULT_MODE, 0x2);
-   WREG32_SOC15(GC, 0, regGL1C_UTCL0_CNTL1, tmp);
-
/* These registers are not accessible to VF-SRIOV.
 * The PF will program them instead.
 */
if (amdgpu_sriov_vf(adev))
return;
 
-   /* Disable SQ XNACK interrupt for all VMIDs */
-   tmp = RREG32_SOC15(GC, 0, regSQG_CONFIG);
-   tmp = REG_SET_FIELD(tmp, SQG_CONFIG, XNACK_INTR_MASK,
-   SQG_CONFIG__XNACK_INTR_MASK_MASK >>
-   SQG_CONFIG__XNACK_INTR_MASK__SHIFT);
-   WREG32_SOC15(GC, 0, regSQG_CONFIG, tmp);
-
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-- 
2.34.1


RE: [PATCH] drm/amd/amdgpu: fix incorrect translation about the PCIe MLW info

2019-03-20 Thread Gui, Jack
Hi Alex,

I will rethink the patch.

Polaris10 encountered an issue with the PCIe dpm feature (on some platforms, not all).
If we update the pcie table with X16 link width, the system will hang,
but if we update with X8, our driver will modprobe successfully.
The link width read from the config register is really X16.

Could you give me some insight for this?

BR,
Jack Gui

-Original Message-
From: Alex Deucher  
Sent: Tuesday, March 19, 2019 10:43 PM
To: Gui, Jack 
Cc: amd-gfx list 
Subject: Re: [PATCH] drm/amd/amdgpu: fix incorrect translation about the PCIe 
MLW info

On Tue, Mar 19, 2019 at 12:26 AM Chengming Gui  wrote:
>
> Max Link Width's full mask is 0x3f,
> and its highest bit expresses X16.
>
> Signed-off-by: Chengming Gui 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 ++--
>  1 file changed, 2 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 964a4d3..435f0d7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3763,15 +3763,6 @@ static void amdgpu_device_get_pcie_info(struct 
> amdgpu_device *adev)
> } else {
> switch (platform_link_width) {
> case PCIE_LNK_X32:
> -   adev->pm.pcie_mlw_mask = 
> (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
> - 
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
> -   break;
> -   case PCIE_LNK_X16:
> adev->pm.pcie_mlw_mask = 
> (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | @@ -3779,13 +3770,14 @@ static void 
> amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
> break;
> -   case PCIE_LNK_X12:
> +   case PCIE_LNK_X16:

Not sure I understand this change or the one below.  If we have a x16 link, why 
don't you want CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 set?

Alex

> adev->pm.pcie_mlw_mask = 
> (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
> break;
> +   case PCIE_LNK_X12:
> case PCIE_LNK_X8:
> adev->pm.pcie_mlw_mask = 
> (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
>   
> CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/amdgpu: fix Polaris10 PCIe dpm feature issue.

2019-03-21 Thread Gui, Jack
Hi Alex,

Expand the fix to all asics, please help to review.

BR,
Jack Gui

-Original Message-
From: Alex Deucher  
Sent: Thursday, March 21, 2019 9:24 PM
To: Gui, Jack 
Cc: amd-gfx list 
Subject: Re: [PATCH] drm/amd/amdgpu: fix Polaris10 PCIe dpm feature issue.

On Thu, Mar 21, 2019 at 1:35 AM Chengming Gui  wrote:
>
> use pcie_bandwidth_available to get real link state to update pcie 
> table.
>
> Signed-off-by: Chengming Gui 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++
>  1 file changed, 7 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 964a4d3..df8e58b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3649,7 +3649,9 @@ static void 
> amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,  {
> struct pci_dev *pdev = adev->pdev;
> enum pci_bus_speed cur_speed;
> +   enum pci_bus_speed tmp_speed;
> enum pcie_link_width cur_width;
> +   u32 ret = 0;
>
> *speed = PCI_SPEED_UNKNOWN;
> *width = PCIE_LNK_WIDTH_UNKNOWN; @@ -3657,6 +3659,11 @@ static 
> void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
> while (pdev) {
> cur_speed = pcie_get_speed_cap(pdev);
> cur_width = pcie_get_width_cap(pdev);
> +   if (adev->asic_type == CHIP_POLARIS10)
> +   ret = pcie_bandwidth_available(adev->pdev, NULL,
> +  &tmp_speed, 
> + &cur_width);

Any reason to not just do this for all asics?

Alex

> +   if (!ret)
> +   cur_width = PCIE_LNK_WIDTH_RESRV;
>
> if (cur_speed != PCI_SPEED_UNKNOWN) {
> if (*speed == PCI_SPEED_UNKNOWN)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


0001-drm-amd-amdgpu-fix-PCIe-dpm-feature-issue-v3.patch
Description: 0001-drm-amd-amdgpu-fix-PCIe-dpm-feature-issue-v3.patch
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/amdgpu: fix Polaris10 PCIe dpm feature issue.

2019-03-21 Thread Gui, Jack
Attach correct patch again.

Hi Alex,

Expand the fix to all asics, please help to review.

BR,
Jack Gui

-Original Message-
From: Gui, Jack 
Sent: Friday, March 22, 2019 10:28 AM
To: 'Alex Deucher' ; Xu, Feifei ; 
Zhang, Hawking 
Cc: amd-gfx list 
Subject: RE: [PATCH] drm/amd/amdgpu: fix Polaris10 PCIe dpm feature issue.

Hi Alex,

Expand the fix to all asics, please help to review.

BR,
Jack Gui

-Original Message-
From: Alex Deucher 
Sent: Thursday, March 21, 2019 9:24 PM
To: Gui, Jack 
Cc: amd-gfx list 
Subject: Re: [PATCH] drm/amd/amdgpu: fix Polaris10 PCIe dpm feature issue.

On Thu, Mar 21, 2019 at 1:35 AM Chengming Gui  wrote:
>
> use pcie_bandwidth_available to get real link state to update pcie 
> table.
>
> Signed-off-by: Chengming Gui 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++
>  1 file changed, 7 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 964a4d3..df8e58b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3649,7 +3649,9 @@ static void
> amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,  {
> struct pci_dev *pdev = adev->pdev;
> enum pci_bus_speed cur_speed;
> +   enum pci_bus_speed tmp_speed;
> enum pcie_link_width cur_width;
> +   u32 ret = 0;
>
> *speed = PCI_SPEED_UNKNOWN;
> *width = PCIE_LNK_WIDTH_UNKNOWN; @@ -3657,6 +3659,11 @@ static 
> void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
> while (pdev) {
> cur_speed = pcie_get_speed_cap(pdev);
> cur_width = pcie_get_width_cap(pdev);
> +   if (adev->asic_type == CHIP_POLARIS10)
> +   ret = pcie_bandwidth_available(adev->pdev, NULL,
> +  &tmp_speed, 
> + &cur_width);

Any reason to not just do this for all asics?

Alex

> +   if (!ret)
> +   cur_width = PCIE_LNK_WIDTH_RESRV;
>
> if (cur_speed != PCI_SPEED_UNKNOWN) {
> if (*speed == PCI_SPEED_UNKNOWN)
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx


0001-drm-amd-amdgpu-fix-PCIe-dpm-feature-issue-v3.patch
Description: 0001-drm-amd-amdgpu-fix-PCIe-dpm-feature-issue-v3.patch
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH 2/2] drm/amd/powerplay: Enable "disable dpm" feature for vega20 power functions.

2019-05-12 Thread Gui, Jack
Hi Hawking,

If no other tools (sysfs interfaces have been controlled by adev->dpm_enabled) 
directly use the ppt functions, PATCH[1/2] can cover all the cases now, PATCH[2/2] 
can be dropped.

BR,
Jack Gui

-Original Message-
From: Zhang, Hawking  
Sent: Sunday, May 12, 2019 11:18 AM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: RE: [PATCH 2/2] drm/amd/powerplay: Enable "disable dpm" feature for 
vega20 power functions.

I'm wondering whether we are able to handle all asic-specific ppt functions 
gracefully. Adding a pm_enable check into the ppt functions would be tedious since we 
have to do similar jobs for each ASIC. Instead, is it possible to return early 
at the amdgpu_smu interface level? 

Take smu_handle_task for example: all the amdgpu_smu-level interfaces 
invoked from this function could be stopped if we check the smu pm_enable flag at 
the beginning of smu_handle_task. We probably have a better approach that eliminates 
the need for this patch.

Regards,
Hawking 

-Original Message-
From: amd-gfx  On Behalf Of Chengming Gui
Sent: 2019年5月10日 16:49
To: amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: [PATCH 2/2] drm/amd/powerplay: Enable "disable dpm" feature for vega20 
power functions.

[CAUTION: External Email]

use pm_enabled to control all power related functions about vega20.

Signed-off-by: Chengming Gui 
---
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 48 +++---
 1 file changed, 38 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 8fafcbd..7faa6a1 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -411,7 +411,8 @@ amd_pm_state_type vega20_get_current_power_state(struct 
smu_context *smu)
enum amd_pm_state_type pm_type;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

-   if (!smu_dpm_ctx->dpm_context ||
+   if (!smu->pm_enabled ||
+   !smu_dpm_ctx->dpm_context ||
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;

@@ -491,12 +492,14 @@ static void vega20_init_single_dpm_state(struct 
vega20_dpm_state *dpm_state)

 static int vega20_set_default_dpm_table(struct smu_context *smu)  {
-   int ret;
+   int ret = 0;

struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct vega20_dpm_table *dpm_table = NULL;
struct vega20_single_dpm_table *single_dpm_table;

+   if (smu->pm_enabled)
+   return ret;
dpm_table = smu_dpm->dpm_context;

/* socclk */
@@ -738,6 +741,8 @@ static int vega20_print_clk_levels(struct smu_context *smu,

dpm_table = smu_dpm->dpm_context;

+   if (!smu->pm_enabled)
+   return -EINVAL;
switch (type) {
case PP_SCLK:
ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, &now); @@ 
-969,6 +974,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, 
bool max,
uint32_t freq;
int ret = 0;

+   if (!smu->pm_enabled)
+   return -EINVAL;
dpm_table = smu->smu_dpm.dpm_context;

if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) && @@ -1058,6 
+1065,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;

+   if (!smu->pm_enabled)
+   return -EINVAL;
if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_info("force clock level is for dpm manual mode only.\n");
return -EINVAL;
@@ -1232,8 +1241,9 @@ static int vega20_get_clock_by_type_with_latency(struct 
smu_context *smu,

dpm_table = smu_dpm->dpm_context;

+   if (!smu->pm_enabled)
+   return -EINVAL;
mutex_lock(&smu->mutex);
-
switch (type) {
case amd_pp_sys_clock:
single_dpm_table = &(dpm_table->gfx_table); @@ -1265,6 +1275,8 
@@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context 
*smu,  {
int ret;

+   if (!smu->pm_enabled)
+   return -EINVAL;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetAVFSVoltageByDpm,
((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) 
| freq)); @@ -1287,7 +1299,7 @@ static int 
vega20_set_default_od8_setttings(struct smu_context *smu)
PPTable_t *smc_pptable = table_context->driver_pptable;
int i, ret;

-   if (table_context->od8_settings)
+   if (!smu->pm_enabled || table_context->od8_settings)
return -EINVAL;

table_context->od8_settings = kzalloc(sizeof(struct 
vega20_od8_settings), GFP_KERNEL); @@ -1474,6 +1486,8 @@ static int 
vega20_get_od_percentage(struct smu_cont

RE: [PATCH 1/2] drm/amd/powerplay: Enable "disable dpm" feature to support swSMU debug

2019-05-12 Thread Gui, Jack

Hi Hawking,

V2 patch attached: Return 0 directly and merge some check code.

Please help review again, thanks.

BR,
Jack Gui

_
From: Zhang, Hawking 
Sent: Sunday, May 12, 2019 11:18 AM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: RE: [PATCH 1/2] drm/amd/powerplay: Enable "disable dpm" feature to 
support swSMU debug


Please check my comments inline.

Regards,
Hawking
-Original Message-
From: amd-gfx 
mailto:amd-gfx-boun...@lists.freedesktop.org>>
 On Behalf Of Chengming Gui
Sent: 2019年5月10日 16:49
To: amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org>
Cc: Gui, Jack mailto:jack@amd.com>>
Subject: [PATCH 1/2] drm/amd/powerplay: Enable "disable dpm" feature to support 
swSMU debug

[CAUTION: External Email]

add pm_enabled to control the dpm off/on.

Signed-off-by: Chengming Gui mailto:jack@amd.com>>
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 34 +++---
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  1 +
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 34 ++
 3 files changed, 60 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 52d919a..99b2082 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -198,6 +198,8 @@ int smu_sys_set_pp_table(struct smu_context *smu,  void 
*buf, size_t size)
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;

+   if (!smu->pm_enabled)
+   return -EINVAL;
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -233,6 +235,8 @@ int smu_feature_init_dpm(struct smu_context *smu)
int ret = 0;
uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];

+   if (!smu->pm_enabled)
+   return ret;
mutex_lock(&feature->mutex);
bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
@@ -344,6 +348,7 @@ static int smu_early_init(void *handle)
struct smu_context *smu = &adev->smu;

smu->adev = adev;
+   smu->pm_enabled = amdgpu_dpm;
mutex_init(&smu->mutex);

return smu_set_funcs(adev);
@@ -353,6 +358,9 @@ static int smu_late_init(void *handle)  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+
+   if (!smu->pm_enabled)
+   return 0;
mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level, @@ -746,6 +754,9 @@ static int 
smu_smc_table_hw_init(struct smu_context *smu,
 */
ret = smu_set_tool_table_location(smu);

+   if (!smu_is_dpm_running(smu))
+   pr_info("dpm has been disabled\n");
+
return ret;
 }

@@ -861,7 +872,10 @@ static int smu_hw_init(void *handle)

mutex_unlock(&smu->mutex);

-   adev->pm.dpm_enabled = true;
+   if (!smu->pm_enabled)
+   adev->pm.dpm_enabled = false;
+   else
+   adev->pm.dpm_enabled = true;

pr_info("SMU is initialized successfully!\n");

@@ -879,6 +893,8 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;

+   if (!smu->pm_enabled)
+   return ret;
if (!is_support_sw_smu(adev))
return -EINVAL;

@@ -932,10 +948,12 @@ int smu_reset(struct smu_context *smu)

 static int smu_suspend(void *handle)
 {
-   int ret;
+   int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;

+   if (!smu->pm_enabled)
+   return ret;
[Hawking]: Just return 0 directly if dpm was disabled. Don't change the default 
return value to 0 which means resume complete successfully

if (!is_support_sw_smu(adev))
return -EINVAL;

@@ -950,10 +968,12 @@ static int smu_suspend(void *handle)

 static int smu_resume(void *handle)
 {
-   int ret;
+   int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;

+   if (!smu->pm_enabled)
+   return ret;
[Hawking]: similar as suspend, directly return 0 if dpm was disabled.

if (!is_support_sw_smu(adev))
return -EINVAL;

@@ -985,7 +1005,7 @@ int smu_display_configuration_change(struct smu_context 
*smu,
int index = 0;
int num_of_active_display = 0;

-   if (!is_support_sw_smu(smu->adev))
+   if (!smu->p

RE: [PATCH 2/3] drm/amd/powerplay: force sclk limit for peak profile

2019-07-18 Thread Gui, Jack
Hi Evan,
1, The hard-coded hack only serves profile_peak mode, and the (max_count - 1) 
level is always used for the GFX clock; we just force the limit value with data from 
the tool team.
2, The requirement from the tool team is to force the GFX clock limit value with 
different SKUs' clocks when entering profile peak mode, so the hack code was added 
when the clock adjust rules were applied.

From: Quan, Evan 
Sent: Thursday, July 18, 2019 6:32 PM
To: Gui, Jack ; amd-gfx@lists.freedesktop.org
Cc: Gui, Jack 
Subject: Re: [PATCH 2/3] drm/amd/powerplay: force sclk limit for peak profile

1. In navi10_force_clk_levels, i think you need to compare the max level user 
requested with the peak limit and set the smaller one.
2. can you help me to understand why the change in apply_clock_rules is needed?


发件人: amd-gfx 
mailto:amd-gfx-boun...@lists.freedesktop.org>>
 代表 Chengming Gui mailto:jack@amd.com>>
发送时间: Thursday, July 18, 2019 6:02:17 PM
收件人: amd-gfx@lists.freedesktop.org<mailto:amd-gfx@lists.freedesktop.org> 
mailto:amd-gfx@lists.freedesktop.org>>
抄送: Gui, Jack mailto:jack@amd.com>>
主题: [PATCH 2/3] drm/amd/powerplay: force sclk limit for peak profile

force different GFX clocks with different SKUs for navi10:
XL  (other rev_id):  1625MHz
XT (F1/C1):  1755MHz
XTX(F0/C0):  1830MHz

Signed-off-by: Chengming Gui mailto:jack@amd.com>>
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  2 +
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  2 +
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 66 +-
 3 files changed, 68 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 122985c..693414f 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -664,6 +664,8 @@ static int smu_sw_init(void *handle)
 smu->watermarks_bitmap = 0;
 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+   smu->smu_dpm.default_sclk_limit = 0;
+   smu->smu_dpm.peak_sclk_limit = 0;

 smu->workload_mask = 1 << 
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 135a323..acb522b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -441,6 +441,8 @@ struct smu_dpm_context {
 void *dpm_context;
 void *golden_dpm_context;
 bool enable_umd_pstate;
+   uint32_t default_sclk_limit;
+   uint32_t peak_sclk_limit;
 enum amd_dpm_forced_level dpm_level;
 enum amd_dpm_forced_level saved_dpm_level;
 enum amd_dpm_forced_level requested_dpm_level;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 895a4e5..b4deb9e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -37,6 +37,15 @@

 #include "asic_reg/mp/mp_11_0_sh_mask.h"

+#define NV_NV10_F0 0xF0
+#define NV_NV10_C0 0xC0
+#define NV_NV10_F1 0xF1
+#define NV_NV10_C1 0xC1
+
+#define NV_NV10_PEAK_SCLK_XTX 1830
+#define NV_NV10_PEAK_SCLK_XT  1755
+#define NV_NV10_PEAK_SCLK_XL  1625
+
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
 FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -675,6 +684,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,

 int ret = 0, size = 0;
 uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, 
max_freq = 0;
+   struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

 soft_min_level = mask ? (ffs(mask) - 1) : 0;
 soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -682,6 +692,23 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 switch (clk_type) {
 case SMU_GFXCLK:
 case SMU_SCLK:
+   if (smu_dpm_ctx->peak_sclk_limit) {
+   max_freq = smu_dpm_ctx->peak_sclk_limit;
+   ret = smu_get_dpm_freq_by_index(smu, clk_type, 
soft_min_level, &min_freq);
+   if (ret)
+   return size;
+   } else {
+   ret = smu_get_dpm_freq_by_index(smu, clk_type, 
soft_min_level, &min_freq);
+   if (ret)
+   return size;
+   ret = smu_get_dpm_freq_by_index(smu, clk_type, 
soft_max_level, &max_freq);
+   if (ret)
+   return size;
+   }
+   ret = smu_set_soft_f

RE: [PATCH] drm/amd/powerplay: custom peak clock freq for navi10

2019-07-18 Thread Gui, Jack
Reviewed-by: Jack Gui 

-Original Message-
From: Wang, Kevin(Yang)  
Sent: Friday, July 19, 2019 11:46 AM
To: amd-gfx@lists.freedesktop.org
Cc: Feng, Kenneth ; Quan, Evan ; 
Huang, Ray ; Xu, Feifei ; Gui, Jack 
; Wang, Kevin(Yang) 
Subject: [PATCH] drm/amd/powerplay: custom peak clock freq for navi10

1.NAVI10_PEAK_SCLK_XTX1830 Mhz
2.NAVI10_PEAK_SCLK_XT 1755 Mhz
3.NAVI10_PEAK_SCLK_XL 1625 Mhz

Change-Id: I48863a9d0e261b9e7778a6c0e4a8762d7c978da6
Signed-off-by: Kevin Wang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 65 ++-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  4 ++
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 55 
 drivers/gpu/drm/amd/powerplay/navi10_ppt.h|  4 ++
 4 files changed, 97 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 7f51bbd2ac90..ab389dde9562 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1360,37 +1360,40 @@ int smu_adjust_power_state_dynamic(struct smu_context 
*smu,
}
 
if (smu_dpm_ctx->dpm_level != level) {
-   switch (level) {
-   case AMD_DPM_FORCED_LEVEL_HIGH:
-   ret = smu_force_dpm_limit_value(smu, true);
-   break;
-   case AMD_DPM_FORCED_LEVEL_LOW:
-   ret = smu_force_dpm_limit_value(smu, false);
-   break;
-
-   case AMD_DPM_FORCED_LEVEL_AUTO:
-   case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
-   ret = smu_unforce_dpm_levels(smu);
-   break;
-
-   case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
-   case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
-   case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-   ret = smu_get_profiling_clk_mask(smu, level,
-&sclk_mask,
-&mclk_mask,
-&soc_mask);
-   if (ret)
-   return ret;
-   smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
-   smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
-   smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
-   break;
-
-   case AMD_DPM_FORCED_LEVEL_MANUAL:
-   case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
-   default:
-   break;
+   ret = smu_set_performance_level(smu, level);
+   if (ret) {
+   switch (level) {
+   case AMD_DPM_FORCED_LEVEL_HIGH:
+   ret = smu_force_dpm_limit_value(smu, true);
+   break;
+   case AMD_DPM_FORCED_LEVEL_LOW:
+   ret = smu_force_dpm_limit_value(smu, false);
+   break;
+
+   case AMD_DPM_FORCED_LEVEL_AUTO:
+   case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+   ret = smu_unforce_dpm_levels(smu);
+   break;
+
+   case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+   case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+   case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+   ret = smu_get_profiling_clk_mask(smu, level,
+&sclk_mask,
+&mclk_mask,
+&soc_mask);
+   if (ret)
+   return ret;
+   smu_force_clk_levels(smu, SMU_SCLK, 1 << 
sclk_mask);
+   smu_force_clk_levels(smu, SMU_MCLK, 1 << 
mclk_mask);
+   smu_force_clk_levels(smu, SMU_SOCCLK, 1 << 
soc_mask);
+   break;
+
+   case AMD_DPM_FORCED_LEVEL_MANUAL:
+   case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+   default:
+   break;
+   }
}
 
if (!ret)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 514d31518853..ba5ddafcbdba 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -631,6 +631,7 @@ struct pptable_funcs {
int (*get_thermal_temperature_range)(struct smu_context *smu, struct 
smu_tempe