[PATCH] drm/amdgpu: correct GART location info

2018-06-18 Thread Junwei Zhang
Signed-off-by: Junwei Zhang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f77b07b..f9fe8d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -676,17 +676,17 @@ void amdgpu_device_vram_location(struct amdgpu_device 
*adev,
 }
 
 /**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
  *
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we adjust GART size.
  * Thus function will never fails.
  *
- * FIXME: when reducing GTT size align new size on power of 2.
+ * FIXME: when reducing GART size align new size on power of 2.
  */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
 struct amdgpu_gmc *mc)
@@ -699,13 +699,13 @@ void amdgpu_device_gart_location(struct amdgpu_device 
*adev,
size_bf = mc->vram_start;
if (size_bf > size_af) {
if (mc->gart_size > size_bf) {
-   dev_warn(adev->dev, "limiting GTT\n");
+   dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_bf;
}
mc->gart_start = 0;
} else {
if (mc->gart_size > size_af) {
-   dev_warn(adev->dev, "limiting GTT\n");
+   dev_warn(adev->dev, "limiting GART\n");
mc->gart_size = size_af;
}
/* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -714,7 +714,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
}
mc->gart_end = mc->gart_start + mc->gart_size - 1;
-   dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+   dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
-- 
1.9.1
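
For reference, a minimal standalone sketch of the placement policy above (not
the kernel code; the 4GB alignment constant comes from the diff, other names
are illustrative):

#include <stdint.h>

struct mc_layout {
	uint64_t vram_start, vram_end;	/* inclusive VRAM range */
	uint64_t gart_start, gart_end;
	uint64_t gart_size;
};

/* Pick the larger gap around VRAM and clamp the GART size to it. */
static void place_gart(struct mc_layout *mc, uint64_t mc_mask)
{
	uint64_t size_af = mc_mask - mc->vram_end;	/* space after VRAM */
	uint64_t size_bf = mc->vram_start;		/* space before VRAM */

	if (size_bf > size_af) {
		if (mc->gart_size > size_bf)
			mc->gart_size = size_bf;	/* "limiting GART" */
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af)
			mc->gart_size = size_af;
		/* 4GB-align so BOs never cross a 4GB segment (VCE quirk) */
		mc->gart_start = (mc->vram_end + 0x100000000ULL) &
				 ~(0x100000000ULL - 1);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
}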



Re: [PATCH 1/5] drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency

2018-06-18 Thread Harry Wentland
On 2018-06-18 07:18 AM, Rex Zhu wrote:
> Display component can get tru max_displ_clk_in_khz instand of hardcode
> 
> Signed-off-by: Rex Zhu 

Mikita has pretty much the same patch (part of the DC patchset going out 
tomorrow) and did the *10 on our end. I prefer your version, though, as that 
multiplication should probably come from powerplay. Haven't had a ton of time 
to look at this patchset closely today but will do so tomorrow with Mikita and 
get back to you.

Harry

> ---
>  .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 45 
> +-
>  1 file changed, 43 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> index 5a33461..37f6a5f 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> @@ -261,6 +261,33 @@ static void pp_to_dc_clock_levels_with_latency(
>   }
>  }
>  
> +static void pp_to_dc_clock_levels_with_voltage(
> + const struct pp_clock_levels_with_voltage *pp_clks,
> + struct dm_pp_clock_levels_with_voltage *clk_level_info,
> + enum dm_pp_clock_type dc_clk_type)
> +{
> + uint32_t i;
> +
> + if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
> + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d 
> exceeds maximum of %d!\n",
> + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
> + pp_clks->num_levels,
> + DM_PP_MAX_CLOCK_LEVELS);
> +
> + clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
> + } else
> + clk_level_info->num_levels = pp_clks->num_levels;
> +
> + DRM_DEBUG("DM_PPLIB: values for %s clock\n",
> + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
> +
> + for (i = 0; i < clk_level_info->num_levels; i++) {
> + DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
> + clk_level_info->data[i].clocks_in_khz = 
> pp_clks->data[i].clocks_in_khz;
> + clk_level_info->data[i].voltage_in_mv = 
> pp_clks->data[i].voltage_in_mv;
> + }
> +}
> +
>  bool dm_pp_get_clock_levels_by_type(
>   const struct dc_context *ctx,
>   enum dm_pp_clock_type clk_type,
> @@ -361,8 +388,22 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
>   enum dm_pp_clock_type clk_type,
>   struct dm_pp_clock_levels_with_voltage *clk_level_info)
>  {
> - /* TODO: to be implemented */
> - return false;
> + struct amdgpu_device *adev = ctx->driver_context;
> + void *pp_handle = adev->powerplay.pp_handle;
> + struct pp_clock_levels_with_voltage pp_clks = { 0 };
> + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> +
> + if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
> + return false;
> +
> + if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
> +  
> dc_to_pp_clock_type(clk_type),
> +  &pp_clks))
> + return false;
> +
> + pp_to_dc_clock_levels_with_voltage(&pp_clks, clk_level_info, clk_type);
> +
> + return true;
>  }
>  
>  bool dm_pp_notify_wm_clock_changes(
> 
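
For context, a hypothetical DC-side caller of the now-implemented interface
(DM_PP_CLOCK_TYPE_DISPLAY_CLK and the surrounding variables are assumptions
for illustration, not part of the patch):

	struct dm_pp_clock_levels_with_voltage clks = {0};
	uint32_t max_disp_clk_khz = 0;

	/* Query voltage-annotated levels; the top level is the true
	 * maximum display clock, replacing the old hardcoded value. */
	if (dm_pp_get_clock_levels_by_type_with_voltage(dc->ctx,
			DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks) &&
	    clks.num_levels > 0)
		max_disp_clk_khz = clks.data[clks.num_levels - 1].clocks_in_khz;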


Re: [PATCH 5/5] drm/amd/display: Refine the interface dm_pp_notify_wm_clock_changes

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 7:18 AM, Rex Zhu  wrote:
> Change the function parameter type from dm_pp_wm_sets_with_clock_ranges *
> to void * so this interface can be supported on AI/RV.
>
> Signed-off-by: Rex Zhu 

Acked-by: Alex Deucher 
but should probably be approved by the DC team as well.

Alex

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 2 +-
>  drivers/gpu/drm/amd/display/dc/dm_services.h   | 2 +-
>  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +-
>  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  | 6 +++---
>  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  | 4 ++--
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 3 ++-
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 ++-
>  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h| 2 +-
>  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  | 3 +--
>  10 files changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> index 37f6a5f..92d36fe 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> @@ -408,7 +408,7 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
>
>  bool dm_pp_notify_wm_clock_changes(
> const struct dc_context *ctx,
> -   struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
> +   void *clock_ranges)
>  {
> /* TODO: to be implemented */
> return false;
> diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h 
> b/drivers/gpu/drm/amd/display/dc/dm_services.h
> index 4ff9b2b..535b415 100644
> --- a/drivers/gpu/drm/amd/display/dc/dm_services.h
> +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
> @@ -217,7 +217,7 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
>
>  bool dm_pp_notify_wm_clock_changes(
> const struct dc_context *ctx,
> -   struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
> +   void *clock_ranges);
>
>  void dm_pp_get_funcs_rv(struct dc_context *ctx,
> struct pp_smu_funcs_rv *funcs);
> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h 
> b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> index 4535756..06f7ef2 100644
> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> @@ -261,7 +261,7 @@ struct amd_pm_funcs {
> enum amd_pp_clock_type type,
> struct pp_clock_levels_with_voltage *clocks);
> int (*set_watermarks_for_clocks_ranges)(void *handle,
> -   struct pp_wm_sets_with_clock_ranges_soc15 
> *wm_with_clock_ranges);
> +   void *clock_ranges);
> int (*display_clock_voltage_request)(void *handle,
> struct pp_display_clock_request *clock);
> int (*get_display_mode_validation_clocks)(void *handle,
> diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
> b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> index 7b0ff9d..ba5e0e2 100644
> --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
> @@ -1103,17 +1103,17 @@ static int pp_get_clock_by_type_with_voltage(void 
> *handle,
>  }
>
>  static int pp_set_watermarks_for_clocks_ranges(void *handle,
> -   struct pp_wm_sets_with_clock_ranges_soc15 
> *wm_with_clock_ranges)
> +   void *clock_ranges)
>  {
> struct pp_hwmgr *hwmgr = handle;
> int ret = 0;
>
> -   if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
> +   if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
> return -EINVAL;
>
> mutex_lock(&hwmgr->smu_lock);
> ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
> -   wm_with_clock_ranges);
> +   clock_ranges);
> mutex_unlock(&hwmgr->smu_lock);
>
> return ret;
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> index a0bb921..53207e7 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
> @@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr 
> *hwmgr,
>  }
>
>  int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
> -   struct pp_wm_sets_with_clock_ranges_soc15 
> *wm_with_clock_ranges)
> +   void *clock_ranges)
>  {
> PHM_FUNC_CHECK(hwmgr);
>
> @@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr 
> *hwmgr,
> return -EINVAL;
>
> return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
> -   
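
The point of the void * is that each hwmgr backend can reinterpret the opaque
pointer in its own watermark layout; a sketch of what a soc15 implementation
might do (hypothetical body, not part of this patch):

static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
						  void *clock_ranges)
{
	/* soc15 parts interpret the opaque pointer as the soc15 layout */
	struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges =
		clock_ranges;

	/* ... program watermarks from wm_with_clock_ranges ... */
	return 0;
}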

Re: [PATCH 2/5] drm/amd/pp: Fix wrong clock-unit exported to Display

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 7:18 AM, Rex Zhu  wrote:
> Convert the 10KHz units reported by the SMU to the KHz units needed by
> the Display component.
>
> This fixes an issue where a 4k monitor can't be lit up on Vega/Raven.
>
> Signed-off-by: Rex Zhu 

Acked-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  |  4 ++--
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 10 +-
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 +-
>  3 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
> index 6d5e042..08690c9 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
> @@ -1014,7 +1014,7 @@ static int smu10_get_clock_by_type_with_latency(struct 
> pp_hwmgr *hwmgr,
>
> clocks->num_levels = 0;
> for (i = 0; i < pclk_vol_table->count; i++) {
> -   clocks->data[i].clocks_in_khz = 
> pclk_vol_table->entries[i].clk;
> +   clocks->data[i].clocks_in_khz = 
> pclk_vol_table->entries[i].clk * 10;
> clocks->data[i].latency_in_us = latency_required ?
> smu10_get_mem_latency(hwmgr,
> 
> pclk_vol_table->entries[i].clk) :
> @@ -1065,7 +1065,7 @@ static int smu10_get_clock_by_type_with_voltage(struct 
> pp_hwmgr *hwmgr,
>
> clocks->num_levels = 0;
> for (i = 0; i < pclk_vol_table->count; i++) {
> -   clocks->data[i].clocks_in_khz = 
> pclk_vol_table->entries[i].clk;
> +   clocks->data[i].clocks_in_khz = 
> pclk_vol_table->entries[i].clk  * 10;
> clocks->data[i].voltage_in_mv = 
> pclk_vol_table->entries[i].vol;
> clocks->num_levels++;
> }
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index f97fce6..6057b59 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -4067,7 +4067,7 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
> for (i = 0; i < dep_table->count; i++) {
> if (dep_table->entries[i].clk) {
> clocks->data[clocks->num_levels].clocks_in_khz =
> -   dep_table->entries[i].clk;
> +   dep_table->entries[i].clk * 10;
> clocks->num_levels++;
> }
> }
> @@ -4104,7 +4104,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
> clocks->data[clocks->num_levels].clocks_in_khz =
> data->mclk_latency_table.entries
> [data->mclk_latency_table.count].frequency =
> -   dep_table->entries[i].clk;
> +   dep_table->entries[i].clk * 10;
> clocks->data[clocks->num_levels].latency_in_us =
> data->mclk_latency_table.entries
> [data->mclk_latency_table.count].latency =
> @@ -4126,7 +4126,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr 
> *hwmgr,
> uint32_t i;
>
> for (i = 0; i < dep_table->count; i++) {
> -   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
> +   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 
> 10;
> clocks->data[i].latency_in_us = 0;
> clocks->num_levels++;
> }
> @@ -4142,7 +4142,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
> uint32_t i;
>
> for (i = 0; i < dep_table->count; i++) {
> -   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
> +   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 
> 10;
> clocks->data[i].latency_in_us = 0;
> clocks->num_levels++;
> }
> @@ -4202,7 +4202,7 @@ static int vega10_get_clock_by_type_with_voltage(struct 
> pp_hwmgr *hwmgr,
> }
>
> for (i = 0; i < dep_table->count; i++) {
> -   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
> +   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk  * 
> 10;
> clocks->data[i].voltage_in_mv = 
> (uint32_t)(table_info->vddc_lookup_table->
> entries[dep_table->entries[i].vddInd].us_vdd);
> clocks->num_levels++;
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> index 782e209..d685ce7 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> @@ -1576,7 +1576,7 @@ static int vega12_get_sclks(struct 

Re: [PATCH 4/5] drm/amd/display: Delete old implementation of bw_calcs_data_update_from_pplib

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 7:18 AM, Rex Zhu  wrote:
> This function was copied from dce112; it is not valid for AI/RV.
> The driver needs to re-implement this function.

Maybe it's similar enough to be ok for now?  What's better? Harry?

Alex

>
> Signed-off-by: Rex Zhu 
> ---
>  .../drm/amd/display/dc/dce120/dce120_resource.c| 123 
> +
>  1 file changed, 1 insertion(+), 122 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
> b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
> index 2d58dac..450f7ec 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
> @@ -26,7 +26,6 @@
>
>  #include "dm_services.h"
>
> -
>  #include "stream_encoder.h"
>  #include "resource.h"
>  #include "include/irq_service_interface.h"
> @@ -691,127 +690,7 @@ static void dce120_destroy_resource_pool(struct 
> resource_pool **pool)
>
>  static void bw_calcs_data_update_from_pplib(struct dc *dc)
>  {
> -   struct dm_pp_clock_levels_with_latency eng_clks = {0};
> -   struct dm_pp_clock_levels_with_latency mem_clks = {0};
> -   struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
> -   int i;
> -   unsigned int clk;
> -   unsigned int latency;
> -
> -   /*do system clock*/
> -   if (!dm_pp_get_clock_levels_by_type_with_latency(
> -   dc->ctx,
> -   DM_PP_CLOCK_TYPE_ENGINE_CLK,
> -   &eng_clks) || eng_clks.num_levels == 0) {
> -
> -   eng_clks.num_levels = 8;
> -   clk = 300000;
> -
> -   for (i = 0; i < eng_clks.num_levels; i++) {
> -   eng_clks.data[i].clocks_in_khz = clk;
> -   clk += 100000;
> -   }
> -   }
> -
> -   /* convert all the clock fro kHz to fix point mHz  TODO: wloop data */
> -   dc->bw_vbios->high_sclk = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid1_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid2_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid3_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid4_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid5_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->mid6_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
> -   dc->bw_vbios->low_sclk  = bw_frc_to_fixed(
> -   eng_clks.data[0].clocks_in_khz, 1000);
> -
> -   /*do memory clock*/
> -   if (!dm_pp_get_clock_levels_by_type_with_latency(
> -   dc->ctx,
> -   DM_PP_CLOCK_TYPE_MEMORY_CLK,
> -   &mem_clks) || mem_clks.num_levels == 0) {
> -
> -   mem_clks.num_levels = 3;
> -   clk = 250000;
> -   latency = 45;
> -
> -   for (i = 0; i < eng_clks.num_levels; i++) {
> -   mem_clks.data[i].clocks_in_khz = clk;
> -   mem_clks.data[i].latency_in_us = latency;
> -   clk += 500000;
> -   latency -= 5;
> -   }
> -
> -   }
> -
> -   /* we don't need to call PPLIB for validation clock since they
> -* also give us the highest sclk and highest mclk (UMA clock).
> -* ALSO always convert UMA clock (from PPLIB)  to YCLK (HW formula):
> -* YCLK = UMACLK*m_memoryTypeMultiplier
> -*/
> -   dc->bw_vbios->low_yclk = bw_frc_to_fixed(
> -   mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 
> 1000);
> -   dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
> -   mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * 
> MEMORY_TYPE_MULTIPLIER,
> -   1000);
> -   dc->bw_vbios->high_yclk = bw_frc_to_fixed(
> -   mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * 
> MEMORY_TYPE_MULTIPLIER,
> -   1000);
> -
> -   /* Now notify PPLib/SMU about which Watermarks sets they should select
> -* depending on DPM state they are in. And update BW MGR GFX Engine 
> and
> -* Memory clock member variables for Watermarks calculations for each
> -* Watermark Set
> -*/
> -   clk_ranges.num_wm_sets = 4;
> -   clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
> -   clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
> -   eng_clks.data[0].clocks_in_khz;
> -   

Re: [PATCH 3/5] drm/amd/pp: Memory Latency is always 25us on Vega10

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 7:18 AM, Rex Zhu  wrote:
> Also use the tolerable latency defined in Display
> to find the lowest MCLK frequency when mclk switching is disabled

This should probably be two patches?  One to fix the memory latency,
and one to switch the tolerable latency for display.  With that fixed:
Acked-by: Alex Deucher 

>
> Signed-off-by: Rex Zhu 
> ---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 24 
> ++
>  1 file changed, 2 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
> b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index 6057b59..198c7ed 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -55,12 +55,6 @@
>
>  static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
>
> -#define MEM_FREQ_LOW_LATENCY25000
> -#define MEM_FREQ_HIGH_LATENCY   80000
> -#define MEM_LATENCY_HIGH245
> -#define MEM_LATENCY_LOW 35
> -#define MEM_LATENCY_ERR 0xFFFF
> -
>  #define mmDF_CS_AON0_DramBaseAddress0
>   0x0044
>  #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX   
>   0
>
> @@ -3223,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct 
> pp_hwmgr *hwmgr,
> /* Find the lowest MCLK frequency that is within
>  * the tolerable latency defined in DAL
>  */
> -   latency = 0;
> +   latency = 
> hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
> for (i = 0; i < data->mclk_latency_table.count; i++) {
> if ((data->mclk_latency_table.entries[i].latency <= 
> latency) &&
> 
> (data->mclk_latency_table.entries[i].frequency >=
> @@ -4074,18 +4068,6 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
>
>  }
>
> -static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
> -   uint32_t clock)
> -{
> -   if (clock >= MEM_FREQ_LOW_LATENCY &&
> -   clock < MEM_FREQ_HIGH_LATENCY)
> -   return MEM_LATENCY_HIGH;
> -   else if (clock >= MEM_FREQ_HIGH_LATENCY)
> -   return MEM_LATENCY_LOW;
> -   else
> -   return MEM_LATENCY_ERR;
> -}
> -
>  static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
> struct pp_clock_levels_with_latency *clocks)
>  {
> @@ -4107,9 +4089,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
> dep_table->entries[i].clk * 10;
> clocks->data[clocks->num_levels].latency_in_us =
> data->mclk_latency_table.entries
> -   [data->mclk_latency_table.count].latency =
> -   vega10_get_mem_latency(hwmgr,
> -   dep_table->entries[i].clk);
> +   [data->mclk_latency_table.count].latency = 25;
> clocks->num_levels++;
> data->mclk_latency_table.count++;
> }
> --
> 1.9.1
>
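
A compressed sketch of the selection rule the patch converges on (simplified
and illustrative; with a uniform 25 us latency, any level passes a typical
DAL tolerance, so the loop reduces to the lowest frequency above the floor):

struct mclk_entry {
	uint32_t frequency;	/* in 10 kHz units */
	uint32_t latency;	/* in us; always 25 on Vega10 after this patch */
};

static uint32_t pick_lowest_mclk(const struct mclk_entry *e, int count,
				 uint32_t tolerable_us, uint32_t min_freq)
{
	uint32_t best = e[count - 1].frequency;	/* fall back to highest */
	int i;

	for (i = 0; i < count; i++) {
		if (e[i].latency <= tolerable_us &&
		    e[i].frequency >= min_freq) {
			best = e[i].frequency;	/* table sorted ascending */
			break;
		}
	}
	return best;
}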


Re: [PATCH 1/5] drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 7:18 AM, Rex Zhu  wrote:
> Display component can get tru max_displ_clk_in_khz instand of hardcode

tru -> true
With that fixed:
Acked-by: Alex Deucher 

>
> Signed-off-by: Rex Zhu 
> ---
>  .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 45 
> +-
>  1 file changed, 43 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> index 5a33461..37f6a5f 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> @@ -261,6 +261,33 @@ static void pp_to_dc_clock_levels_with_latency(
> }
>  }
>
> +static void pp_to_dc_clock_levels_with_voltage(
> +   const struct pp_clock_levels_with_voltage *pp_clks,
> +   struct dm_pp_clock_levels_with_voltage *clk_level_info,
> +   enum dm_pp_clock_type dc_clk_type)
> +{
> +   uint32_t i;
> +
> +   if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
> +   DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d 
> exceeds maximum of %d!\n",
> +   DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
> +   pp_clks->num_levels,
> +   DM_PP_MAX_CLOCK_LEVELS);
> +
> +   clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
> +   } else
> +   clk_level_info->num_levels = pp_clks->num_levels;
> +
> +   DRM_DEBUG("DM_PPLIB: values for %s clock\n",
> +   DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
> +
> +   for (i = 0; i < clk_level_info->num_levels; i++) {
> +   DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
> +   clk_level_info->data[i].clocks_in_khz = 
> pp_clks->data[i].clocks_in_khz;
> +   clk_level_info->data[i].voltage_in_mv = 
> pp_clks->data[i].voltage_in_mv;
> +   }
> +}
> +
>  bool dm_pp_get_clock_levels_by_type(
> const struct dc_context *ctx,
> enum dm_pp_clock_type clk_type,
> @@ -361,8 +388,22 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
> enum dm_pp_clock_type clk_type,
> struct dm_pp_clock_levels_with_voltage *clk_level_info)
>  {
> -   /* TODO: to be implemented */
> -   return false;
> +   struct amdgpu_device *adev = ctx->driver_context;
> +   void *pp_handle = adev->powerplay.pp_handle;
> +   struct pp_clock_levels_with_voltage pp_clks = { 0 };
> +   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
> +
> +   if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
> +   return false;
> +
> +   if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
> +
> dc_to_pp_clock_type(clk_type),
> +&pp_clks))
> +   return false;
> +
> +   pp_to_dc_clock_levels_with_voltage(&pp_clks, clk_level_info, 
> clk_type);
> +
> +   return true;
>  }
>
>  bool dm_pp_notify_wm_clock_changes(
> --
> 1.9.1
>


Re: [PATCH] drm/amdgpu:All UVD instances share one idle_work handle

2018-06-18 Thread Alex Deucher
On Mon, Jun 18, 2018 at 2:00 PM, James Zhu  wrote:
> All UVD instances have only one dpm control, so it is better
> to share one idle_work handle.
>
> Signed-off-by: James Zhu 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  2 +-
>  2 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 04d77f1..cc15d32 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
> unsigned family_id;
> int i, j, r;
>
> -   INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, 
> amdgpu_uvd_idle_work_handler);
> +   INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
>
> switch (adev->asic_type) {
>  #ifdef CONFIG_DRM_AMDGPU_CIK
> @@ -331,12 +331,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
> void *ptr;
> int i, j;
>
> +   cancel_delayed_work_sync(&adev->uvd.idle_work);
> +
> for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
> if (adev->uvd.inst[j].vcpu_bo == NULL)
> continue;
>
> -   cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
> -
> /* only valid for physical mode */
> if (adev->asic_type < CHIP_POLARIS10) {
> for (i = 0; i < adev->uvd.max_handles; ++i)
> @@ -1162,7 +1162,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring 
> *ring, uint32_t handle,
>  static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>  {
> struct amdgpu_device *adev =
> -   container_of(work, struct amdgpu_device, 
> uvd.inst->idle_work.work);
> +   container_of(work, struct amdgpu_device, uvd.idle_work.work);
> unsigned fences = 0, i, j;
>
> for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
> @@ -1184,7 +1184,7 @@ static void amdgpu_uvd_idle_work_handler(struct 
> work_struct *work)
>
> AMD_CG_STATE_GATE);
> }
> } else {
> -   schedule_delayed_work(&adev->uvd.inst->idle_work, 
> UVD_IDLE_TIMEOUT);
> +   schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
> }
>  }
>
> @@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
> if (amdgpu_sriov_vf(adev))
> return;
>
> -   set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
> +   set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
> if (set_clocks) {
> if (adev->pm.dpm_enabled) {
> amdgpu_dpm_enable_uvd(adev, true);
> @@ -1213,7 +1213,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
>  void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
>  {
> if (!amdgpu_sriov_vf(ring->adev))
> -   schedule_delayed_work(&ring->adev->uvd.inst->idle_work, 
> UVD_IDLE_TIMEOUT);
> +   schedule_delayed_work(&ring->adev->uvd.idle_work, 
> UVD_IDLE_TIMEOUT);
>  }
>
>  /**
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> index b1579fb..8b23a1b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
> @@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
> void*saved_bo;
> atomic_thandles[AMDGPU_MAX_UVD_HANDLES];
> struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
> -   struct delayed_work idle_work;
> struct amdgpu_ring  ring;
> struct amdgpu_ring  ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
> struct amdgpu_irq_src   irq;
> @@ -62,6 +61,7 @@ struct amdgpu_uvd {
> booladdress_64_bit;
> booluse_ctx_buf;
> struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
> +   struct delayed_work idle_work;
>  };
>
>  int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
> --
> 2.7.4
>


[PATCH] drm/amdgpu:All UVD instances share one idle_work handle

2018-06-18 Thread James Zhu
All UVD instances have only one dpm control, so it is better
to share one idle_work handle.

Signed-off-by: James Zhu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  2 +-
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 04d77f1..cc15d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned family_id;
int i, j, r;
 
-   INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, 
amdgpu_uvd_idle_work_handler);
+   INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -331,12 +331,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
void *ptr;
int i, j;
 
+   cancel_delayed_work_sync(&adev->uvd.idle_work);
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
 
-   cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1162,7 +1162,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, 
uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
struct amdgpu_device *adev =
-   container_of(work, struct amdgpu_device, 
uvd.inst->idle_work.work);
+   container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = 0, i, j;
 
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1184,7 +1184,7 @@ static void amdgpu_uvd_idle_work_handler(struct 
work_struct *work)
   
AMD_CG_STATE_GATE);
}
} else {
-   schedule_delayed_work(&adev->uvd.inst->idle_work, 
UVD_IDLE_TIMEOUT);
+   schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
 }
 
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return;
 
-   set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+   set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@@ -1213,7 +1213,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
if (!amdgpu_sriov_vf(ring->adev))
-   schedule_delayed_work(&ring->adev->uvd.inst->idle_work, 
UVD_IDLE_TIMEOUT);
+   schedule_delayed_work(&ring->adev->uvd.idle_work, 
UVD_IDLE_TIMEOUT);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fb..8b23a1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
void*saved_bo;
atomic_thandles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
-   struct delayed_work idle_work;
struct amdgpu_ring  ring;
struct amdgpu_ring  ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src   irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
booladdress_64_bit;
booluse_ctx_buf;
struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
+   struct delayed_work idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
-- 
2.7.4
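
The pattern being adopted, reduced to a minimal sketch (hypothetical names,
not the kernel code): one device-level delayed work polls every instance and
only releases the single shared DPM control once all instances are idle.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define IDLE_TIMEOUT msecs_to_jiffies(1000)	/* illustrative timeout */

struct my_inst { /* per-instance state */ };

struct my_dev {
	struct delayed_work idle_work;	/* one handle for all instances */
	struct my_inst inst[2];
	unsigned int num_inst;
};

unsigned int pending_fences(struct my_inst *inst);	/* hypothetical */
void power_down(struct my_dev *dev);			/* hypothetical */

static void idle_work_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < dev->num_inst; ++i)
		fences += pending_fences(&dev->inst[i]);

	if (fences == 0)
		power_down(dev);	/* the one shared DPM control */
	else
		schedule_delayed_work(&dev->idle_work, IDLE_TIMEOUT);
}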



Re: [PATCH v2 3/4] drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate

2018-06-18 Thread Christian König

On 18.06.2018 at 18:52, Michel Dänzer wrote:

From: Michel Dänzer 

Even BOs with AMDGPU_GEM_CREATE_NO_CPU_ACCESS may end up at least
partially in CPU visible VRAM, in particular when all VRAM is visible.

v2:
* Don't take VRAM mgr spinlock, not needed (Christian König)
* Make loop logic simpler and clearer.

Cc: sta...@vger.kernel.org
Signed-off-by: Michel Dänzer 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20 ++--
  1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ae0049c6c52c..b6333f92ba45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -106,10 +106,26 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device 
*adev,
   */
  u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
  {
-   if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct ttm_mem_reg *mem = &bo->tbo.mem;
+   struct drm_mm_node *nodes = mem->mm_node;
+   unsigned pages = mem->num_pages;
+   u64 usage = 0;
+
+   if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+   return 0;
+
+   if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return amdgpu_bo_size(bo);
  
-	return 0;

+   while (nodes && pages) {
+   usage += nodes->size << PAGE_SHIFT;
+   usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+   pages -= nodes->size;
+   ++nodes;
+   }
+
+   return usage;
  }
  
  /**




[PATCH v2 3/4] drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate

2018-06-18 Thread Michel Dänzer
From: Michel Dänzer 

Even BOs with AMDGPU_GEM_CREATE_NO_CPU_ACCESS may end up at least
partially in CPU visible VRAM, in particular when all VRAM is visible.

v2:
* Don't take VRAM mgr spinlock, not needed (Christian König)
* Make loop logic simpler and clearer.

Cc: sta...@vger.kernel.org
Signed-off-by: Michel Dänzer 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 20 ++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ae0049c6c52c..b6333f92ba45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -106,10 +106,26 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device 
*adev,
  */
 u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
 {
-   if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+   struct ttm_mem_reg *mem = &bo->tbo.mem;
+   struct drm_mm_node *nodes = mem->mm_node;
+   unsigned pages = mem->num_pages;
+   u64 usage = 0;
+
+   if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+   return 0;
+
+   if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return amdgpu_bo_size(bo);
 
-   return 0;
+   while (nodes && pages) {
+   usage += nodes->size << PAGE_SHIFT;
+   usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+   pages -= nodes->size;
+   ++nodes;
+   }
+
+   return usage;
 }
 
 /**
-- 
2.17.1
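
A worked toy example of the node walk (standalone, made-up sizes): a 64 MiB
BO whose single node straddles a 256 MiB visible window is now reported as
32 MiB invisible instead of 0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t visible    = 256ull << 20;	/* CPU-visible VRAM */
	uint64_t node_start = visible - (32ull << 20);
	uint64_t node_size  = 64ull << 20;	/* one 64 MiB node */
	uint64_t node_end   = node_start + node_size;

	/* per-node visible portion, as amdgpu_vram_mgr_vis_size() derives it */
	uint64_t vis = node_start >= visible ? 0 :
		       (node_end <= visible ? node_size :
					      visible - node_start);

	printf("invisible: %llu MiB\n",
	       (unsigned long long)((node_size - vis) >> 20));	/* 32 MiB */
	return 0;
}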



Re: [PATCH 1/2] amdgpu: display: use modern ktime accessors

2018-06-18 Thread Harry Wentland
On 2018-06-18 11:35 AM, Arnd Bergmann wrote:
> getrawmonotonic64() is deprecated because of the nonstandard naming.
> 
> The replacement function ktime_get_raw_ns() also simplifies the callers.
> 
> Signed-off-by: Arnd Bergmann 

Reviewed-by: Harry Wentland 

Harry

> ---
>  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 
>  drivers/gpu/drm/amd/display/dc/dm_services.h   | 5 -
>  2 files changed, 4 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> index 5a3346124a01..e861929dd981 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
> @@ -35,14 +35,6 @@
>  #include "amdgpu_dm_irq.h"
>  #include "amdgpu_pm.h"
>  
> -unsigned long long dm_get_timestamp(struct dc_context *ctx)
> -{
> - struct timespec64 time;
> -
> - getrawmonotonic64(&time);
> - return timespec64_to_ns(&time);
> -}
> -
>  unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
>   unsigned long long current_time_stamp,
>   unsigned long long last_time_stamp)
> diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h 
> b/drivers/gpu/drm/amd/display/dc/dm_services.h
> index 4ff9b2bba178..eb5ab3978e84 100644
> --- a/drivers/gpu/drm/amd/display/dc/dm_services.h
> +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
> @@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned 
> int controller_id);
>  #define dm_log_to_buffer(buffer, size, fmt, args)\
>   vsnprintf(buffer, size, fmt, args)
>  
> -unsigned long long dm_get_timestamp(struct dc_context *ctx);
> +static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
> +{
> + return ktime_get_raw_ns();
> +}
>  
>  unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
>   unsigned long long current_time_stamp,
> 


[PATCH 2/2] amdgpu: kfd: use modern ktime accessors

2018-06-18 Thread Arnd Bergmann
getrawmonotonic64() and get_monotonic_boottime64() are deprecated
because of the nonstandard naming.

The replacement functions ktime_get_raw_ns() and ktime_get_boot_ns()
also simplify the callers.

Signed-off-by: Arnd Bergmann 
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f64c5551cdba..7e717716b90e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -754,7 +754,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 {
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
-   struct timespec64 time;
 
dev = kfd_device_by_id(args->gpu_id);
if (dev)
@@ -766,11 +765,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
args->gpu_clock_counter = 0;
 
/* No access to rdtsc. Using raw monotonic time */
-   getrawmonotonic64(&time);
-   args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
-   get_monotonic_boottime64(&time);
-   args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+   args->cpu_clock_counter = ktime_get_raw_ns();
+   args->system_clock_counter = ktime_get_boot_ns();
 
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
-- 
2.9.0
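
The equivalence the conversion relies on, as a side-by-side sketch (kernel
context assumed; illustrative only):

/* Before: deprecated accessor plus a timespec64 round trip. */
u64 stamp_old(void)
{
	struct timespec64 time;

	getrawmonotonic64(&time);
	return timespec64_to_ns(&time);
}

/* After: the same raw monotonic clock, already in nanoseconds. */
u64 stamp_new(void)
{
	return ktime_get_raw_ns();
}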



[PATCH 1/2] amdgpu: display: use modern ktime accessors

2018-06-18 Thread Arnd Bergmann
getrawmonotonic64() is deprecated because of the nonstandard naming.

The replacement function ktime_get_raw_ns() also simplifies the callers.

Signed-off-by: Arnd Bergmann 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 
 drivers/gpu/drm/amd/display/dc/dm_services.h   | 5 -
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a3346124a01..e861929dd981 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -35,14 +35,6 @@
 #include "amdgpu_dm_irq.h"
 #include "amdgpu_pm.h"
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
-   struct timespec64 time;
-
-   getrawmonotonic64(&time);
-   return timespec64_to_ns(&time);
-}
-
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
unsigned long long last_time_stamp)
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h 
b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 4ff9b2bba178..eb5ab3978e84 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int 
controller_id);
 #define dm_log_to_buffer(buffer, size, fmt, args)\
vsnprintf(buffer, size, fmt, args)
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+   return ktime_get_raw_ns();
+}
 
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
unsigned long long current_time_stamp,
-- 
2.9.0



Re: [PATCH] drm/amdgpu: use first uvd instance to avoid clang build error

2018-06-18 Thread Alex Deucher
On Sun, Jun 17, 2018 at 10:03 PM, James Zhu  wrote:
>
>
> On 2018-06-17 04:52 AM, Stefan Agner wrote:
>>
>> Explicitly use the first uvd instance to avoid a build error when
>> using clang 6:
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c:1148:52: error: expected ')'
>>  container_of(work, struct amdgpu_device,
>> uvd.inst->idle_work.work);
>>   ^
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c:1148:3: note: to match this '('
>>  container_of(work, struct amdgpu_device,
>> uvd.inst->idle_work.work);
>>  ^
>> ./include/linux/kernel.h:967:21: note: expanded from macro 'container_of'
>>  ((type *)(__mptr - offsetof(type, member))); })
>> ^
>> ./include/linux/stddef.h:17:32: note: expanded from macro 'offsetof'
>>  ^
>> ./include/linux/compiler-gcc.h:170:20: note: expanded from macro
>>'__compiler_offsetof'
>>  __builtin_offsetof(a, b)
>>^
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c:1147:24: error: initializing
>>'struct amdgpu_device *' with an expression of incompatible type
>> 'void'
>>  struct amdgpu_device *adev =
>>^
>> 2 errors generated.
>>
>> Fixes: 10dd74eac4db ("drm/amdgpu/vg20:Restruct uvd.inst to support
>> multiple instances")
>> Cc: James Zhu 
>> Signed-off-by: Stefan Agner 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
>>   1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index bcf68f80bbf0..a5888c44 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring
>> *ring, uint32_t handle,
>>   static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
>>   {
>> struct amdgpu_device *adev =
>> -   container_of(work, struct amdgpu_device,
>> uvd.inst->idle_work.work);
>> +   container_of(work, struct amdgpu_device,
>> uvd.inst[0].idle_work.work);
>
> Hi Alex,
> If all instances share one idle work from hardware view currently and in the
> future ,
> should we move struct delayed_work idle_work from struct amdgpu_uvd_inst to
> struct amdgpu_uvd?

Yes, please do.

Alex

> James
>>
>> unsigned fences = 0, i, j;
>> for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
>
>
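
The underlying language issue, reduced to a minimal sketch: offsetof() needs
a constant member designator, and letting the inst array decay to a pointer
inside it is a GCC extension that clang 6 rejects, while explicit array
indexing is standard C:

struct inst { int x; };
struct dev  { struct inst inst[4]; };

/* __builtin_offsetof(struct dev, inst->x)    - GCC extension; clang 6 errors */
/* __builtin_offsetof(struct dev, inst[0].x)  - standard; accepted by both    */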


linux-4.18-rc1/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:207: broken nested if ?

2018-06-18 Thread David Binderman
Hello there,

[linux-4.18-rc1/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:208]: (warning) 
Identical inner 'if' condition is always true.

Source code is

if (set_clocks && adev->pm.dpm_enabled) {
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
else
amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,
   
AMD_PG_STATE_UNGATE);

So the second function call can never happen. I suggest reworking the code.
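
One possible rework (untested sketch): drop the redundant inner test so the
power-gating fallback can actually run.

if (set_clocks) {
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);
	else
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_UNGATE);
}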

Regards

David Binderman


Re: [PATCH 3/5] dma-buf: lock the reservation object during (un)map_dma_buf

2018-06-18 Thread Christian König

On 18.06.2018 at 10:22, Daniel Vetter wrote:

On Fri, Jun 01, 2018 at 02:00:18PM +0200, Christian König wrote:

First step towards unpinned DMA buf operation.

I've checked the DRM drivers for potential locking of the reservation
object, but essentially we need to audit all implementations of the
dma_buf_ops for this to work.

Signed-off-by: Christian König 

Agreed in principle, but I expect a fireworks show with just this patch
applied. It's not just that we need to audit all the implementations of
dma-buf-ops, we also need to audit all the callers.


Ah, yeah of course a good point.


No idea yet how to go about merging this, but for a start might be good to
throw this at the intel-gfx CI (just Cc: the intel-gfx mailing lists, but
make sure your series applies without amd-staging-next stuff which isn't
in drm.git yet).


Ok, going to incorporate all your other comments as well and then send 
the next round of this with CCing intel-gfx as well.


Thanks for the review,
Christian.


-Daniel


---
  drivers/dma-buf/dma-buf.c | 4 
  include/linux/dma-buf.h   | 4 
  2 files changed, 8 insertions(+)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e4c657d9fad7..4f0708cb58a7 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -631,7 +631,9 @@ struct sg_table *dma_buf_map_attachment(struct 
dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
  
+	reservation_object_lock(attach->dmabuf->resv, NULL);

sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+   reservation_object_unlock(attach->dmabuf->resv);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
  
@@ -658,8 +660,10 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,

if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
  
+	reservation_object_lock(attach->dmabuf->resv, NULL);

attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
direction);
+   reservation_object_unlock(attach->dmabuf->resv);
  }
  EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h

index d17cadd76802..d2ba7a027a78 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -118,6 +118,8 @@ struct dma_buf_ops {
 * any other kind of sharing that the exporter might wish to make
 * available to buffer-users.
 *
+* This is called with the dmabuf->resv object locked.
+*
 * Returns:
 *
 * A &sg_table scatter list of the backing storage of the DMA buffer,
@@ -138,6 +140,8 @@ struct dma_buf_ops {
 * It should also unpin the backing storage if this is the last mapping
 * of the DMA buffer, if the exporter supports backing storage
 * migration.
+*
+* This is called with the dmabuf->resv object locked.
 */
void (*unmap_dma_buf)(struct dma_buf_attachment *,
  struct sg_table *,
--
2.14.1
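
One practical consequence for exporters, sketched below (hypothetical
implementation; the rule itself comes from the documentation added above):
map_dma_buf/unmap_dma_buf are now entered with dmabuf->resv held, so an
implementation must not take that lock itself.

struct my_buffer;	/* hypothetical exporter-private type */
struct sg_table *my_buffer_build_sgt(struct my_buffer *buf,
				     enum dma_data_direction dir);

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct my_buffer *buf = attach->dmabuf->priv;

	/* The caller already holds attach->dmabuf->resv; calling
	 * reservation_object_lock(attach->dmabuf->resv, NULL) here
	 * would now deadlock. */
	return my_buffer_build_sgt(buf, dir);	/* hypothetical helper */
}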



Re: [PATCH] drm/amd/pp: Fix uninitialized variable

2018-06-18 Thread Zhu, Rex
Applied. Thanks.


Best Regards

Rex



From: rajan.v...@gmail.com 
Sent: Monday, June 18, 2018 3:31 PM
To: Deucher, Alexander; Koenig, Christian; Zhou, David(ChunMing); Zhu, Rex; 
StDenis, Tom
Cc: amd-gfx@lists.freedesktop.org; dri-de...@lists.freedesktop.org; 
linux-ker...@vger.kernel.org; Rajan Vaja
Subject: [PATCH] drm/amd/pp: Fix uninitialized variable

From: Rajan Vaja 

Initialize variable to 0 before performing logical OR operation.

Signed-off-by: Rajan Vaja 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index a9efd855..bde01d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr 
*hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
 struct amdgpu_device *adev = hwmgr->adev;
-   int result;
+   int result = 0;
 uint32_t num_se = 0;
 uint32_t count, data;

--
2.7.4
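
For context, a reduced sketch of why the initializer matters (hypothetical
loop body): the function accumulates per-SE status with a logical OR, so the
very first OR reads the variable.

	int result = 0;	/* without this, the first |= reads garbage */
	uint32_t count;

	for (count = 0; count < num_se; count++)
		result |= program_one_se(count);	/* hypothetical call */

	return result;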



Re: [PATCH 3/4] drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate

2018-06-18 Thread Christian König

On 15.06.2018 at 17:19, Michel Dänzer wrote:

On 2018-06-15 05:18 PM, Christian König wrote:

Am 15.06.2018 um 17:14 schrieb Michel Dänzer:

On 2018-06-15 05:11 PM, Christian König wrote:

Am 15.06.2018 um 16:42 schrieb Michel Dänzer:

+
+    if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
    return amdgpu_bo_size(bo);
    -    return 0;
+    if (!nodes)
+    return 0;
+
+    spin_lock(&mgr->lock);

I actually don't see any need to grab the lock here.

Ah, so amdgpu_vram_mgr_del only grabs it for drm_mm_remove_node?

Yes, exactly.

Will drop it in v2, thanks!


Haven't seeen v2 yet, but with that lock removed the whole series is 
Reviewed-by: Christian König .


Regards,
Christian.


[PATCH 5/5] drm/amd/display: Refine the interface dm_pp_notify_wm_clock_changes

2018-06-18 Thread Rex Zhu
Change the function parameter type from dm_pp_wm_sets_with_clock_ranges *
to void * so this interface can be supported on AI/RV.

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dm_services.h   | 2 +-
 drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +-
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c  | 6 +++---
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c  | 4 ++--
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 3 ++-
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++-
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 3 ++-
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h| 2 +-
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  | 3 +--
 10 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 37f6a5f..92d36fe 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -408,7 +408,7 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
 
 bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
-   struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+   void *clock_ranges)
 {
/* TODO: to be implemented */
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h 
b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 4ff9b2b..535b415 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -217,7 +217,7 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
 
 bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
-   struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
+   void *clock_ranges);
 
 void dm_pp_get_funcs_rv(struct dc_context *ctx,
struct pp_smu_funcs_rv *funcs);
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h 
b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 4535756..06f7ef2 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -261,7 +261,7 @@ struct amd_pm_funcs {
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
int (*set_watermarks_for_clocks_ranges)(void *handle,
-   struct pp_wm_sets_with_clock_ranges_soc15 
*wm_with_clock_ranges);
+   void *clock_ranges);
int (*display_clock_voltage_request)(void *handle,
struct pp_display_clock_request *clock);
int (*get_display_mode_validation_clocks)(void *handle,
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c 
b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7b0ff9d..ba5e0e2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1103,17 +1103,17 @@ static int pp_get_clock_by_type_with_voltage(void 
*handle,
 }
 
 static int pp_set_watermarks_for_clocks_ranges(void *handle,
-   struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+   void *clock_ranges)
 {
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
 
-   if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+   if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
 
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
-   wm_with_clock_ranges);
+   clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
 
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921..53207e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr 
*hwmgr,
 }
 
 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-   struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+   void *clock_ranges)
 {
PHM_FUNC_CHECK(hwmgr);
 
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr 
*hwmgr,
return -EINVAL;
 
return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
-   wm_with_clock_ranges);
+   clock_ranges);
 }
 
 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 08690c9..4ca8033 100644
--- 

[PATCH 4/5] drm/amd/display: Delete old implementation of bw_calcs_data_update_from_pplib

2018-06-18 Thread Rex Zhu
This function was copied from dce112; it is not valid for AI/RV.
The driver needs to re-implement this function.

Signed-off-by: Rex Zhu 
---
 .../drm/amd/display/dc/dce120/dce120_resource.c| 123 +
 1 file changed, 1 insertion(+), 122 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 2d58dac..450f7ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -26,7 +26,6 @@
 
 #include "dm_services.h"
 
-
 #include "stream_encoder.h"
 #include "resource.h"
 #include "include/irq_service_interface.h"
@@ -691,127 +690,7 @@ static void dce120_destroy_resource_pool(struct 
resource_pool **pool)
 
 static void bw_calcs_data_update_from_pplib(struct dc *dc)
 {
-   struct dm_pp_clock_levels_with_latency eng_clks = {0};
-   struct dm_pp_clock_levels_with_latency mem_clks = {0};
-   struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
-   int i;
-   unsigned int clk;
-   unsigned int latency;
-
-   /*do system clock*/
-   if (!dm_pp_get_clock_levels_by_type_with_latency(
-   dc->ctx,
-   DM_PP_CLOCK_TYPE_ENGINE_CLK,
-   &eng_clks) || eng_clks.num_levels == 0) {
-
-   eng_clks.num_levels = 8;
-   clk = 300000;
-
-   for (i = 0; i < eng_clks.num_levels; i++) {
-   eng_clks.data[i].clocks_in_khz = clk;
-   clk += 100000;
-   }
-   }
-
-   /* convert all the clock fro kHz to fix point mHz  TODO: wloop data */
-   dc->bw_vbios->high_sclk = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
-   dc->bw_vbios->mid1_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
-   dc->bw_vbios->mid2_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
-   dc->bw_vbios->mid3_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
-   dc->bw_vbios->mid4_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
-   dc->bw_vbios->mid5_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
-   dc->bw_vbios->mid6_sclk  = bw_frc_to_fixed(
-   eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
-   dc->bw_vbios->low_sclk  = bw_frc_to_fixed(
-   eng_clks.data[0].clocks_in_khz, 1000);
-
-   /*do memory clock*/
-   if (!dm_pp_get_clock_levels_by_type_with_latency(
-   dc->ctx,
-   DM_PP_CLOCK_TYPE_MEMORY_CLK,
-   &mem_clks) || mem_clks.num_levels == 0) {
-
-   mem_clks.num_levels = 3;
-   clk = 25;
-   latency = 45;
-
-   for (i = 0; i < eng_clks.num_levels; i++) {
-   mem_clks.data[i].clocks_in_khz = clk;
-   mem_clks.data[i].latency_in_us = latency;
-   clk += 50;
-   latency -= 5;
-   }
-
-   }
-
-   /* we don't need to call PPLIB for validation clock since they
-* also give us the highest sclk and highest mclk (UMA clock).
-* ALSO always convert UMA clock (from PPLIB)  to YCLK (HW formula):
-* YCLK = UMACLK*m_memoryTypeMultiplier
-*/
-   dc->bw_vbios->low_yclk = bw_frc_to_fixed(
-   mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
-   dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
-   mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
-   1000);
-   dc->bw_vbios->high_yclk = bw_frc_to_fixed(
-   mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
-   1000);
-
-   /* Now notify PPLib/SMU about which Watermarks sets they should select
-* depending on DPM state they are in. And update BW MGR GFX Engine and
-* Memory clock member variables for Watermarks calculations for each
-* Watermark Set
-*/
-   clk_ranges.num_wm_sets = 4;
-   clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
-   clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
-   eng_clks.data[0].clocks_in_khz;
-   clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
-   eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-   clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
-   mem_clks.data[0].clocks_in_khz;
-   clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
-   mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
-

[PATCH 3/5] drm/amd/pp: Memory Latency is always 25us on Vega10

2018-06-18 Thread Rex Zhu
Also use the tolerable latency defined by the display component to find
the lowest MCLK frequency when MCLK switching is disabled.
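
In code, the search this enables is roughly the loop below; this is a
sketch with simplified stand-in types, not the exact hwmgr code:

	/* Sketch: pick the lowest MCLK whose latency fits the display's
	 * tolerable latency; the entry layout is a simplified stand-in. */
	struct mclk_entry {
		uint32_t frequency;	/* in 10 kHz units */
		uint32_t latency;	/* in us */
	};

	static uint32_t lowest_tolerable_mclk(const struct mclk_entry *tbl,
					      int count, uint32_t tolerable_us,
					      uint32_t min_freq)
	{
		uint32_t best = 0;
		int i;

		for (i = 0; i < count; i++) {
			if (tbl[i].latency <= tolerable_us &&
			    tbl[i].frequency >= min_freq &&
			    (!best || tbl[i].frequency < best))
				best = tbl[i].frequency;
		}
		return best;	/* 0 means no level fits */
	}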

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 24 ++
 1 file changed, 2 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 6057b59..198c7ed 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 
-#define MEM_FREQ_LOW_LATENCY        25000
-#define MEM_FREQ_HIGH_LATENCY       80000
-#define MEM_LATENCY_HIGH            245
-#define MEM_LATENCY_LOW             35
-#define MEM_LATENCY_ERR             0xFFFF
-
 #define mmDF_CS_AON0_DramBaseAddress0  0x0044
 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
 
@@ -3223,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
/* Find the lowest MCLK frequency that is within
 * the tolerable latency defined in DAL
 */
-   latency = 0;
+   latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
@@ -4074,18 +4068,6 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
 
 }
 
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
-   uint32_t clock)
-{
-   if (clock >= MEM_FREQ_LOW_LATENCY &&
-   clock < MEM_FREQ_HIGH_LATENCY)
-   return MEM_LATENCY_HIGH;
-   else if (clock >= MEM_FREQ_HIGH_LATENCY)
-   return MEM_LATENCY_LOW;
-   else
-   return MEM_LATENCY_ERR;
-}
-
 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
 {
@@ -4107,9 +4089,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
dep_table->entries[i].clk * 10;
clocks->data[clocks->num_levels].latency_in_us =
data->mclk_latency_table.entries
-   [data->mclk_latency_table.count].latency =
-   vega10_get_mem_latency(hwmgr,
-   dep_table->entries[i].clk);
+   [data->mclk_latency_table.count].latency = 25;
clocks->num_levels++;
data->mclk_latency_table.count++;
}
-- 
1.9.1



[PATCH 2/5] drm/amd/pp: Fix wrong clock-unit exported to Display

2018-06-18 Thread Rex Zhu
Convert the clock values from 10 kHz (as used by the SMU) to the kHz
expected by the display component.

This fixes the issue of 4K monitors failing to light up on Vega/Raven.
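
The conversion is a plain multiply, but getting the unit wrong is silent,
which is how the 4K failure crept in. A sketch of the two factors used in
this patch (the helper names are hypothetical):

	/* SMU dependency tables store clocks in 10 kHz units; DM expects kHz. */
	static inline uint32_t smu_10khz_to_khz(uint32_t clk)
	{
		return clk * 10;
	}

	/* vega12 DPM levels are stored in MHz instead, hence the factor 1000. */
	static inline uint32_t smu_mhz_to_khz(uint32_t clk)
	{
		return clk * 1000;
	}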

Signed-off-by: Rex Zhu 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  |  4 ++--
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 10 +-
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 +-
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 6d5e042..08690c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1014,7 +1014,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
-   clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+   clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
@@ -1065,7 +1065,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
-   clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+   clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f97fce6..6057b59 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4067,7 +4067,7 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
-   dep_table->entries[i].clk;
+   dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
@@ -4104,7 +4104,7 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
clocks->data[clocks->num_levels].clocks_in_khz =
data->mclk_latency_table.entries
[data->mclk_latency_table.count].frequency =
-   dep_table->entries[i].clk;
+   dep_table->entries[i].clk * 10;
clocks->data[clocks->num_levels].latency_in_us =
data->mclk_latency_table.entries
[data->mclk_latency_table.count].latency =
@@ -4126,7 +4126,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
 
for (i = 0; i < dep_table->count; i++) {
-   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4142,7 +4142,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
 
for (i = 0; i < dep_table->count; i++) {
-   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4202,7 +4202,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
 
for (i = 0; i < dep_table->count; i++) {
-   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+   clocks->data[i].clocks_in_khz = dep_table->entries[i].clk  * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e209..d685ce7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1576,7 +1576,7 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
 
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
-   dpm_table->dpm_levels[i].value * 100;
+   dpm_table->dpm_levels[i].value * 1000;
 
clocks->data[i].latency_in_us = 0;
  

[PATCH 1/5] drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency

2018-06-18 Thread Rex Zhu
The display component can get the true max_displ_clk_in_khz instead of a
hardcoded value.
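
With the voltage-based query wired through to powerplay below, a DM-side
caller can derive the maximum display clock from the reported levels
rather than from a constant. A sketch of such a caller (the clock type
used and the assumption that levels are reported lowest to highest are
mine, not part of this patch):

	static uint32_t query_max_displ_clk_in_khz(const struct dc_context *ctx)
	{
		struct dm_pp_clock_levels_with_voltage clks = { 0 };

		if (!dm_pp_get_clock_levels_by_type_with_voltage(ctx,
				DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clks) ||
		    clks.num_levels == 0)
			return 0;	/* caller keeps its previous default */

		/* assumes powerplay reports levels lowest to highest */
		return clks.data[clks.num_levels - 1].clocks_in_khz;
	}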

Signed-off-by: Rex Zhu 
---
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 45 +-
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a33461..37f6a5f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -261,6 +261,33 @@ static void pp_to_dc_clock_levels_with_latency(
}
 }
 
+static void pp_to_dc_clock_levels_with_voltage(
+   const struct pp_clock_levels_with_voltage *pp_clks,
+   struct dm_pp_clock_levels_with_voltage *clk_level_info,
+   enum dm_pp_clock_type dc_clk_type)
+{
+   uint32_t i;
+
+   if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+   DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+   DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+   pp_clks->num_levels,
+   DM_PP_MAX_CLOCK_LEVELS);
+
+   clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+   } else
+   clk_level_info->num_levels = pp_clks->num_levels;
+
+   DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+   DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+   for (i = 0; i < clk_level_info->num_levels; i++) {
+   DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
+   clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+   clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+   }
+}
+
 bool dm_pp_get_clock_levels_by_type(
const struct dc_context *ctx,
enum dm_pp_clock_type clk_type,
@@ -361,8 +388,22 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
enum dm_pp_clock_type clk_type,
struct dm_pp_clock_levels_with_voltage *clk_level_info)
 {
-   /* TODO: to be implemented */
-   return false;
+   struct amdgpu_device *adev = ctx->driver_context;
+   void *pp_handle = adev->powerplay.pp_handle;
+   struct pp_clock_levels_with_voltage pp_clks = { 0 };
+   const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+   if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
+   return false;
+
+   if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+   dc_to_pp_clock_type(clk_type),
+   &pp_clks))
+   return false;
+
+   pp_to_dc_clock_levels_with_voltage(&pp_clks, clk_level_info, clk_type);
+
+   return true;
 }
 
 bool dm_pp_notify_wm_clock_changes(
-- 
1.9.1



Re: [PATCH 2/2] drm/doc: Make naming consistent for Core Driver Infrastructure

2018-06-18 Thread Daniel Vetter
On Wed, Jun 13, 2018 at 01:45:23PM -0400, Alex Deucher wrote:
> On Mon, Jun 4, 2018 at 5:11 AM, Michel Dänzer  wrote:
> >
> > Adding dri-devel.
> >
> 
> Any opinions?

100% meh, i.e. if you care, go with whatever, you have my ack. Anyone who
cares about making docs more consistent makes me a happy camper :-)

Cheers, Daniel

> 
> Alex
> 
> >
> > On 2018-06-01 08:03 PM, Alex Deucher wrote:
> >> Use chapter rather than section to align with the rst markup.
> >>
> >> Signed-off-by: Alex Deucher 
> >> ---
> >>  Documentation/gpu/amdgpu.rst | 2 +-
> >>  1 file changed, 1 insertion(+), 1 deletion(-)
> >>
> >> diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
> >> index 1d726b90a619..e99732553c71 100644
> >> --- a/Documentation/gpu/amdgpu.rst
> >> +++ b/Documentation/gpu/amdgpu.rst
> >> @@ -8,7 +8,7 @@ Next (GCN) architecture.
> >>  Core Driver Infrastructure
> >>  ==
> >>
> >> -This section covers core driver infrastructure.
> >> +This chapter covers core driver infrastructure.
> >>
> >>  PRIME Buffer Sharing
> >>  
> >
> > I don't mind either way, but I copied the "section" wording from i915.rst.
> >
> >
> > --
> > Earthling Michel Dänzer   |   http://www.amd.com
> > Libre software enthusiast | Mesa and X developer
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [PATCH 5/5] drm/amdgpu: add independent DMA-buf export v3

2018-06-18 Thread Daniel Vetter
On Fri, Jun 01, 2018 at 02:00:20PM +0200, Christian König wrote:
> The caching of SGT's done by the DRM code is actually quite harmful and
> should probably be removed altogether in the long term.

Hm, why is it harmful? We've done it because it's expensive, and people
started screaming about the overhead ... hence the caching. Doing an
amdgpu copypasta seems like working around issues in shared code.
-Daniel

> 
> Start by providing a separate DMA-buf export implementation in amdgpu. This is
> also a prerequisite of unpinned DMA-buf handling.
> 
> v2: fix unintended recursion, remove debugging leftovers
> v3: split out from unpinned DMA-buf work
> 
> Signed-off-by: Christian König 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  1 -
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  1 -
>  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 73 
> ++-
>  3 files changed, 32 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 2d7500921c0b..93dc57d74fc2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -373,7 +373,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
>  void amdgpu_gem_object_close(struct drm_gem_object *obj,
>   struct drm_file *file_priv);
>  unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
> -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
>  struct drm_gem_object *
>  amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
>struct dma_buf_attachment *attach,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index b0bf2f24da48..270b8ad927ea 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -907,7 +907,6 @@ static struct drm_driver kms_driver = {
>   .gem_prime_export = amdgpu_gem_prime_export,
>   .gem_prime_import = amdgpu_gem_prime_import,
>   .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
> - .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
>   .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
>   .gem_prime_vmap = amdgpu_gem_prime_vmap,
>   .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> index a156b3891a3f..0c5a75b06648 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> @@ -32,14 +32,6 @@
>  
>  static const struct dma_buf_ops amdgpu_dmabuf_ops;
>  
> -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
> -{
> - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> - int npages = bo->tbo.num_pages;
> -
> - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
> -}
> -
>  void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
>  {
>   struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> @@ -132,23 +124,17 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
>   return ERR_PTR(ret);
>  }
>  
> -static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
> -  struct dma_buf_attachment *attach)
> +static struct sg_table *
> +amdgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
> +enum dma_data_direction dir)
>  {
> + struct dma_buf *dma_buf = attach->dmabuf;
>   struct drm_gem_object *obj = dma_buf->priv;
>   struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
>   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
> + struct sg_table *sgt;
>   long r;
>  
> - r = drm_gem_map_attach(dma_buf, attach);
> - if (r)
> - return r;
> -
> - r = amdgpu_bo_reserve(bo, false);
> - if (unlikely(r != 0))
> - goto error_detach;
> -
> -
>   if (attach->dev->driver != adev->dev->driver) {
>   /*
>* Wait for all shared fences to complete before we switch to 
> future
> @@ -159,46 +145,53 @@ static int amdgpu_gem_map_attach(struct dma_buf 
> *dma_buf,
>   MAX_SCHEDULE_TIMEOUT);
>   if (unlikely(r < 0)) {
>   DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
> - goto error_unreserve;
> + return ERR_PTR(r);
>   }
>   }
>  
>   /* pin buffer into GTT */
>   r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
>   if (r)
> - goto error_unreserve;
> + return ERR_PTR(r);
> +
> + sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
> + if (IS_ERR(sgt))
> + return sgt;
> +
> + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
> +   DMA_ATTR_SKIP_CPU_SYNC))
> + goto error_free;
>  
>   if 

Re: [PATCH 4/5] dma-buf: add dma_buf_(un)map_attachment_locked variants

2018-06-18 Thread Daniel Vetter
On Fri, Jun 01, 2018 at 02:00:19PM +0200, Christian König wrote:
> Add function variants which can be called with the reservation lock
> already held.
> 
> Signed-off-by: Christian König 

I expect that we'll need this patch before patch 3 and then roll it out to
drivers doing reservation locking already, before we can add the
reservation stuff for all other callers.
> ---
>  drivers/dma-buf/dma-buf.c | 60 
> ++-
>  include/linux/dma-buf.h   |  5 
>  2 files changed, 59 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 4f0708cb58a7..3371509b468e 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -606,6 +606,38 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct 
> dma_buf_attachment *attach)
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_detach);
>  
> +/**
> + * dma_buf_map_attachment_locked - Maps the buffer into _device_ address 
> space
> + * with the reservation lock held. Is a wrapper for map_dma_buf() of the
> + *
> + * Returns the scatterlist table of the attachment;
> + * dma_buf_ops.
> + * @attach:  [in]attachment whose scatterlist is to be returned
> + * @direction:   [in]direction of DMA transfer
> + *
> + * Returns sg_table containing the scatterlist to be returned; returns 
> ERR_PTR
> + * on error. May return -EINTR if it is interrupted by a signal.
> + *
> + * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
_locked

Also please reference the other variants here for doc completeness.

> + * the underlying backing storage is pinned for as long as a mapping exists,
> + * therefore users/importers should not hold onto a mapping for undue 
> amounts of
> + * time.
> + */
> +struct sg_table *
> +dma_buf_map_attachment_locked(struct dma_buf_attachment *attach,
> +   enum dma_data_direction direction)
> +{
> + struct sg_table *sg_table;
> +
> + might_sleep();

Needs a lockdep_assert_held here.
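
Something like this, as a sketch (assuming the reservation object's
ww_mutex member is named lock, per include/linux/reservation.h, so the
underlying mutex is resv->lock.base):

	might_sleep();
	/* caller must already hold the reservation lock */
	lockdep_assert_held(&attach->dmabuf->resv->lock.base);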

> + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> + if (!sg_table)
> + sg_table = ERR_PTR(-ENOMEM);
> +
> + return sg_table;
> +}
> +EXPORT_SYMBOL_GPL(dma_buf_map_attachment_locked);
> +
>  /**
>   * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
>   * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
> @@ -626,13 +658,12 @@ struct sg_table *dma_buf_map_attachment(struct 
> dma_buf_attachment *attach,
>  {
>   struct sg_table *sg_table;
>  
> - might_sleep();
>  
>   if (WARN_ON(!attach || !attach->dmabuf))
>   return ERR_PTR(-EINVAL);
>  
>   reservation_object_lock(attach->dmabuf->resv, NULL);
> - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> + sg_table = dma_buf_map_attachment_locked(attach, direction);
>   reservation_object_unlock(attach->dmabuf->resv);
>   if (!sg_table)
>   sg_table = ERR_PTR(-ENOMEM);
> @@ -641,6 +672,26 @@ struct sg_table *dma_buf_map_attachment(struct 
> dma_buf_attachment *attach,
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
>  
> +/**
> + * dma_buf_unmap_attachment_locked - unmaps the buffer with reservation lock
> + * held, should deallocate the associated scatterlist. Is a wrapper for
> + * unmap_dma_buf() of dma_buf_ops.
> + * @attach:  [in]attachment to unmap buffer from
> + * @sg_table:[in]scatterlist info of the buffer to unmap
> + * @direction:  [in]direction of DMA transfer
> + *
> + * This unmaps a DMA mapping for @attached obtained by 
> dma_buf_map_attachment().

_locked

> + */
> +void dma_buf_unmap_attachment_locked(struct dma_buf_attachment *attach,
> +  struct sg_table *sg_table,
> +  enum dma_data_direction direction)
> +{
> + might_sleep();

Needs a lockdep_assert_held here.
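
Sketch, with the same lock-member assumption as above:

	lockdep_assert_held(&attach->dmabuf->resv->lock.base);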

Otherwise lgtm, but there's the big caveat that I expect lockdep
fireworks en masse with this while drivers are not yet converted.
-Daniel

> + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> + direction);
> +}
> +EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment_locked);
> +
>  /**
>   * dma_buf_unmap_attachment - unmaps and decreases usecount of the 
> buffer;might
>   * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
> @@ -655,14 +706,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment 
> *attach,
>   struct sg_table *sg_table,
>   enum dma_data_direction direction)
>  {
> - might_sleep();
> -
>   if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
>   return;
>  
>   reservation_object_lock(attach->dmabuf->resv, NULL);
> - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> - direction);
> + 

Re: [PATCH 3/5] dma-buf: lock the reservation object during (un)map_dma_buf

2018-06-18 Thread Daniel Vetter
On Fri, Jun 01, 2018 at 02:00:18PM +0200, Christian König wrote:
> First step towards unpinned DMA buf operation.
> 
> I've checked the DRM drivers for potential locking of the reservation
> object, but essentially we need to audit all implementations of the
> dma_buf_ops for this to work.
> 
> Signed-off-by: Christian König 

Agreed in principle, but I expect a fireworks show with just this patch
applied. It's not just that we need to audit all the implementations of
dma-buf-ops, we also need to audit all the callers.
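
The failure mode is easy to sketch: any importer that already takes the
reservation lock around its map call will now deadlock against itself,
e.g.:

	/* pre-existing importer pattern, holding the lock across the map */
	reservation_object_lock(attach->dmabuf->resv, NULL);
	/* with this patch the helper takes the same lock again: deadlock */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	reservation_object_unlock(attach->dmabuf->resv);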

No idea yet how to go about merging this, but for a start might be good to
throw this at the intel-gfx CI (just Cc: the intel-gfx mailing lists, but
make sure your series applies without amd-staging-next stuff which isn't
in drm.git yet).
-Daniel

> ---
>  drivers/dma-buf/dma-buf.c | 4 
>  include/linux/dma-buf.h   | 4 
>  2 files changed, 8 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index e4c657d9fad7..4f0708cb58a7 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -631,7 +631,9 @@ struct sg_table *dma_buf_map_attachment(struct 
> dma_buf_attachment *attach,
>   if (WARN_ON(!attach || !attach->dmabuf))
>   return ERR_PTR(-EINVAL);
>  
> + reservation_object_lock(attach->dmabuf->resv, NULL);
>   sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> + reservation_object_unlock(attach->dmabuf->resv);
>   if (!sg_table)
>   sg_table = ERR_PTR(-ENOMEM);
>  
> @@ -658,8 +660,10 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment 
> *attach,
>   if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
>   return;
>  
> + reservation_object_lock(attach->dmabuf->resv, NULL);
>   attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
>   direction);
> + reservation_object_unlock(attach->dmabuf->resv);
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
>  
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index d17cadd76802..d2ba7a027a78 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -118,6 +118,8 @@ struct dma_buf_ops {
>* any other kind of sharing that the exporter might wish to make
>* available to buffer-users.
>*
> +  * This is called with the dmabuf->resv object locked.
> +  *
>* Returns:
>*
>* A _table scatter list of or the backing storage of the DMA buffer,
> @@ -138,6 +140,8 @@ struct dma_buf_ops {
>* It should also unpin the backing storage if this is the last mapping
>* of the DMA buffer, it the exporter supports backing storage
>* migration.
> +  *
> +  * This is called with the dmabuf->resv object locked.
>*/
>   void (*unmap_dma_buf)(struct dma_buf_attachment *,
> struct sg_table *,
> -- 
> 2.14.1
> 
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [PATCH 2/5] dma-buf: remove kmap_atomic interface

2018-06-18 Thread Daniel Vetter
On Fri, Jun 01, 2018 at 02:00:17PM +0200, Christian König wrote:
> Neither used nor correctly implemented anywhere. Just completely remove
> the interface.
> 
> Signed-off-by: Christian König 

I wonder whether we can nuke the normal kmap stuff too ... everyone seems
to want/use the vmap stuff for kernel-internal mapping needs.

Anyway, this looks good.
> ---
>  drivers/dma-buf/dma-buf.c  | 44 
> --
>  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c  |  2 -
>  drivers/gpu/drm/armada/armada_gem.c|  2 -
>  drivers/gpu/drm/drm_prime.c| 26 -
>  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 11 --
>  drivers/gpu/drm/i915/selftests/mock_dmabuf.c   |  2 -
>  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c  |  2 -
>  drivers/gpu/drm/tegra/gem.c| 14 ---
>  drivers/gpu/drm/udl/udl_dmabuf.c   | 17 -
>  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c  | 13 ---
>  .../media/common/videobuf2/videobuf2-dma-contig.c  |  1 -
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c  |  1 -
>  drivers/media/common/videobuf2/videobuf2-vmalloc.c |  1 -
>  drivers/staging/android/ion/ion.c  |  2 -
>  drivers/tee/tee_shm.c  |  6 ---
>  include/drm/drm_prime.h|  4 --
>  include/linux/dma-buf.h|  4 --
>  17 files changed, 152 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index e99a8d19991b..e4c657d9fad7 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct 
> dma_buf_export_info *exp_info)
> || !exp_info->ops->map_dma_buf
> || !exp_info->ops->unmap_dma_buf
> || !exp_info->ops->release
> -   || !exp_info->ops->map_atomic
> || !exp_info->ops->map
> || !exp_info->ops->mmap)) {
>   return ERR_PTR(-EINVAL);
> @@ -687,14 +686,6 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
>   *  void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
>   *  void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
>   *
> - *   There are also atomic variants of these interfaces. Like for kmap they
> - *   facilitate non-blocking fast-paths. Neither the importer nor the 
> exporter
> - *   (in the callback) is allowed to block when using these.
> - *
> - *   Interfaces::
> - *  void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
> - *  void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void 
> \*);
> - *
>   *   For importers all the restrictions of using kmap apply, like the limited
>   *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
>   *   max 2 atomic dma_buf kmaps at the same time (in any given process 
> context).

This is also about atomic kmap ...

And the subsequent language around "Note that these calls need to always
succeed." is also not true; it might be good to update that, stating that
kmap is optional (like we already say for vmap).

With those docs nits addressed:

Reviewed-by: Daniel Vetter 

> @@ -859,41 +850,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
>  
> -/**
> - * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
> - * space. The same restrictions as for kmap_atomic and friends apply.
> - * @dmabuf:  [in]buffer to map page from.
> - * @page_num:[in]page in PAGE_SIZE units to map.
> - *
> - * This call must always succeed, any necessary preparations that might fail
> - * need to be done in begin_cpu_access.
> - */
> -void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
> -{
> - WARN_ON(!dmabuf);
> -
> - return dmabuf->ops->map_atomic(dmabuf, page_num);
> -}
> -EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
> -
> -/**
> - * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
> - * @dmabuf:  [in]buffer to unmap page from.
> - * @page_num:[in]page in PAGE_SIZE units to unmap.
> - * @vaddr:   [in]kernel space pointer obtained from dma_buf_kmap_atomic.
> - *
> - * This call must always succeed.
> - */
> -void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
> -void *vaddr)
> -{
> - WARN_ON(!dmabuf);
> -
> - if (dmabuf->ops->unmap_atomic)
> - dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
> -}
> -EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
> -
>  /**
>   * dma_buf_kmap - Map a page of the buffer object into kernel address space. 
> The
>   * same restrictions as for kmap and friends apply.
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> index 

Re: [PATCH 1/5] dma_buf: remove device parameter from attach callback

2018-06-18 Thread Daniel Vetter
On Fri, Jun 01, 2018 at 02:00:16PM +0200, Christian König wrote:
> The device parameter is completely unused because it is available in the
> attachment structure as well.
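
For reference, an implementation that still needs the device can take it
from the attachment instead; a sketch with a hypothetical callback name:

	static int example_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
	{
		struct device *dev = attach->dev; /* formerly the parameter */

		dev_dbg(dev, "attached\n");
		return 0;
	}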
> 
> Signed-off-by: Christian König 
> ---
>  drivers/dma-buf/dma-buf.c | 2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 3 +--
>  drivers/gpu/drm/drm_prime.c   | 3 +--
>  drivers/gpu/drm/udl/udl_dmabuf.c  | 1 -
>  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 1 -
>  drivers/media/common/videobuf2/videobuf2-dma-contig.c | 2 +-
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c | 2 +-
>  drivers/media/common/videobuf2/videobuf2-vmalloc.c| 2 +-
>  include/drm/drm_prime.h   | 2 +-
>  include/linux/dma-buf.h   | 3 +--
>  10 files changed, 8 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index d78d5fc173dc..e99a8d19991b 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -568,7 +568,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf 
> *dmabuf,
>   mutex_lock(&dmabuf->lock);
>  
>   if (dmabuf->ops->attach) {
> - ret = dmabuf->ops->attach(dmabuf, dev, attach);
> + ret = dmabuf->ops->attach(dmabuf, attach);
>   if (ret)
>   goto err_attach;
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> index 4683626b065f..f1500f1ec0f5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
> @@ -133,7 +133,6 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
>  }
>  
>  static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
> -  struct device *target_dev,
>struct dma_buf_attachment *attach)
>  {
>   struct drm_gem_object *obj = dma_buf->priv;
> @@ -141,7 +140,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
>   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
>   long r;
>  
> - r = drm_gem_map_attach(dma_buf, target_dev, attach);
> + r = drm_gem_map_attach(dma_buf, attach);
>   if (r)
>   return r;
>  
> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> index 7856a9b3f8a8..4a3a232fea67 100644
> --- a/drivers/gpu/drm/drm_prime.c
> +++ b/drivers/gpu/drm/drm_prime.c
> @@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct 
> drm_prime_file_private *prime_fpri
>  /**
>   * drm_gem_map_attach - dma_buf attach implementation for GEM
>   * @dma_buf: buffer to attach device to
> - * @target_dev: not used
>   * @attach: buffer attachment data
>   *
>   * Allocates _prime_attachment and calls _driver.gem_prime_pin for
> @@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct 
> drm_prime_file_private *prime_fpri
>   *
>   * Returns 0 on success, negative error code on failure.
>   */
> -int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
> +int drm_gem_map_attach(struct dma_buf *dma_buf,
>  struct dma_buf_attachment *attach)
>  {
>   struct drm_prime_attachment *prime_attach;
> diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c 
> b/drivers/gpu/drm/udl/udl_dmabuf.c
> index 2867ed155ff6..5fdc8bdc2026 100644
> --- a/drivers/gpu/drm/udl/udl_dmabuf.c
> +++ b/drivers/gpu/drm/udl/udl_dmabuf.c
> @@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
>  };
>  
>  static int udl_attach_dma_buf(struct dma_buf *dmabuf,
> -   struct device *dev,
> struct dma_buf_attachment *attach)
>  {
>   struct udl_drm_dmabuf_attachment *udl_attach;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c 
> b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
> index 0d42a46521fc..fbffb37ccf42 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
> @@ -40,7 +40,6 @@
>   */
>  
>  static int vmw_prime_map_attach(struct dma_buf *dma_buf,
> - struct device *target_dev,
>   struct dma_buf_attachment *attach)
>  {
>   return -ENOSYS;
> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c 
> b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> index f1178f6f434d..12d0072c52c2 100644
> --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> @@ -222,7 +222,7 @@ struct vb2_dc_attachment {
>   enum dma_data_direction dma_dir;
>  };
>  
> -static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
> +static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
>   struct dma_buf_attachment *dbuf_attach)
>  {
>   struct vb2_dc_attachment *attach;
> diff --git 

[PATCH] drm/amd/pp: Fix uninitialized variable

2018-06-18 Thread rajan . vaja
From: Rajan Vaja 

Initialize the variable to 0 before performing the logical OR operation.
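
Without the initializer the first OR reads an indeterminate value. The
pattern, sketched with a hypothetical per-SE helper:

	int result = 0;	/* accumulator for the ORed per-SE results */
	uint32_t se;

	for (se = 0; se < num_se; se++)
		result |= configure_se_edc(hwmgr, se); /* hypothetical helper */

	return result;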

Signed-off-by: Rajan Vaja 
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index a9efd855..bde01d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr 
*hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
struct amdgpu_device *adev = hwmgr->adev;
-   int result;
+   int result = 0;
uint32_t num_se = 0;
uint32_t count, data;
 
-- 
2.7.4
