[PATCH] drm/amd/display/amdgpu_dm/amdgpu_dm.c: Remove duplicate header

2018-11-20 Thread Brajeswar Ghosh
Remove dm_services_types.h which is included more than once

Signed-off-by: Brajeswar Ghosh 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e224f23e2215..62a96c683584 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -38,7 +38,6 @@
 #include "amd_shared.h"
 #include "amdgpu_dm_irq.h"
 #include "dm_helpers.h"
-#include "dm_services_types.h"
 #include "amdgpu_dm_mst_types.h"
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: disable page queue support for Vega20

2018-11-20 Thread Alex Deucher
Reviewed-by: Alex Deucher 
On Wed, Nov 21, 2018 at 1:40 AM Evan Quan  wrote:
>
> Keep it disabled until we confirm it's ready.
>
> Change-Id: I2dc4c0f1156dd82f7046672fed6d22b9d18c2010
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 
> b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 0a3b68dd49a0..e6cb2c399957 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -1457,7 +1457,8 @@ static bool sdma_v4_0_fw_support_paging_queue(struct 
> amdgpu_device *adev)
> case CHIP_VEGA12:
> return fw_version >= 31;
> case CHIP_VEGA20:
> -   return fw_version >= 115;
> +   //return fw_version >= 115;
> +   return false;
> default:
> return false;
> }
> --
> 2.19.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/amdgpu/vce_v3_0.c: Remove duplicate header

2018-11-20 Thread Brajeswar Ghosh
Remove gca/gfx_8_0_d.h which is included more than once

Signed-off-by: Brajeswar Ghosh 
---
 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd39730070..4e4289a06a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_sh_mask.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
 
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: disable page queue support for Vega20

2018-11-20 Thread Evan Quan
Keep it disabled until we confirm it's ready.

Change-Id: I2dc4c0f1156dd82f7046672fed6d22b9d18c2010
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 0a3b68dd49a0..e6cb2c399957 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1457,7 +1457,8 @@ static bool sdma_v4_0_fw_support_paging_queue(struct 
amdgpu_device *adev)
case CHIP_VEGA12:
return fw_version >= 31;
case CHIP_VEGA20:
-   return fw_version >= 115;
+   //return fw_version >= 115;
+   return false;
default:
return false;
}
-- 
2.19.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/4] drm/amdkfd: Add support for doorbell BOs

2018-11-20 Thread Kuehling, Felix
This allows user mode to map doorbell pages into GPUVM address space.
That way GPUs can submit to user mode queues (self-dispatch).

Signed-off-by: Felix Kuehling 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 59 ++--
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c |  6 +++
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h  |  4 +-
 3 files changed, 62 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14f76765..f3ac8e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -887,6 +887,24 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
return ret;
 }
 
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+   struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+   if (!sg)
+   return NULL;
+   if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+   kfree(sg);
+   return NULL;
+   }
+   sg->sgl->dma_address = addr;
+   sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+   sg->sgl->dma_length = size;
+#endif
+   return sg;
+}
+
 static int process_validate_vms(struct amdkfd_process_info *process_info)
 {
struct amdgpu_vm *peer_vm;
@@ -1170,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 {
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+   enum ttm_bo_type bo_type = ttm_bo_type_device;
+   struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1198,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+   } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+   domain = AMDGPU_GEM_DOMAIN_GTT;
+   alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+   bo_type = ttm_bo_type_sg;
+   alloc_flags = 0;
+   if (size > UINT_MAX)
+   return -EINVAL;
+   sg = create_doorbell_sg(*offset, size);
+   if (!sg)
+   return -ENOMEM;
} else {
return -EINVAL;
}
 
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-   if (!*mem)
-   return -ENOMEM;
+   if (!*mem) {
+   ret = -ENOMEM;
+   goto err;
+   }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1237,7 +1269,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
amdgpu_sync_create(&(*mem)->sync);
 
-   ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, false);
+   ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
goto err_reserve_limit;
@@ -1251,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
-   bp.type = ttm_bo_type_device;
+   bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1259,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+   if (bo_type == ttm_bo_type_sg) {
+   bo->tbo.sg = sg;
+   bo->tbo.ttm->sg = sg;
+   }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1290,10 +1326,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
 err_bo_create:
-   unreserve_mem_limit(adev, size, alloc_domain, false);
+   unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+   if (sg) {
+   sg_free_table(sg);
+   kfree(sg);
+   }
return ret;
 }
 
@@ -1363,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
 
+   /* If the SG is not NULL, it's one we created for a doorbell
+* BO. We need to free it.
+*/
+   if (mem->bo->tbo.sg) {
+   sg_free_table(mem->bo->tbo.sg);
+   kfree(mem->bo->tbo.sg);
+   }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index ae3ae0f..3623538 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ 

[PATCH 3/4] drm/amdkfd: Add DMABuf import functionality

2018-11-20 Thread Kuehling, Felix
This is used for interoperability between ROCm compute and graphics
APIs. It allows importing graphics driver BOs into the ROCm SVM
address space for zero-copy GPU access.

The API is split into two steps (query and import) to allow user mode
to manage the virtual address space allocation for the imported buffer.

Signed-off-by: Felix Kuehling 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c   |  57 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h   |  11 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |  55 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h  |   2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c|   4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 118 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h|   1 +
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c|  18 
 include/uapi/linux/kfd_ioctl.h   |  26 -
 9 files changed, 287 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 274099a..44b8a12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include 
+#include 
 
 const struct kgd2kfd_calls *kgd2kfd;
 
@@ -444,6 +445,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct 
kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
 }
 
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+   struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+   struct dma_buf *dma_buf;
+   struct drm_gem_object *obj;
+   struct amdgpu_bo *bo;
+   uint64_t metadata_flags;
+   int r = -EINVAL;
+
+   dma_buf = dma_buf_get(dma_buf_fd);
+   if (IS_ERR(dma_buf))
+   return PTR_ERR(dma_buf);
+
+   if (dma_buf->ops != &amdgpu_dmabuf_ops)
+   /* Can't handle non-graphics buffers */
+   goto out_put;
+
+   obj = dma_buf->priv;
+   if (obj->dev->driver != adev->ddev->driver)
+   /* Can't handle buffers from different drivers */
+   goto out_put;
+
+   adev = obj->dev->dev_private;
+   bo = gem_to_amdgpu_bo(obj);
+   if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+   AMDGPU_GEM_DOMAIN_GTT)))
+   /* Only VRAM and GTT BOs are supported */
+   goto out_put;
+
+   r = 0;
+   if (dma_buf_kgd)
+   *dma_buf_kgd = (struct kgd_dev *)adev;
+   if (bo_size)
+   *bo_size = amdgpu_bo_size(bo);
+   if (metadata_size)
+   *metadata_size = bo->metadata_size;
+   if (metadata_buffer)
+   r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+  metadata_size, &metadata_flags);
+   if (flags) {
+   *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+   ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+   if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+   *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+   }
+
+out_put:
+   dma_buf_put(dma_buf);
+   return r;
+}
+
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 4d766cb..1a84fe2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -150,6 +150,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct 
kgd_dev *kgd);
 
 uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
 void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info 
*cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
@@ -201,6 +206,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void 
*process_info,
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
  struct kfd_vm_fault_info *info);
 
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+   

[PATCH 1/4] drm/amdgpu: Add KFD VRAM limit checking

2018-11-20 Thread Kuehling, Felix
We don't want KFD processes evicting each other over VRAM usage.
Therefore prevent overcommitting VRAM among KFD applications with
a per-GPU limit. Also leave enough room for page tables on top
of the application memory usage.

Signed-off-by: Felix Kuehling 
Reviewed-by: Eric Huang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  7 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c   | 52 
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h   |  8 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 60 
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   |  2 +-
 5 files changed, 75 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2c80453..40e084a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,7 @@
 #include "amdgpu_job.h"
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"
+#include "amdgpu_amdkfd.h"
 
 #define MAX_GPU_INSTANCE   16
 
@@ -979,6 +980,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds   gds;
 
+   /* KFD */
+   struct amdgpu_kfd_dev   kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
 
@@ -992,9 +996,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
 
-   /* amdkfd interface */
-   struct kfd_dev  *kfd;
-
/* soc15 register offset based on ip, instance and  segment */
uint32_t*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1c1fed6..274099a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -31,10 +31,20 @@ const struct kgd2kfd_calls *kgd2kfd;
 
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
 int amdgpu_amdkfd_init(void)
 {
+   struct sysinfo si;
int ret;
 
+   si_meminfo(&si);
+   amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+   amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
 #ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -87,8 +97,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
 
-   adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
-  adev->pdev, kfd2kgd);
+   adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+  adev->pdev, kfd2kgd);
+
+   if (adev->kfd.dev)
+   amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 }
 
 /**
@@ -128,7 +141,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
int i, n;
int last_valid_bit;
-   if (adev->kfd) {
+
+   if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -167,7 +181,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
 
if (adev->asic_type < CHIP_VEGA10) {
-   kgd2kfd->device_init(adev->kfd, &gpu_resources);
+   kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
 
@@ -207,37 +221,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val  = 0x0e0;
 
-   kgd2kfd->device_init(adev->kfd, &gpu_resources);
+   kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
 }
 
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
 {
-   if (adev->kfd) {
-   kgd2kfd->device_exit(adev->kfd);
-   adev->kfd = NULL;
+   if (adev->kfd.dev) {
+   kgd2kfd->device_exit(adev->kfd.dev);
+   adev->kfd.dev = NULL;
}
 }
 
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
 {
-   if (adev->kfd)
-   kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+   if (adev->kfd.dev)
+   kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
 {
-   if (adev->kfd)
-   kgd2kfd->suspend(adev->kfd);
+   if (adev->kfd.dev)
+   kgd2kfd->suspend(adev->kfd.dev);
 }
 
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
 {
int r = 0;
 
-   if (adev->kfd)
-   

[PATCH 2/4] drm/amdkfd: Add NULL-pointer check

2018-11-20 Thread Kuehling, Felix
top_dev->gpu is NULL for CPUs. Avoid dereferencing it if NULL.

Signed-off-by: Felix Kuehling 
---
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index aa793fc..c5ed21e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -101,7 +101,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev 
*pdev)
down_read(&topology_lock);
 
list_for_each_entry(top_dev, &topology_device_list, list)
-   if (top_dev->gpu->pdev == pdev) {
+   if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
device = top_dev->gpu;
break;
}
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 0/4] KFD upstreaming Nov 2018, part 2

2018-11-20 Thread Kuehling, Felix
This round adds support for more ROCm memory manager features:
* VRAM limit checking to avoid overcommitment
* DMABuf import for graphics interoperability
* Support for mapping doorbells into GPUVM address space

Felix Kuehling (4):
  drm/amdgpu: Add KFD VRAM limit checking
  drm/amdkfd: Add NULL-pointer check
  drm/amdkfd: Add DMABuf import functionality
  drm/amdkfd: Add support for doorbell BOs

 drivers/gpu/drm/amd/amdgpu/amdgpu.h  |   7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c   | 109 ---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h   |  19 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 170 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h  |   2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c|   4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 124 -
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h|   1 +
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c|  20 ++-
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h  |   4 +-
 include/uapi/linux/kfd_ioctl.h   |  26 +++-
 12 files changed, 423 insertions(+), 65 deletions(-)

-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH RFC 2/5] cgroup: Add mechanism to register vendor specific DRM devices

2018-11-20 Thread Tejun Heo
Hello,

On Tue, Nov 20, 2018 at 10:21:14PM +, Ho, Kenny wrote:
> By this reply, are you suggesting that vendor specific resources
> will never be acceptable to be managed under cgroup?  Let say a user

I wouldn't say never but whatever which gets included as a cgroup
controller should have clearly defined resource abstractions and the
control schemes around them including support for delegation.  AFAICS,
gpu side still seems to have a long way to go (and it's not clear
whether that's somewhere it will or needs to end up).

> want to have similar functionality as what cgroup is offering but to
> manage vendor specific resources, what would you suggest as a
> solution?  When you say keeping vendor specific resource regulation
> inside drm or specific drivers, do you mean we should replicate the
> cgroup infrastructure there or do you mean either drm or specific
> driver should query existing hierarchy (such as device or perhaps
> cpu) for the process organization information?
> 
> To put the questions in more concrete terms, let say a user wants to
> expose certain part of a gpu to a particular cgroup similar to the
> way selective cpu cores are exposed to a cgroup via cpuset, how
> should we go about enabling such functionality?

Do what the intel driver or bpf is doing?  It's not difficult to hook
into cgroup for identification purposes.

Thanks.

-- 
tejun
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH RFC 2/5] cgroup: Add mechanism to register vendor specific DRM devices

2018-11-20 Thread Ho, Kenny
Hi Tejun,

Thanks for the reply.  A few clarifying questions:

On Tue, Nov 20, 2018 at 3:21 PM Tejun Heo  wrote:
> So, I'm still pretty negative about adding drm controller at this
> point.  There isn't enough of common resource model defined yet and
> until that gets sorted out I think it's in the best interest of
> everyone involved to keep it inside drm or specific driver proper.
By this reply, are you suggesting that vendor specific resources will never be 
acceptable to be managed under cgroup?  Let say a user want to have similar 
functionality as what cgroup is offering but to manage vendor specific 
resources, what would you suggest as a solution?  When you say keeping vendor 
specific resource regulation inside drm or specific drivers, do you mean we 
should replicate the cgroup infrastructure there or do you mean either drm or 
specific driver should query existing hierarchy (such as device or perhaps cpu) 
for the process organization information?

To put the questions in more concrete terms, let say a user wants to expose 
certain part of a gpu to a particular cgroup similar to the way selective cpu 
cores are exposed to a cgroup via cpuset, how should we go about enabling such 
functionality?

Regards,
Kenny
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH RFC 4/5] drm/amdgpu: Add accounting of command submission via DRM cgroup

2018-11-20 Thread Eric Anholt
Kenny Ho  writes:

> Account for the number of command submitted to amdgpu by type on a per
> cgroup basis, for the purpose of profiling/monitoring applications.

For profiling other drivers, I've used perf tracepoints, which let you
get useful timelines of multiple events in the driver.  Have you made
use of this stat for productive profiling?


signature.asc
Description: PGP signature
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH RFC 5/5] drm/amdgpu: Add accounting of buffer object creation request via DRM cgroup

2018-11-20 Thread Eric Anholt
Kenny Ho  writes:

> Account for the total size of buffer object requested to amdgpu by
> buffer type on a per cgroup basis.
>
> x prefix in the control file name x.bo_requested.amd.stat signify
> experimental.

Why is a counting of the size of buffer objects ever allocated useful,
as opposed to the current size of buffer objects allocated?

And, really, why is this stat in cgroups, instead of a debugfs entry?


signature.asc
Description: PGP signature
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH RFC 2/5] cgroup: Add mechanism to register vendor specific DRM devices

2018-11-20 Thread Tejun Heo
Hello,

On Tue, Nov 20, 2018 at 01:58:11PM -0500, Kenny Ho wrote:
> Since many parts of the DRM subsystem has vendor-specific
> implementations, we introduce mechanisms for vendor to register their
> specific resources and control files to the DRM cgroup subsystem.  A
> vendor will register itself with the DRM cgroup subsystem first before
> registering individual DRM devices to the cgroup subsystem.
> 
> In addition to the cgroup_subsys_state that is common to all DRM
> devices, a device-specific state is introduced and it is allocated
> according to the vendor of the device.

So, I'm still pretty negative about adding drm controller at this
point.  There isn't enough of common resource model defined yet and
until that gets sorted out I think it's in the best interest of
everyone involved to keep it inside drm or specific driver proper.

Thanks.

-- 
tejun
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH][drm-next] drm/amd/display: fix dereference of pointer fs_params before it is null checked

2018-11-20 Thread Li, Sun peng (Leo)


On 2018-11-20 12:17 p.m., Colin King wrote:
> From: Colin Ian King 
> 
> Currently there are several instances of pointer fs_params being
> dereferenced before fs_params is being null checked.  Fix this by
> only dereferencing fs_params after the null check.
> 
> Detected by CoverityScan, CID#1475565 ("Dereference before null check")
> 
> Fixes: e1e8a020c6b8 ("drm/amd/display: Add support for Freesync 2 HDR and 
> Content to Display Mapping")
> Signed-off-by: Colin Ian King 

Reviewed-by: Leo Li 

Thanks!

> ---
>   .../drm/amd/display/modules/color/color_gamma.c  | 16 +++-
>   1 file changed, 11 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
> b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
> index 7480f072c375..bbecbaefb741 100644
> --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
> +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
> @@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
> *rgb_regamma,
>   const struct hw_x_point *coord_x = coordinate_x;
>   struct fixed31_32 scaledX = dc_fixpt_zero;
>   struct fixed31_32 scaledX1 = dc_fixpt_zero;
> - struct fixed31_32 max_display = 
> dc_fixpt_from_int(fs_params->max_display);
> - struct fixed31_32 min_display = 
> dc_fixpt_from_fraction(fs_params->min_display, 1);
> - struct fixed31_32 max_content = 
> dc_fixpt_from_int(fs_params->max_content);
> - struct fixed31_32 min_content = 
> dc_fixpt_from_fraction(fs_params->min_content, 1);
> + struct fixed31_32 max_display;
> + struct fixed31_32 min_display;
> + struct fixed31_32 max_content;
> + struct fixed31_32 min_content;
>   struct fixed31_32 clip = dc_fixpt_one;
>   struct fixed31_32 output;
>   bool use_eetf = false;
>   bool is_clipped = false;
> - struct fixed31_32 sdr_white_level = 
> dc_fixpt_from_int(fs_params->sdr_white_level);
> + struct fixed31_32 sdr_white_level;
>   
>   if (fs_params == NULL || fs_params->max_content == 0 ||
>   fs_params->max_display == 0)
>   return false;
>   
> + max_display = dc_fixpt_from_int(fs_params->max_display);
> + min_display = dc_fixpt_from_fraction(fs_params->min_display, 1);
> + max_content = dc_fixpt_from_int(fs_params->max_content);
> + min_content = dc_fixpt_from_fraction(fs_params->min_content, 1);
> + sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
> +
>   if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
>   min_display = dc_fixpt_from_fraction(1, 10);
>   if (fs_params->max_display < 100) // cap at 100 at the top
> 
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH RFC 4/5] drm/amdgpu: Add accounting of command submission via DRM cgroup

2018-11-20 Thread Kenny Ho
Account for the number of command submitted to amdgpu by type on a per
cgroup basis, for the purpose of profiling/monitoring applications.

x prefix in the control file name x.cmd_submitted.amd.stat signify
experimental.

Change-Id: Ibc22e5bda600f54fe820fe0af5400ca348691550
Signed-off-by: Kenny Ho 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  5 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c | 54 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h |  5 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c| 15 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h|  5 +-
 5 files changed, 83 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..b448160aed89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -33,6 +33,7 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_gem.h"
+#include "amdgpu_drmcgrp.h"
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  struct drm_amdgpu_cs_chunk_fence *data,
@@ -1275,6 +1276,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
+   struct amdgpu_ring *ring;
int i, r;
 
if (!adev->accel_working)
@@ -1317,6 +1319,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, 
struct drm_file *filp)
if (r)
goto out;
 
+   ring = to_amdgpu_ring(parser.entity->rq->sched);
+   amdgpu_drmcgrp_count_cs(current, dev, ring->funcs->type);
+
r = amdgpu_cs_submit(&parser, cs);
 
 out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
index ed8aac17769c..853b77532428 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
@@ -1,11 +1,65 @@
 // SPDX-License-Identifier: MIT
 // Copyright 2018 Advanced Micro Devices, Inc.
 #include 
+#include 
 #include 
 #include 
+#include "amdgpu_ring.h"
 #include "amdgpu_drmcgrp.h"
 
+void amdgpu_drmcgrp_count_cs(struct task_struct *task, struct drm_device *dev,
+   enum amdgpu_ring_type r_type)
+{
+   struct drmcgrp *drmcgrp = get_drmcgrp(task);
+   struct drmcgrp_device_resource *ddr;
+   struct drmcgrp *p;
+   struct amd_drmcgrp_dev_resource *a_ddr;
+
+   if (drmcgrp == NULL)
+   return;
+
+   ddr = drmcgrp->dev_resources[dev->primary->index];
+
mutex_lock(&ddr->ddev->mutex);
+   for (p = drmcgrp; p != NULL; p = parent_drmcgrp(drmcgrp)) {
+   a_ddr = ddr_amdddr(p->dev_resources[dev->primary->index]);
+
+   a_ddr->cs_count[r_type]++;
+   }
mutex_unlock(&ddr->ddev->mutex);
+}
+
+int amd_drmcgrp_cmd_submit_accounting_read(struct seq_file *sf, void *v)
+{
+   struct drmcgrp *drmcgrp = css_drmcgrp(seq_css(sf));
+   struct drmcgrp_device_resource *ddr = NULL;
+   struct amd_drmcgrp_dev_resource *a_ddr = NULL;
+   int i, j;
+
+   seq_puts(sf, "---\n");
+   for (i = 0; i < MAX_DRM_DEV; i++) {
+   ddr = drmcgrp->dev_resources[i];
+
+   if (ddr == NULL || ddr->ddev->vid != amd_drmcgrp_vendor_id)
+   continue;
+
+   a_ddr = ddr_amdddr(ddr);
+
+   seq_printf(sf, "card%d:\n", i);
+   for (j = 0; j < __MAX_AMDGPU_RING_TYPE; j++)
+   seq_printf(sf, "  %s: %llu\n", amdgpu_ring_names[j], 
a_ddr->cs_count[j]);
+   }
+
+   return 0;
+}
+
+
 struct cftype files[] = {
+   {
+   .name = "x.cmd_submitted.amd.stat",
+   .seq_show = amd_drmcgrp_cmd_submit_accounting_read,
+   .flags = CFTYPE_NOT_ON_ROOT,
+   },
{ } /* terminate */
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
index e2934b7a49f5..f894a9a1059f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
@@ -5,12 +5,17 @@
 #define _AMDGPU_DRMCGRP_H
 
 #include 
+#include "amdgpu_ring.h"
 
 /* for AMD specific DRM resources */
 struct amd_drmcgrp_dev_resource {
struct drmcgrp_device_resource ddr;
+   u64 cs_count[__MAX_AMDGPU_RING_TYPE];
 };
 
+void amdgpu_drmcgrp_count_cs(struct task_struct *task, struct drm_device *dev,
+   enum amdgpu_ring_type r_type);
+
 static inline struct amd_drmcgrp_dev_resource *ddr_amdddr(struct 
drmcgrp_device_resource *ddr)
 {
return ddr ? container_of(ddr, struct amd_drmcgrp_dev_resource, ddr) : 
NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85ec147d..1606f84d2334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -34,6 

[PATCH RFC 1/5] cgroup: Introduce cgroup for drm subsystem

2018-11-20 Thread Kenny Ho
Change-Id: I6830d3990f63f0c13abeba29b1d330cf28882831
Signed-off-by: Kenny Ho 
---
 include/linux/cgroup_drm.h| 32 
 include/linux/cgroup_subsys.h |  4 +++
 init/Kconfig  |  5 
 kernel/cgroup/Makefile|  1 +
 kernel/cgroup/drm.c   | 46 +++
 5 files changed, 88 insertions(+)
 create mode 100644 include/linux/cgroup_drm.h
 create mode 100644 kernel/cgroup/drm.c

diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
new file mode 100644
index ..79ab38b0f46d
--- /dev/null
+++ b/include/linux/cgroup_drm.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+#ifndef _CGROUP_DRM_H
+#define _CGROUP_DRM_H
+
+#ifdef CONFIG_CGROUP_DRM
+
+#include 
+
+struct drmcgrp {
+   struct cgroup_subsys_state  css;
+};
+
+static inline struct drmcgrp *css_drmcgrp(struct cgroup_subsys_state *css)
+{
+   return css ? container_of(css, struct drmcgrp, css) : NULL;
+}
+
+static inline struct drmcgrp *get_drmcgrp(struct task_struct *task)
+{
+   return css_drmcgrp(task_get_css(task, drm_cgrp_id));
+}
+
+
+static inline struct drmcgrp *parent_drmcgrp(struct drmcgrp *cg)
+{
+   return css_drmcgrp(cg->css.parent);
+}
+
+#endif /* CONFIG_CGROUP_DRM */
+#endif /* _CGROUP_DRM_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index acb77dcff3b4..ddedad809e8b 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -61,6 +61,10 @@ SUBSYS(pids)
 SUBSYS(rdma)
 #endif
 
+#if IS_ENABLED(CONFIG_CGROUP_DRM)
+SUBSYS(drm)
+#endif
+
 /*
  * The following subsystems are not supported on the default hierarchy.
  */
diff --git a/init/Kconfig b/init/Kconfig
index a4112e95724a..bee1e164443a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -836,6 +836,11 @@ config CGROUP_RDMA
  Attaching processes with active RDMA resources to the cgroup
  hierarchy is allowed even if can cross the hierarchy's limit.
 
+config CGROUP_DRM
+   bool "DRM controller (EXPERIMENTAL)"
+   help
+ Provides accounting and enforcement of resources in the DRM subsystem.
+
 config CGROUP_FREEZER
bool "Freezer controller"
help
diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile
index bfcdae896122..6af14bd93050 100644
--- a/kernel/cgroup/Makefile
+++ b/kernel/cgroup/Makefile
@@ -4,5 +4,6 @@ obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o
 obj-$(CONFIG_CGROUP_FREEZER) += freezer.o
 obj-$(CONFIG_CGROUP_PIDS) += pids.o
 obj-$(CONFIG_CGROUP_RDMA) += rdma.o
+obj-$(CONFIG_CGROUP_DRM) += drm.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_CGROUP_DEBUG) += debug.o
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
new file mode 100644
index ..d9e194b9aead
--- /dev/null
+++ b/kernel/cgroup/drm.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+// Copyright 2018 Advanced Micro Devices, Inc.
+#include 
+#include 
+#include 
+
+static u64 drmcgrp_test_read(struct cgroup_subsys_state *css,
+   struct cftype *cft)
+{
+   return 88;
+}
+
+static void drmcgrp_css_free(struct cgroup_subsys_state *css)
+{
+   struct drmcgrp *drmcgrp = css_drmcgrp(css);
+
+   kfree(css_drmcgrp(css));
+}
+
+static struct cgroup_subsys_state *
+drmcgrp_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+   struct drmcgrp *drmcgrp;
+
+   drmcgrp = kzalloc(sizeof(struct drmcgrp), GFP_KERNEL);
+   if (!drmcgrp)
+   return ERR_PTR(-ENOMEM);
+
+   return &drmcgrp->css;
+}
+
+struct cftype files[] = {
+   {
+   .name = "drm_test",
+   .read_u64 = drmcgrp_test_read,
+   },
+   { } /* terminate */
+};
+
+struct cgroup_subsys drm_cgrp_subsys = {
+   .css_alloc  = drmcgrp_css_alloc,
+   .css_free   = drmcgrp_css_free,
+   .early_init = false,
+   .legacy_cftypes = files,
+   .dfl_cftypes= files,
+};
-- 
2.19.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH RFC 3/5] drm/amdgpu: Add DRM cgroup support for AMD devices

2018-11-20 Thread Kenny Ho
Change-Id: Ib66c44ac1b1c367659e362a2fc05b6fbb3805876
Signed-off-by: Kenny Ho 
---
 drivers/gpu/drm/amd/amdgpu/Makefile |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  7 
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c | 37 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h | 19 +++
 include/drm/drmcgrp_vendors.h   |  1 +
 5 files changed, 67 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile 
b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb787d27e..5cf8048f2d75 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -186,4 +186,7 @@ amdgpu-y += $(AMD_DISPLAY_FILES)
 
 endif
 
+#DRM cgroup controller
+amdgpu-y += amdgpu_drmcgrp.o
+
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..ad0373f83ed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -33,6 +33,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -2645,6 +2646,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
 
+   /* TODO:docs */
+   if (drmcgrp_vendors[amd_drmcgrp_vendor_id] == NULL)
+   drmcgrp_register_vendor(&amd_drmcgrp_vendor, amd_drmcgrp_vendor_id);
+
+   drmcgrp_register_device(adev->ddev, amd_drmcgrp_vendor_id);
+
return 0;
 
 failed:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
new file mode 100644
index ..ed8aac17769c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+// Copyright 2018 Advanced Micro Devices, Inc.
+#include 
+#include 
+#include 
+#include "amdgpu_drmcgrp.h"
+
+struct cftype files[] = {
+   { } /* terminate */
+};
+
+struct cftype *drmcgrp_amd_get_cftypes(void)
+{
+   return files;
+}
+
+struct drmcgrp_device_resource *amd_drmcgrp_alloc_dev_resource(void)
+{
+   struct amd_drmcgrp_dev_resource *a_ddr;
+
+   a_ddr = kzalloc(sizeof(struct amd_drmcgrp_dev_resource), GFP_KERNEL);
+   if (!a_ddr)
+   return ERR_PTR(-ENOMEM);
+
+   return &a_ddr->ddr;
+}
+
+void amd_drmcgrp_free_dev_resource(struct drmcgrp_device_resource *ddr)
+{
+   kfree(ddr_amdddr(ddr));
+}
+
+struct drmcgrp_vendor amd_drmcgrp_vendor = {
+   .get_cftypes = drmcgrp_amd_get_cftypes,
+   .alloc_dev_resource = amd_drmcgrp_alloc_dev_resource,
+   .free_dev_resource = amd_drmcgrp_free_dev_resource,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
new file mode 100644
index ..e2934b7a49f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDGPU_DRMCGRP_H
+#define _AMDGPU_DRMCGRP_H
+
+#include 
+
+/* for AMD specific DRM resources */
+struct amd_drmcgrp_dev_resource {
+   struct drmcgrp_device_resource ddr;
+};
+
+static inline struct amd_drmcgrp_dev_resource *ddr_amdddr(struct 
drmcgrp_device_resource *ddr)
+{
+   return ddr ? container_of(ddr, struct amd_drmcgrp_dev_resource, ddr) : 
NULL;
+}
+
+#endif /* _AMDGPU_DRMCGRP_H */
diff --git a/include/drm/drmcgrp_vendors.h b/include/drm/drmcgrp_vendors.h
index b04d8649851b..6cfbf1825344 100644
--- a/include/drm/drmcgrp_vendors.h
+++ b/include/drm/drmcgrp_vendors.h
@@ -3,5 +3,6 @@
  */
 #if IS_ENABLED(CONFIG_CGROUP_DRM)
 
+DRMCGRP_VENDOR(amd)
 
 #endif
-- 
2.19.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH RFC 5/5] drm/amdgpu: Add accounting of buffer object creation request via DRM cgroup

2018-11-20 Thread Kenny Ho
Account for the total size of buffer object requested to amdgpu by
buffer type on a per cgroup basis.

x prefix in the control file name x.bo_requested.amd.stat signify
experimental.

Change-Id: Ifb680c4bcf3652879a7a659510e25680c2465cf6
Signed-off-by: Kenny Ho 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c | 56 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 13 +
 include/uapi/drm/amdgpu_drm.h   | 24 ++---
 4 files changed, 90 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
index 853b77532428..e3d98ed01b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.c
@@ -7,6 +7,57 @@
 #include "amdgpu_ring.h"
 #include "amdgpu_drmcgrp.h"
 
+void amdgpu_drmcgrp_count_bo_req(struct task_struct *task, struct drm_device 
*dev,
+   u32 domain, unsigned long size)
+{
+   struct drmcgrp *drmcgrp = get_drmcgrp(task);
+   struct drmcgrp_device_resource *ddr;
+   struct drmcgrp *p;
+   struct amd_drmcgrp_dev_resource *a_ddr;
+int i;
+
+   if (drmcgrp == NULL)
+   return;
+
+   ddr = drmcgrp->dev_resources[dev->primary->index];
+
+   mutex_lock(&ddr->ddev->mutex);
+   for (p = drmcgrp; p != NULL; p = parent_drmcgrp(drmcgrp)) {
+   a_ddr = ddr_amdddr(p->dev_resources[dev->primary->index]);
+
+   for (i = 0; i < __MAX_AMDGPU_MEM_DOMAIN; i++)
+   if ( (1 << i) & domain)
+   a_ddr->bo_req_count[i] += size;
+   }
+   mutex_unlock(&ddr->ddev->mutex);
+}
+
+int amd_drmcgrp_bo_req_stat_read(struct seq_file *sf, void *v)
+{
+   struct drmcgrp *drmcgrp = css_drmcgrp(seq_css(sf));
+   struct drmcgrp_device_resource *ddr = NULL;
+   struct amd_drmcgrp_dev_resource *a_ddr = NULL;
+   int i, j;
+
+   seq_puts(sf, "---\n");
+   for (i = 0; i < MAX_DRM_DEV; i++) {
+   ddr = drmcgrp->dev_resources[i];
+
+   if (ddr == NULL || ddr->ddev->vid != amd_drmcgrp_vendor_id)
+   continue;
+
+   a_ddr = ddr_amdddr(ddr);
+
+   seq_printf(sf, "card%d:\n", i);
+   for (j = 0; j < __MAX_AMDGPU_MEM_DOMAIN; j++)
+   seq_printf(sf, "  %s: %llu\n", 
amdgpu_mem_domain_names[j], a_ddr->bo_req_count[j]);
+   }
+
+   return 0;
+}
+
+
+
 void amdgpu_drmcgrp_count_cs(struct task_struct *task, struct drm_device *dev,
enum amdgpu_ring_type r_type)
 {
@@ -55,6 +106,11 @@ int amd_drmcgrp_cmd_submit_accounting_read(struct seq_file 
*sf, void *v)
 
 
 struct cftype files[] = {
+   {
+   .name = "x.bo_requested.amd.stat",
+   .seq_show = amd_drmcgrp_bo_req_stat_read,
+   .flags = CFTYPE_NOT_ON_ROOT,
+   },
{
.name = "x.cmd_submitted.amd.stat",
.seq_show = amd_drmcgrp_cmd_submit_accounting_read,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
index f894a9a1059f..8b9d61e47dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drmcgrp.h
@@ -11,10 +11,13 @@
 struct amd_drmcgrp_dev_resource {
struct drmcgrp_device_resource ddr;
u64 cs_count[__MAX_AMDGPU_RING_TYPE];
+   u64 bo_req_count[__MAX_AMDGPU_MEM_DOMAIN];
 };
 
 void amdgpu_drmcgrp_count_cs(struct task_struct *task, struct drm_device *dev,
enum amdgpu_ring_type r_type);
+void amdgpu_drmcgrp_count_bo_req(struct task_struct *task, struct drm_device 
*dev,
+   u32 domain, unsigned long size);
 
 static inline struct amd_drmcgrp_dev_resource *ddr_amdddr(struct 
drmcgrp_device_resource *ddr)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..339e1d3edad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -31,6 +31,17 @@
 #include 
 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_drmcgrp.h"
+
+char const *amdgpu_mem_domain_names[] = {
+   [AMDGPU_MEM_DOMAIN_CPU] = "cpu",
+   [AMDGPU_MEM_DOMAIN_GTT] = "gtt",
+   [AMDGPU_MEM_DOMAIN_VRAM]= "vram",
+   [AMDGPU_MEM_DOMAIN_GDS] = "gds",
+   [AMDGPU_MEM_DOMAIN_GWS] = "gws",
+   [AMDGPU_MEM_DOMAIN_OA]  = "oa",
+   [__MAX_AMDGPU_MEM_DOMAIN]   = "_max"
+};
 
 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -52,6 +63,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, 
unsigned long size,
struct amdgpu_bo_param bp;
int r;
 
+   amdgpu_drmcgrp_count_bo_req(current, adev->ddev, initial_domain, size);
+
	memset(&bp, 0, sizeof(bp));
*obj = NULL;
/* At least align on page 

[PATCH RFC 2/5] cgroup: Add mechanism to register vendor specific DRM devices

2018-11-20 Thread Kenny Ho
Since many parts of the DRM subsystem has vendor-specific
implementations, we introduce mechanisms for vendor to register their
specific resources and control files to the DRM cgroup subsystem.  A
vendor will register itself with the DRM cgroup subsystem first before
registering individual DRM devices to the cgroup subsystem.

In addition to the cgroup_subsys_state that is common to all DRM
devices, a device-specific state is introduced and it is allocated
according to the vendor of the device.

Change-Id: I908ee6975ea0585e4c30eafde4599f87094d8c65
Signed-off-by: Kenny Ho 
---
 include/drm/drm_cgroup.h  | 39 
 include/drm/drmcgrp_vendors.h |  7 +++
 include/linux/cgroup_drm.h| 26 +++
 kernel/cgroup/drm.c   | 84 +++
 4 files changed, 156 insertions(+)
 create mode 100644 include/drm/drm_cgroup.h
 create mode 100644 include/drm/drmcgrp_vendors.h

diff --git a/include/drm/drm_cgroup.h b/include/drm/drm_cgroup.h
new file mode 100644
index ..26cbea7059a6
--- /dev/null
+++ b/include/drm/drm_cgroup.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+#ifndef __DRM_CGROUP_H__
+#define __DRM_CGROUP_H__
+
+#define DRMCGRP_VENDOR(_x) _x ## _drmcgrp_vendor_id,
+enum drmcgrp_vendor_id {
+#include 
+   DRMCGRP_VENDOR_COUNT,
+};
+#undef DRMCGRP_VENDOR
+
+#define DRMCGRP_VENDOR(_x) extern struct drmcgrp_vendor _x ## _drmcgrp_vendor;
+#include 
+#undef DRMCGRP_VENDOR
+
+
+
+#ifdef CONFIG_CGROUP_DRM
+
+extern struct drmcgrp_vendor *drmcgrp_vendors[];
+
+int drmcgrp_register_vendor(struct drmcgrp_vendor *vendor, enum 
drmcgrp_vendor_id id);
+int drmcgrp_register_device(struct drm_device *device, enum drmcgrp_vendor_id 
id);
+
+#else
+static int drmcgrp_register_vendor(struct drmcgrp_vendor *vendor, enum 
drmcgrp_vendor_id id)
+{
+   return 0;
+}
+
+static int drmcgrp_register_device(struct drm_device *device, enum 
drmcgrp_vendor_id id)
+{
+   return 0;
+}
+
+#endif /* CONFIG_CGROUP_DRM */
+#endif /* __DRM_CGROUP_H__ */
diff --git a/include/drm/drmcgrp_vendors.h b/include/drm/drmcgrp_vendors.h
new file mode 100644
index ..b04d8649851b
--- /dev/null
+++ b/include/drm/drmcgrp_vendors.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+#if IS_ENABLED(CONFIG_CGROUP_DRM)
+
+
+#endif
diff --git a/include/linux/cgroup_drm.h b/include/linux/cgroup_drm.h
index 79ab38b0f46d..a776662d9593 100644
--- a/include/linux/cgroup_drm.h
+++ b/include/linux/cgroup_drm.h
@@ -6,10 +6,36 @@
 
 #ifdef CONFIG_CGROUP_DRM
 
+#include 
 #include 
+#include 
+#include 
+
+/* limit defined per the way drm_minor_alloc operates */
+#define MAX_DRM_DEV (64 * DRM_MINOR_RENDER)
+
+struct drmcgrp_device {
+   enum drmcgrp_vendor_id  vid;
+   struct drm_device   *dev;
+   struct mutexmutex;
+};
+
+/* vendor-common resource counting goes here */
+/* this struct should be included in the vendor specific resource */
+struct drmcgrp_device_resource {
+   struct drmcgrp_device   *ddev;
+};
+
+struct drmcgrp_vendor {
+   struct cftype *(*get_cftypes)(void);
+   struct drmcgrp_device_resource *(*alloc_dev_resource)(void);
+   void (*free_dev_resource)(struct drmcgrp_device_resource *dev_resource);
+};
+
 
 struct drmcgrp {
struct cgroup_subsys_state  css;
+   struct drmcgrp_device_resource  *dev_resources[MAX_DRM_DEV];
 };
 
 static inline struct drmcgrp *css_drmcgrp(struct cgroup_subsys_state *css)
diff --git a/kernel/cgroup/drm.c b/kernel/cgroup/drm.c
index d9e194b9aead..f9630cc389bc 100644
--- a/kernel/cgroup/drm.c
+++ b/kernel/cgroup/drm.c
@@ -1,8 +1,30 @@
 // SPDX-License-Identifier: MIT
 // Copyright 2018 Advanced Micro Devices, Inc.
+#include 
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
+#include 
+#include 
+
+/* generate an array of drm cgroup vendor pointers */
+#define DRMCGRP_VENDOR(_x)[_x ## _drmcgrp_vendor_id] = NULL,
+struct drmcgrp_vendor *drmcgrp_vendors[] = {
+#include 
+};
+#undef DRMCGRP_VENDOR
+EXPORT_SYMBOL(drmcgrp_vendors);
+
+static DEFINE_MUTEX(drmcgrp_mutex);
+
+/* indexed by drm_minor for access speed */
+static struct drmcgrp_device   *known_drmcgrp_devs[MAX_DRM_DEV];
+
+static int max_minor;
+
 
 static u64 drmcgrp_test_read(struct cgroup_subsys_state *css,
struct cftype *cft)
@@ -13,6 +35,12 @@ static u64 drmcgrp_test_read(struct cgroup_subsys_state *css,
 static void drmcgrp_css_free(struct cgroup_subsys_state *css)
 {
struct drmcgrp *drmcgrp = css_drmcgrp(css);
+   int i;
+
+   for (i = 0; i <= max_minor; i++) {
+   if (drmcgrp->dev_resources[i] != NULL)
+   
drmcgrp_vendors[known_drmcgrp_devs[i]->vid]->free_dev_resource(drmcgrp->dev_resources[i]);
+   }
 
kfree(css_drmcgrp(css));
 }
@@ -21,11 +49,27 @@ static struct cgroup_subsys_state *
 

[PATCH][drm-next] drm/amd/display: fix dereference of pointer fs_params before it is null checked

2018-11-20 Thread Colin King
From: Colin Ian King 

Currently there are several instances of pointer fs_params being
dereferenced before fs_params is being null checked.  Fix this by
only dereferencing fs_params after the null check.

Detected by CoverityScan, CID#1475565 ("Dereference before null check")

Fixes: e1e8a020c6b8 ("drm/amd/display: Add support for Freesync 2 HDR and 
Content to Display Mapping")
Signed-off-by: Colin Ian King 
---
 .../drm/amd/display/modules/color/color_gamma.c  | 16 +++-
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 7480f072c375..bbecbaefb741 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex 
*rgb_regamma,
const struct hw_x_point *coord_x = coordinate_x;
struct fixed31_32 scaledX = dc_fixpt_zero;
struct fixed31_32 scaledX1 = dc_fixpt_zero;
-   struct fixed31_32 max_display = 
dc_fixpt_from_int(fs_params->max_display);
-   struct fixed31_32 min_display = 
dc_fixpt_from_fraction(fs_params->min_display, 1);
-   struct fixed31_32 max_content = 
dc_fixpt_from_int(fs_params->max_content);
-   struct fixed31_32 min_content = 
dc_fixpt_from_fraction(fs_params->min_content, 1);
+   struct fixed31_32 max_display;
+   struct fixed31_32 min_display;
+   struct fixed31_32 max_content;
+   struct fixed31_32 min_content;
struct fixed31_32 clip = dc_fixpt_one;
struct fixed31_32 output;
bool use_eetf = false;
bool is_clipped = false;
-   struct fixed31_32 sdr_white_level = 
dc_fixpt_from_int(fs_params->sdr_white_level);
+   struct fixed31_32 sdr_white_level;
 
if (fs_params == NULL || fs_params->max_content == 0 ||
fs_params->max_display == 0)
return false;
 
+   max_display = dc_fixpt_from_int(fs_params->max_display);
+   min_display = dc_fixpt_from_fraction(fs_params->min_display, 1);
+   max_content = dc_fixpt_from_int(fs_params->max_content);
+   min_content = dc_fixpt_from_fraction(fs_params->min_content, 1);
+   sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
min_display = dc_fixpt_from_fraction(1, 10);
if (fs_params->max_display < 100) // cap at 100 at the top
-- 
2.19.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] pci: fix incorrect value returned from pcie_get_speed_cap

2018-11-20 Thread Alex Deucher
On Tue, Nov 20, 2018 at 10:13 AM Bjorn Helgaas  wrote:
>
> On Tue, Nov 20, 2018 at 09:17:52AM -0500, Alex Deucher wrote:
> > On Mon, Nov 19, 2018 at 7:47 PM Bjorn Helgaas  wrote:
> > > On Tue, Oct 30, 2018 at 12:36:08PM -0400, Mikulas Patocka wrote:
> > > > The macros PCI_EXP_LNKCAP_SLS_*GB are values, not bit masks. We must 
> > > > mask
> > > > the register and compare it against them.
> > > >
> > > > This patch fixes errors "amdgpu: [powerplay] failed to send message 261
> > > > ret is 0" errors when PCIe-v3 card is plugged into PCIe-v1 slot, because
> > > > the slot is being incorrectly reported as PCIe-v3 capable.
> > > >
> > > > Signed-off-by: Mikulas Patocka 
> > > > Fixes: 6cf57be0f78e ("PCI: Add pcie_get_speed_cap() to find max 
> > > > supported link speed")
> > > > Cc: sta...@vger.kernel.org# v4.17+
> > > >
> > > > ---
> > > >  drivers/pci/pci.c |8 
> > > >  1 file changed, 4 insertions(+), 4 deletions(-)
> > > >
> > > > Index: linux-4.19/drivers/pci/pci.c
> > > > ===
> > > > --- linux-4.19.orig/drivers/pci/pci.c 2018-10-30 16:58:58.0 
> > > > +0100
> > > > +++ linux-4.19/drivers/pci/pci.c  2018-10-30 16:58:58.0 
> > > > +0100
> > > > @@ -5492,13 +5492,13 @@ enum pci_bus_speed pcie_get_speed_cap(st
> > > >
> > > >   pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
> > > >   if (lnkcap) {
> > > > - if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
> > > > + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > > PCI_EXP_LNKCAP_SLS_16_0GB)
> > > >   return PCIE_SPEED_16_0GT;
> > > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
> > > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > > PCI_EXP_LNKCAP_SLS_8_0GB)
> > > >   return PCIE_SPEED_8_0GT;
> > > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
> > > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) 
> > > > ==PCI_EXP_LNKCAP_SLS_5_0GB)
> > > >   return PCIE_SPEED_5_0GT;
> > > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
> > > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > > PCI_EXP_LNKCAP_SLS_2_5GB)
> > > >   return PCIE_SPEED_2_5GT;
> > > >   }
> > > >
> > >
> > > I'd like to apply this as below, where I removed the 8_0GB and 16_0GB
> > > cases as recommended by the spec.  I can't test it myself, and the
> > > bugzillas don't contain enough information for me to confirm that the
> > > patch below is enough (the "lspci -vv" output of the root port and GPU
> > > is what I would need).
> > >
> > > I'm confused about the fact that 6cf57be0f78e appeared in v4.17, but
> > > v4.18 works fine according to both bugzillas.
> >
> > This issue affects AMD GPUs because we switched from using an open
> > coded check for pcie link speeds in the driver to using the common
> > pcie variants in
> > https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=v4.19=5d9a6330403271fbb1244f14380a7cc44662796f
>
> OK, thanks.  I added that SHA1 and a note to explain the connection.
>
> > The patch below would regress performance, at least on AMD GPUs, since
> > we'd end up reporting a max speed of gen 2 (5 GT/s) which would cause
> > the driver to limit the speed to gen2 even if gen3 or 4 are available.
>
> I guess this means these parts are broken with respect to the spec,
> since they support gen3 speeds but don't implement LnkCap2?

Sorry, I mis-read the patch.  The patch is correct and our parts are
compliant.  We are only hitting this issue on non-gen3 platforms which
would end up in the second case.  Sorry for the confusion.  You new
patch is:
Acked-by: Alex Deucher 

Alex

>
> Gen2, i.e., PCIe r2.1, only defined 2.5 GT/s and 5 GT/s.  Devices
> capable of the higher speeds added by PCIe r3.0 are supposed to
> implement LnkCap2, but if we're even getting to this code, it means
> LnkCap2 was zero.
>
> If we confirm that this is a device defect, the question is what the
> best way to work around it is.  Probably the original patch is easier
> than some sort of quirk, but we'd need to expand the comment a little
> bit to explain why we're not following the spec recommendation.
>
> It looks like lspci probably also needs some updating here -- it
> currently doesn't do anything at all with LnkCap2.
>
> > > I also don't have a good feel for whether this is urgent enough to be
> > > a v4.20 fix or whether it can wait for v4.21.  Evidence either way
> > > would help.
> >
> > I'd like it to land for 4.19 and 4.20 at least.  Alternatively, we
> > could revert all of the drm patches to and bring back all the open
> > coded implementations, but it's a fairly large number of patches to
> > revert.
>
> OK, sounds like it makes sense to do this for v4.20 and backport it at
> least to v4.19 stable.  I do want to get the places below fixed also.
> They may not be as urgent, but we might as 

Re: [PATCH 1/4] drm/edid: Pass connector to AVI inforframe functions

2018-11-20 Thread Thierry Reding
On Tue, Nov 20, 2018 at 06:13:42PM +0200, Ville Syrjala wrote:
> From: Ville Syrjälä 
> 
> Make life easier for drivers by simply passing the connector
> to drm_hdmi_avi_infoframe_from_display_mode() and
> drm_hdmi_avi_infoframe_quant_range(). That way drivers don't
> need to worry about is_hdmi2_sink mess.
> 
> Cc: Alex Deucher 
> Cc: "Christian König" 
> Cc: "David (ChunMing) Zhou" 
> Cc: Archit Taneja 
> Cc: Andrzej Hajda 
> Cc: Laurent Pinchart 
> Cc: Inki Dae 
> Cc: Joonyoung Shim 
> Cc: Seung-Woo Kim 
> Cc: Kyungmin Park 
> Cc: Russell King 
> Cc: CK Hu 
> Cc: Philipp Zabel 
> Cc: Rob Clark 
> Cc: Ben Skeggs 
> Cc: Tomi Valkeinen 
> Cc: Sandy Huang 
> Cc: "Heiko Stübner" 
> Cc: Benjamin Gaignard 
> Cc: Vincent Abriou 
> Cc: Thierry Reding 
> Cc: Eric Anholt 
> Cc: Shawn Guo 
> Cc: Ilia Mirkin 
> Cc: amd-gfx@lists.freedesktop.org
> Cc: linux-arm-...@vger.kernel.org
> Cc: freedr...@lists.freedesktop.org
> Cc: nouv...@lists.freedesktop.org
> Cc: linux-te...@vger.kernel.org
> Signed-off-by: Ville Syrjälä 
> ---
>  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c|  2 +-
>  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c|  2 +-
>  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |  3 ++-
>  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |  2 +-
>  drivers/gpu/drm/bridge/analogix-anx78xx.c |  5 ++--
>  drivers/gpu/drm/bridge/sii902x.c  |  3 ++-
>  drivers/gpu/drm/bridge/sil-sii8620.c  |  3 +--
>  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c |  3 ++-
>  drivers/gpu/drm/drm_edid.c| 33 ++-
>  drivers/gpu/drm/exynos/exynos_hdmi.c  |  3 ++-
>  drivers/gpu/drm/i2c/tda998x_drv.c |  3 ++-
>  drivers/gpu/drm/i915/intel_hdmi.c | 14 +-
>  drivers/gpu/drm/i915/intel_lspcon.c   | 15 ++-
>  drivers/gpu/drm/i915/intel_sdvo.c | 10 ---
>  drivers/gpu/drm/mediatek/mtk_hdmi.c   |  3 ++-
>  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c|  3 ++-
>  drivers/gpu/drm/nouveau/dispnv50/disp.c   |  7 +++--
>  drivers/gpu/drm/omapdrm/omap_encoder.c|  5 ++--
>  drivers/gpu/drm/radeon/radeon_audio.c |  2 +-
>  drivers/gpu/drm/rockchip/inno_hdmi.c  |  4 ++-
>  drivers/gpu/drm/sti/sti_hdmi.c|  3 ++-
>  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c|  3 ++-
>  drivers/gpu/drm/tegra/hdmi.c  |  3 ++-
>  drivers/gpu/drm/tegra/sor.c   |  3 ++-
>  drivers/gpu/drm/vc4/vc4_hdmi.c| 11 +---
>  drivers/gpu/drm/zte/zx_hdmi.c |  4 ++-
>  include/drm/drm_edid.h|  8 +++---
>  27 files changed, 94 insertions(+), 66 deletions(-)

That's actually a lot nicer:

Acked-by: Thierry Reding 


signature.asc
Description: PGP signature
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/4] drm/radeon: Use drm_hdmi_avi_infoframe_quant_range()

2018-11-20 Thread Ville Syrjala
From: Ville Syrjälä 

Fill out the AVI infoframe quantization range bits using
drm_hdmi_avi_infoframe_quant_range() instead of hand rolling it.

This changes the behaviour slightly as
drm_hdmi_avi_infoframe_quant_range() will set a non-zero Q bit
even when QS==0 iff the Q bit matched the default quantization
range for the given mode. This matches the recommendation in
HDMI 2.0 and is allowed even before that.

Cc: Alex Deucher 
Cc: "Christian König" 
Cc: "David (ChunMing) Zhou" 
Cc: amd-gfx@lists.freedesktop.org
Signed-off-by: Ville Syrjälä 
---
 drivers/gpu/drm/radeon/radeon_audio.c | 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_audio.c 
b/drivers/gpu/drm/radeon/radeon_audio.c
index 5a7d48339b32..708765bf9e66 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -523,14 +523,11 @@ static int radeon_audio_set_avi_packet(struct drm_encoder 
*encoder,
}
 
if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
-   if 
(drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-   if (radeon_encoder->output_csc == 
RADEON_OUTPUT_CSC_TVRGB)
-   frame.quantization_range = 
HDMI_QUANTIZATION_RANGE_LIMITED;
-   else
-   frame.quantization_range = 
HDMI_QUANTIZATION_RANGE_FULL;
-   } else {
-   frame.quantization_range = 
HDMI_QUANTIZATION_RANGE_DEFAULT;
-   }
+   drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
+  radeon_encoder->output_csc 
== RADEON_OUTPUT_CSC_TVRGB ?
+  
HDMI_QUANTIZATION_RANGE_LIMITED :
+  HDMI_QUANTIZATION_RANGE_FULL,
+  
drm_rgb_quant_range_selectable(radeon_connector_edid(connector)));
}
 
	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
-- 
2.18.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 4/4] drm/edid: Add display_info.rgb_quant_range_selectable

2018-11-20 Thread Ville Syrjala
From: Ville Syrjälä 

Move the CEA-861 QS bit handling entirely into the edid code. No
need to bother the drivers with this.

Cc: Alex Deucher 
Cc: "Christian König" 
Cc: "David (ChunMing) Zhou" 
Cc: amd-gfx@lists.freedesktop.org
Cc: Eric Anholt  (supporter:DRM DRIVERS FOR VC4)
Signed-off-by: Ville Syrjälä 
---
 drivers/gpu/drm/drm_edid.c| 70 ---
 drivers/gpu/drm/i915/intel_drv.h  |  1 -
 drivers/gpu/drm/i915/intel_hdmi.c |  8 +--
 drivers/gpu/drm/i915/intel_lspcon.c   |  3 +-
 drivers/gpu/drm/i915/intel_sdvo.c |  7 +--
 drivers/gpu/drm/radeon/radeon_audio.c |  3 +-
 drivers/gpu/drm/vc4/vc4_hdmi.c|  9 +---
 include/drm/drm_connector.h   |  6 +++
 include/drm/drm_edid.h|  4 +-
 9 files changed, 43 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 501ac05ba7da..cbee2f745548 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3641,6 +3641,20 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
return oui == HDMI_FORUM_IEEE_OUI;
 }
 
+static bool cea_db_is_vcdb(const u8 *db)
+{
+   if (cea_db_tag(db) != USE_EXTENDED_TAG)
+   return false;
+
+   if (cea_db_payload_len(db) != 2)
+   return false;
+
+   if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK)
+   return false;
+
+   return true;
+}
+
 static bool cea_db_is_y420cmdb(const u8 *db)
 {
if (cea_db_tag(db) != USE_EXTENDED_TAG)
@@ -4223,41 +4237,6 @@ bool drm_detect_monitor_audio(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_detect_monitor_audio);
 
-/**
- * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
- * @edid: EDID block to scan
- *
- * Check whether the monitor reports the RGB quantization range selection
- * as supported. The AVI infoframe can then be used to inform the monitor
- * which quantization range (full or limited) is used.
- *
- * Return: True if the RGB quantization range is selectable, false otherwise.
- */
-bool drm_rgb_quant_range_selectable(struct edid *edid)
-{
-   u8 *edid_ext;
-   int i, start, end;
-
-   edid_ext = drm_find_cea_extension(edid);
-   if (!edid_ext)
-   return false;
-
-   if (cea_db_offsets(edid_ext, &start, &end))
-   return false;
-
-   for_each_cea_db(edid_ext, i, start, end) {
-   if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
-   cea_db_payload_len(&edid_ext[i]) == 2 &&
-   cea_db_extended_tag(&edid_ext[i]) ==
-   EXT_VIDEO_CAPABILITY_BLOCK) {
-   DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
-   return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
-   }
-   }
-
-   return false;
-}
-EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
 /**
  * drm_default_rgb_quant_range - default RGB quantization range
@@ -4278,6 +4257,16 @@ drm_default_rgb_quant_range(const struct 
drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_default_rgb_quant_range);
 
+static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
+{
+   struct drm_display_info *info = &connector->display_info;
+
+   DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
+
+   if (db[2] & EDID_CEA_VCDB_QS)
+   info->rgb_quant_range_selectable = true;
+}
+
 static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
   const u8 *db)
 {
@@ -4452,6 +4441,8 @@ static void drm_parse_cea_ext(struct drm_connector 
*connector,
drm_parse_hdmi_forum_vsdb(connector, db);
if (cea_db_is_y420cmdb(db))
drm_parse_y420cmdb_bitmap(connector, db);
+   if (cea_db_is_vcdb(db))
+   drm_parse_vcdb(connector, db);
}
 }
 
@@ -4472,6 +4463,7 @@ drm_reset_display_info(struct drm_connector *connector)
info->max_tmds_clock = 0;
info->dvi_dual = false;
info->has_hdmi_infoframe = false;
+   info->rgb_quant_range_selectable = false;
 memset(&info->hdmi, 0, sizeof(info->hdmi));
 
info->non_desktop = 0;
@@ -4939,15 +4931,15 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
  * @connector: the connector
  * @mode: DRM display mode
  * @rgb_quant_range: RGB quantization range (Q)
- * @rgb_quant_range_selectable: Sink support selectable RGB quantization range 
(QS)
  */
 void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
   struct drm_connector *connector,
   const struct drm_display_mode *mode,
-  enum hdmi_quantization_range rgb_quant_range,
-  bool rgb_quant_range_selectable)
+  enum hdmi_quantization_range rgb_quant_range)
 {
+   const struct drm_display_info *info = &connector->display_info;
+
   

[PATCH 1/4] drm/edid: Pass connector to AVI infoframe functions

2018-11-20 Thread Ville Syrjala
From: Ville Syrjälä 

Make life easier for drivers by simply passing the connector
to drm_hdmi_avi_infoframe_from_display_mode() and
drm_hdmi_avi_infoframe_quant_range(). That way drivers don't
need to worry about is_hdmi2_sink mess.

Cc: Alex Deucher 
Cc: "Christian König" 
Cc: "David (ChunMing) Zhou" 
Cc: Archit Taneja 
Cc: Andrzej Hajda 
Cc: Laurent Pinchart 
Cc: Inki Dae 
Cc: Joonyoung Shim 
Cc: Seung-Woo Kim 
Cc: Kyungmin Park 
Cc: Russell King 
Cc: CK Hu 
Cc: Philipp Zabel 
Cc: Rob Clark 
Cc: Ben Skeggs 
Cc: Tomi Valkeinen 
Cc: Sandy Huang 
Cc: "Heiko Stübner" 
Cc: Benjamin Gaignard 
Cc: Vincent Abriou 
Cc: Thierry Reding 
Cc: Eric Anholt 
Cc: Shawn Guo 
Cc: Ilia Mirkin 
Cc: amd-gfx@lists.freedesktop.org
Cc: linux-arm-...@vger.kernel.org
Cc: freedr...@lists.freedesktop.org
Cc: nouv...@lists.freedesktop.org
Cc: linux-te...@vger.kernel.org
Signed-off-by: Ville Syrjälä 
---
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c|  2 +-
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |  2 +-
 drivers/gpu/drm/bridge/analogix-anx78xx.c |  5 ++--
 drivers/gpu/drm/bridge/sii902x.c  |  3 ++-
 drivers/gpu/drm/bridge/sil-sii8620.c  |  3 +--
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c |  3 ++-
 drivers/gpu/drm/drm_edid.c| 33 ++-
 drivers/gpu/drm/exynos/exynos_hdmi.c  |  3 ++-
 drivers/gpu/drm/i2c/tda998x_drv.c |  3 ++-
 drivers/gpu/drm/i915/intel_hdmi.c | 14 +-
 drivers/gpu/drm/i915/intel_lspcon.c   | 15 ++-
 drivers/gpu/drm/i915/intel_sdvo.c | 10 ---
 drivers/gpu/drm/mediatek/mtk_hdmi.c   |  3 ++-
 drivers/gpu/drm/msm/hdmi/hdmi_bridge.c|  3 ++-
 drivers/gpu/drm/nouveau/dispnv50/disp.c   |  7 +++--
 drivers/gpu/drm/omapdrm/omap_encoder.c|  5 ++--
 drivers/gpu/drm/radeon/radeon_audio.c |  2 +-
 drivers/gpu/drm/rockchip/inno_hdmi.c  |  4 ++-
 drivers/gpu/drm/sti/sti_hdmi.c|  3 ++-
 drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c|  3 ++-
 drivers/gpu/drm/tegra/hdmi.c  |  3 ++-
 drivers/gpu/drm/tegra/sor.c   |  3 ++-
 drivers/gpu/drm/vc4/vc4_hdmi.c| 11 +---
 drivers/gpu/drm/zte/zx_hdmi.c |  4 ++-
 include/drm/drm_edid.h|  8 +++---
 27 files changed, 94 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 4cfecdce29a3..1f0426d2fc2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1682,7 +1682,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder 
*encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
 
-   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7c868916d90f..2280b971d758 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1724,7 +1724,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder 
*encoder,
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
 
-   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c 
b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 17eaaba36017..db443ec53d3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1423,6 +1423,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct 
drm_encoder *encoder,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+   struct drm_connector *connector = 
amdgpu_get_connector_for_encoder(encoder);
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
uint8_t *payload = buffer + 3;
@@ -1430,7 +1431,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct 
drm_encoder *encoder,
ssize_t err;
u32 tmp;
 
-   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+   err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c 

[PATCH v4 2/2] drm/amd: Add abm level drm property

2018-11-20 Thread David Francis
Adaptive Backlight Management (ABM) is a feature
that reduces backlight level to save power, while
increasing pixel contrast and pixel luminance
to maintain readability and image quality.

ABM will adjust in response to the
pixel luminance of the displayed content.

ABM is made available as a drm property on eDP
monitors called "abm level", which ranges from 0 to 4.
When this property is set to 0, ABM is off.  Levels 1
to 4 represent different ranges of backlight reduction.
At higher levels both the backlight reduction and pixel
adjustment will be greater.

ABM requires DMCU firmware, which is currently available for
Raven ASICs only.  If the feature does not work, please
ensure your firmware is up to date.

v2:
Fix commit message, only attach property if DMCU loaded
v3:
Store ABM level in crtc state to accommodate dc
v4:
Fix ABM saving on dpms cycle

Signed-off-by: David Francis 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c   |  5 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h  |  2 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 36 ---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  3 ++
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 11 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |  1 +
 6 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7d6a36bca9dd..ced8cefa223b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -637,6 +637,11 @@ int amdgpu_display_modeset_create_props(struct 
amdgpu_device *adev)
 "freesync_capable");
if (!adev->mode_info.freesync_capable_property)
return -ENOMEM;
+   adev->mode_info.abm_level_property =
+   drm_property_create_range(adev->ddev, 0,
+   "abm level", 0, 4);
+   if (!adev->mode_info.abm_level_property)
+   return -ENOMEM;
}
 
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 1627dd3413c7..2938635c0fc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -342,6 +342,8 @@ struct amdgpu_mode_info {
struct drm_property *freesync_property;
/* it is used to know about display capability of freesync mode */
struct drm_property *freesync_capable_property;
+   /* Adaptive Backlight Modulation (power feature) */
+   struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f71febb4210d..95b1106e0662 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2920,6 +2920,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->adjust = cur->adjust;
state->vrr_infopacket = cur->vrr_infopacket;
state->freesync_enabled = cur->freesync_enabled;
+   state->abm_level = cur->abm_level;
 
/* TODO Duplicate dc_stream after objects are stream object is 
flattened */
 
@@ -3038,6 +3039,9 @@ int amdgpu_dm_connector_atomic_set_property(struct 
drm_connector *connector,
} else if (property == adev->mode_info.freesync_capable_property) {
dm_new_state->freesync_capable = val;
ret = 0;
+   } else if (property == adev->mode_info.abm_level_property) {
+   dm_new_state->abm_level = val;
+   ret = 0;
}
 
return ret;
@@ -3086,7 +3090,11 @@ int amdgpu_dm_connector_atomic_get_property(struct 
drm_connector *connector,
} else if (property == adev->mode_info.freesync_capable_property) {
*val = dm_state->freesync_capable;
ret = 0;
+   } else if (property == adev->mode_info.abm_level_property) {
+   *val = dm_state->abm_level;
+   ret = 0;
}
+
return ret;
 }
 
@@ -3151,6 +3159,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct 
drm_connector *connector)
 
new_state->freesync_capable = state->freesync_capable;
new_state->freesync_enable = state->freesync_enable;
+   new_state->abm_level = state->abm_level;
 
 return &new_state->base;
 }
@@ -3904,6 +3913,12 @@ void amdgpu_dm_connector_init_helper(struct 
amdgpu_display_manager *dm,
 drm_object_attach_property(&aconnector->base.base,
adev->mode_info.freesync_capable_property, 0);
}
+
+   if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+   dc_is_dmcu_initialized(adev->dm.dc)) {
+   drm_object_attach_property(&aconnector->base.base,
+   

[PATCH v4 1/2] drm/amd: Load DMCU IRAM

2018-11-20 Thread David Francis
DMCU IRAM must be loaded by the driver before DMCU
can function.

Move the IRAM code out of the shadows and into a new file
modules/power/power_helpers.c

The IRAM table contains the backlight curve and ABM parameters

Add this new file to the Makefiles

Call dmcu_load_iram in late init of DM

Move struct dmcu_version from dc.h to dmcu.h to allow
dmcu to be included on its own

Signed-off-by: David Francis 
---
 drivers/gpu/drm/amd/display/Makefile  |   3 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  21 ++
 drivers/gpu/drm/amd/display/dc/dc.h   |   8 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h  |   7 +
 .../drm/amd/display/modules/power/Makefile|  31 ++
 .../amd/display/modules/power/power_helpers.c | 326 ++
 .../amd/display/modules/power/power_helpers.h |  47 +++
 7 files changed, 435 insertions(+), 8 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/modules/power/Makefile
 create mode 100644 drivers/gpu/drm/amd/display/modules/power/power_helpers.c
 create mode 100644 drivers/gpu/drm/amd/display/modules/power/power_helpers.h

diff --git a/drivers/gpu/drm/amd/display/Makefile 
b/drivers/gpu/drm/amd/display/Makefile
index c97dc9613325..cfde1568c79a 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
 
 #TODO: remove when Timing Sync feature is complete
 subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
 
-DAL_LIBS = amdgpu_dm dcmodules/freesync modules/color 
modules/info_packet
+DAL_LIBS = amdgpu_dm dcmodules/freesync modules/color 
modules/info_packet modules/power
 
 AMD_DAL = $(addsuffix /Makefile, $(addprefix 
$(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0c1a533eb531..f71febb4210d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -72,6 +72,7 @@
 #endif
 
 #include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
 
 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -643,6 +644,26 @@ static int dm_late_init(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+   struct dmcu_iram_parameters params;
+   unsigned int linear_lut[16];
+   int i;
+   struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+   bool ret;
+
+   for (i = 0; i < 16; i++)
+   linear_lut[i] = 0xFFFF * i / 15;
+
+   params.set = 0;
+   params.backlight_ramping_start = 0xCCCC;
+   params.backlight_ramping_reduction = 0xCCCCCCCC;
+   params.backlight_lut_array_size = 16;
+   params.backlight_lut_array = linear_lut;
+
+   ret = dmcu_load_iram(dmcu, params);
+
+   if (!ret)
+   return -EINVAL;
+
return detect_mst_link_for_all_connectors(adev->ddev);
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 18865a76ea55..6b0988310138 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,6 +36,7 @@
 
 #include "inc/hw_sequencer.h"
 #include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
 #define DC_VER "3.2.06"
@@ -47,13 +48,6 @@
 
/***
  * Display Core Interfaces
  
**/
-struct dmcu_version {
-   unsigned int date;
-   unsigned int month;
-   unsigned int year;
-   unsigned int interface_version;
-};
-
 struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747fb61c..cb85eaa9857f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
DMCU_RUNNING = 1
 };
 
+struct dmcu_version {
+   unsigned int date;
+   unsigned int month;
+   unsigned int year;
+   unsigned int interface_version;
+};
+
 struct dmcu {
struct dc_context *ctx;
const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile 
b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index ..87851f892a52
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, 

[PATCH] drm/amd: Query and use ACPI backlight caps

2018-11-20 Thread David Francis
ACPI ATIF has a function called query
backlight transfer characteristics.  Among the
information returned by this function is
the minimum and maximum input signals for the
backlight

Call that function on ACPI init.  When DM
backlight device is updated, copy over the
backlight caps into DM, but only once.  Use
the backlight caps in the backlight-to-dc
calculation.

Signed-off-by: David Francis 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  | 83 +++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 59 ++---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 13 +++
 drivers/gpu/drm/amd/include/amd_acpi.h| 24 ++
 5 files changed, 170 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2c80453ca350..adbad0e2d4ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1255,6 +1255,9 @@ bool 
amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+   struct amdgpu_dm_backlight_caps *caps);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 471266901d1b..47db65926d71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -65,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+   struct amdgpu_dm_backlight_caps backlight_caps;
 };
 
 /* Call the ATIF method
@@ -297,6 +298,65 @@ static int amdgpu_atif_get_notification_params(struct 
amdgpu_atif *atif)
return err;
 }
 
+/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @handle: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+   union acpi_object *info;
+   struct atif_qbtc_output characteristics;
+   struct atif_qbtc_arguments arguments;
+   struct acpi_buffer params;
+   size_t size;
+   int err = 0;
+
+   arguments.size = sizeof(arguments);
+   arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+   params.length = sizeof(arguments);
+   params.pointer = (void *)&arguments;
+
+   info = amdgpu_atif_call(atif,
+   ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+   &params);
+   if (!info) {
+   err = -EIO;
+   goto out;
+   }
+
+   size = *(u16 *) info->buffer.pointer;
+   if (size < 10) {
+   err = -EINVAL;
+   goto out;
+   }
+
+   memset(&characteristics, 0, sizeof(characteristics));
+   size = min(sizeof(characteristics), size);
+   memcpy(&characteristics, info->buffer.pointer, size);
+
+   atif->backlight_caps.caps_valid = true;
+   atif->backlight_caps.min_input_signal =
+   characteristics.min_input_signal;
+   atif->backlight_caps.max_input_signal =
+   characteristics.max_input_signal;
+out:
+   kfree(info);
+   return err;
+}
+
 /**
  * amdgpu_atif_get_sbios_requests - get requested sbios event
  *
@@ -786,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
 
+   if (atif->functions.query_backlight_transfer_characteristics) {
+   ret = amdgpu_atif_query_backlight_caps(atif);
+   if (ret) {
+   DRM_DEBUG_DRIVER("Call to 
QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+   ret);
+   atif->backlight_caps.caps_valid = false;
+   }
+   } else {
+   atif->backlight_caps.caps_valid = false;
+   }
+
 out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(>acpi_nb);
@@ -793,6 +864,18 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
return ret;
 }
 
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+   struct amdgpu_dm_backlight_caps *caps)
+{
+   if (!adev->atif) {
+  

Re: [PATCH] pci: fix incorrect value returned from pcie_get_speed_cap

2018-11-20 Thread Bjorn Helgaas
On Tue, Nov 20, 2018 at 09:17:52AM -0500, Alex Deucher wrote:
> On Mon, Nov 19, 2018 at 7:47 PM Bjorn Helgaas  wrote:
> > On Tue, Oct 30, 2018 at 12:36:08PM -0400, Mikulas Patocka wrote:
> > > The macros PCI_EXP_LNKCAP_SLS_*GB are values, not bit masks. We must mask
> > > the register and compare it against them.
> > >
> > > This patch fixes errors "amdgpu: [powerplay] failed to send message 261
> > > ret is 0" errors when PCIe-v3 card is plugged into PCIe-v1 slot, because
> > > the slot is being incorrectly reported as PCIe-v3 capable.
> > >
> > > Signed-off-by: Mikulas Patocka 
> > > Fixes: 6cf57be0f78e ("PCI: Add pcie_get_speed_cap() to find max supported 
> > > link speed")
> > > Cc: sta...@vger.kernel.org# v4.17+
> > >
> > > ---
> > >  drivers/pci/pci.c |8 
> > >  1 file changed, 4 insertions(+), 4 deletions(-)
> > >
> > > Index: linux-4.19/drivers/pci/pci.c
> > > ===
> > > --- linux-4.19.orig/drivers/pci/pci.c 2018-10-30 16:58:58.0 +0100
> > > +++ linux-4.19/drivers/pci/pci.c  2018-10-30 16:58:58.0 +0100
> > > @@ -5492,13 +5492,13 @@ enum pci_bus_speed pcie_get_speed_cap(st
> > >
> > >   pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
> > >   if (lnkcap) {
> > > - if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
> > > + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > PCI_EXP_LNKCAP_SLS_16_0GB)
> > >   return PCIE_SPEED_16_0GT;
> > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
> > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > PCI_EXP_LNKCAP_SLS_8_0GB)
> > >   return PCIE_SPEED_8_0GT;
> > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
> > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) 
> > > ==PCI_EXP_LNKCAP_SLS_5_0GB)
> > >   return PCIE_SPEED_5_0GT;
> > > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
> > > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > > PCI_EXP_LNKCAP_SLS_2_5GB)
> > >   return PCIE_SPEED_2_5GT;
> > >   }
> > >
> >
> > I'd like to apply this as below, where I removed the 8_0GB and 16_0GB
> > cases as recommended by the spec.  I can't test it myself, and the
> > bugzillas don't contain enough information for me to confirm that the
> > patch below is enough (the "lspci -vv" output of the root port and GPU
> > is what I would need).
> >
> > I'm confused about the fact that 6cf57be0f78e appeared in v4.17, but
> > v4.18 works fine according to both bugzillas.
> 
> This issue affects AMD GPUs because we switched from using an open
> coded check for pcie link speeds in the driver to using the common
> pcie variants in
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=v4.19&id=5d9a6330403271fbb1244f14380a7cc44662796f

OK, thanks.  I added that SHA1 and a note to explain the connection.

> The patch below would regress performance, at least on AMD GPUs, since
> we'd end up reporting a max speed of gen 2 (5 GT/s) which would cause
> the driver to limit the speed to gen2 even if gen3 or 4 are available.

I guess this means these parts are broken with respect to the spec,
since they support gen3 speeds but don't implement LnkCap2?

Gen2, i.e., PCIe r2.1, only defined 2.5 GT/s and 5 GT/s.  Devices
capable of the higher speeds added by PCIe r3.0 are supposed to
implement LnkCap2, but if we're even getting to this code, it means
LnkCap2 was zero.

If we confirm that this is a device defect, the question is what the
best way to work around it is.  Probably the original patch is easier
than some sort of quirk, but we'd need to expand the comment a little
bit to explain why we're not following the spec recommendation.

It looks like lspci probably also needs some updating here -- it
currently doesn't do anything at all with LnkCap2.

> > I also don't have a good feel for whether this is urgent enough to be
> > a v4.20 fix or whether it can wait for v4.21.  Evidence either way
> > would help.
> 
> I'd like it to land for 4.19 and 4.20 at least.  Alternatively, we
> could revert all of the drm patches to and bring back all the open
> coded implementations, but it's a fairly large number of patches to
> revert.

OK, sounds like it makes sense to do this for v4.20 and backport it at
least to v4.19 stable.  I do want to get the places below fixed also.
They may not be as urgent, but we might as well try and make
everything consistent while we're looking at it.

> > We also need similar fixes in pci_set_bus_speed(), pcie_speeds()
> > (hfi1), cobalt_pcie_status_show(), hba_ioctl_callback(),
> > qla24xx_pci_info_str(), and maybe a couple other places.
> >
> > Bjorn
> >
> >
> > commit 871f73abf4b8e6aee8a206775f944ede7c7d7250
> > Author: Mikulas Patocka 
> > Date:   Tue Oct 30 12:36:08 2018 -0400
> >
> > PCI: Fix incorrect value returned from pcie_get_speed_cap()
> >
> > The macros 

Re: [PATCH v6 3/5] drm: Document variable refresh properties

2018-11-20 Thread Pekka Paalanen
On Wed, 7 Nov 2018 15:10:31 +
"Kazlauskas, Nicholas"  wrote:

> On 11/7/18 9:57 AM, Wentland, Harry wrote:
> > 
> > 
> > On 2018-11-06 3:24 p.m., Nicholas Kazlauskas wrote:  
> >> These include the drm_connector 'vrr_capable' and the drm_crtc
> >> 'vrr_enabled' properties.
> >>
> >> Signed-off-by: Nicholas Kazlauskas 
> >> Cc: Harry Wentland 
> >> Cc: Manasi Navare 
> >> Cc: Pekka Paalanen 
> >> Cc: Ville Syrjälä 
> >> Cc: Michel Dänzer 
> >> ---
> >>   Documentation/gpu/drm-kms.rst   |  7 
> >>   drivers/gpu/drm/drm_connector.c | 61 +
> >>   2 files changed, 68 insertions(+)
> >>
> >> diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
> >> index 4b1501b4835b..8da2a178cf85 100644
> >> --- a/Documentation/gpu/drm-kms.rst
> >> +++ b/Documentation/gpu/drm-kms.rst
> >> @@ -575,6 +575,13 @@ Explicit Fencing Properties
> >>   .. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
> >>  :doc: explicit fencing properties
> >>   
> >> +
> >> +Variable Refresh Properties
> >> +---
> >> +
> >> +.. kernel-doc:: drivers/gpu/drm/drm_connector.c
> >> +   :doc: Variable refresh properties
> >> +
> >>   Existing KMS Properties
> >>   ---
> >>   
> >> diff --git a/drivers/gpu/drm/drm_connector.c 
> >> b/drivers/gpu/drm/drm_connector.c
> >> index 49290060ab7b..a6adf5450db3 100644
> >> --- a/drivers/gpu/drm/drm_connector.c
> >> +++ b/drivers/gpu/drm/drm_connector.c
> >> @@ -1255,6 +1255,67 @@ int drm_mode_create_scaling_mode_property(struct 
> >> drm_device *dev)
> >>   }
> >>   EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
> >>   
> >> +/**
> >> + * DOC: Variable refresh properties
> >> + *
> >> + * Variable refresh rate capable displays can dynamically adjust their
> >> + * refresh rate by extending the duration of their vertical porch until  
> > 
> > vertical porch -> vertical front porch
> >   
> >> + * page flip or timeout occurs. This can reduce or remove stuttering
> >> + * and latency in scenarios where the page flip does not align with the
> >> + * vblank interval.
> >> + *
> >> + * An example scenario would be an application flipping at a constant rate
> >> + * of 48Hz on a 60Hz display. The page flip will frequently miss the 
> >> vblank
> >> + * interval and the same contents will be displayed twice. This can be
> >> + * observed as stuttering for content with motion.
> >> + *
> >> + * If variable refresh rate was active on a display that supported a
> >> + * variable refresh range from 35Hz to 60Hz no stuttering would be 
> >> observable
> >> + * for the example scenario. The minimum supported variable refresh rate 
> >> of
> >> + * 35Hz is below the page flip frequency and the vertical front porch can
> >> + * be extended until the page flip occurs. The vblank interval will be
> >> + * directly aligned to the page flip rate.
> >> + *
> >> + * Userspace control for variable refresh rate is supported via properties
> >> + * on the _connector and _crtc objects.
> >> + *
> >> + * "vrr_capable":
> >> + *Optional _connector boolean property that drivers should 
> >> attach
> >> + *with drm_connector_attach_vrr_capable_property() on connectors 
> >> that
> >> + *could support variable refresh rates. Drivers should update the
> >> + *property value by calling 
> >> drm_connector_set_vrr_capable_property().
> >> + *
> >> + *Absence of the property should indicate absence of support.
> >> + *
> >> + * "vrr_enabled":
> >> + *Default _crtc boolean property that notifies the driver 
> >> that the
> >> + *content on the CRTC is suitable for variable refresh rate 
> >> presentation.
> >> + *The driver will take this property as a hint to enable variable
> >> + *refresh rate support if the receiver supports it, ie. if the
> >> + *"vrr_capable" property is true on the _connector object. The
> >> + *veritcal front porch duration will be extended until page-flip 
> >> or  
> > 
> > veritcal -> vertical
> >   
> >> + *timeout when enabled.
> >> + *
> >> + *The minimum vertical front porch duration is defined as the 
> >> vertical
> >> + *front porch duration for the current mode.
> >> + *
> >> + *The maximum vertical front porch duration is greater than or 
> >> equal to
> >> + *the minimum vertical front porch duration. The duration is 
> >> derived
> >> + *from the minimum supported variable refresh rate for the 
> >> connector.
> >> + *
> >> + *The driver may place further restrictions within these minimum
> >> + *and maximum bounds.
> >> + *
> >> + *Some displays may exhibit noticeable differences in brightness 
> >> when
> >> + *varying vertical front porch duration.
> >> + *  
> > 
> > Maybe something like this makes sense here:
> > 
> >   * Some displays may exhibit noticeable differences in brightness when
> >   * varying vertical front porch 

Re: [PATCH] pci: fix incorrect value returned from pcie_get_speed_cap

2018-11-20 Thread Alex Deucher
On Mon, Nov 19, 2018 at 7:47 PM Bjorn Helgaas  wrote:
>
> On Tue, Oct 30, 2018 at 12:36:08PM -0400, Mikulas Patocka wrote:
> > The macros PCI_EXP_LNKCAP_SLS_*GB are values, not bit masks. We must mask
> > the register and compare it against them.
> >
> > This patch fixes errors "amdgpu: [powerplay] failed to send message 261
> > ret is 0" errors when PCIe-v3 card is plugged into PCIe-v1 slot, because
> > the slot is being incorrectly reported as PCIe-v3 capable.
> >
> > Signed-off-by: Mikulas Patocka 
> > Fixes: 6cf57be0f78e ("PCI: Add pcie_get_speed_cap() to find max supported 
> > link speed")
> > Cc: sta...@vger.kernel.org# v4.17+
> >
> > ---
> >  drivers/pci/pci.c |8 
> >  1 file changed, 4 insertions(+), 4 deletions(-)
> >
> > Index: linux-4.19/drivers/pci/pci.c
> > ===
> > --- linux-4.19.orig/drivers/pci/pci.c 2018-10-30 16:58:58.0 +0100
> > +++ linux-4.19/drivers/pci/pci.c  2018-10-30 16:58:58.0 +0100
> > @@ -5492,13 +5492,13 @@ enum pci_bus_speed pcie_get_speed_cap(st
> >
> >   pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
> >   if (lnkcap) {
> > - if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
> > + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > PCI_EXP_LNKCAP_SLS_16_0GB)
> >   return PCIE_SPEED_16_0GT;
> > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
> > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > PCI_EXP_LNKCAP_SLS_8_0GB)
> >   return PCIE_SPEED_8_0GT;
> > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
> > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) 
> > ==PCI_EXP_LNKCAP_SLS_5_0GB)
> >   return PCIE_SPEED_5_0GT;
> > - else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
> > + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == 
> > PCI_EXP_LNKCAP_SLS_2_5GB)
> >   return PCIE_SPEED_2_5GT;
> >   }
> >
>
> I'd like to apply this as below, where I removed the 8_0GB and 16_0GB
> cases as recommended by the spec.  I can't test it myself, and the
> bugzillas don't contain enough information for me to confirm that the
> patch below is enough (the "lspci -vv" output of the root port and GPU
> is what I would need).
>
> I'm confused about the fact that 6cf57be0f78e appeared in v4.17, but
> v4.18 works fine according to both bugzillas.

This issue affects AMD GPUs because we switched from using an open
coded check for pcie link speeds in the driver to using the common
pcie variants in
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=v4.19&id=5d9a6330403271fbb1244f14380a7cc44662796f

The patch below would regress performance, at least on AMD GPUs, since
we'd end up reporting a max speed of gen 2 (5 GT/s) which would cause
the driver to limit the speed to gen2 even if gen3 or 4 are available.

>
> I also don't have a good feel for whether this is urgent enough to be
> a v4.20 fix or whether it can wait for v4.21.  Evidence either way
> would help.

I'd like it to land for 4.19 and 4.20 at least.  Alternatively, we
could revert all of the drm patches and bring back all the open
coded implementations, but it's a fairly large number of patches to
revert.

Alex

>
> We also need similar fixes in pci_set_bus_speed(), pcie_speeds()
> (hfi1), cobalt_pcie_status_show(), hba_ioctl_callback(),
> qla24xx_pci_info_str(), and maybe a couple other places.
>
> Bjorn
>
>
> commit 871f73abf4b8e6aee8a206775f944ede7c7d7250
> Author: Mikulas Patocka 
> Date:   Tue Oct 30 12:36:08 2018 -0400
>
> PCI: Fix incorrect value returned from pcie_get_speed_cap()
>
> The macros PCI_EXP_LNKCAP_SLS_*GB are values, not bit masks.  We must mask
> the register and compare it against them.
>
> This patch fixes errors "amdgpu: [powerplay] failed to send message 261 
> ret
> is 0" errors when PCIe-v3 card is plugged into PCIe-v1 slot, because the
> slot is being incorrectly reported as PCIe-v3 capable.
>
> Fixes: 6cf57be0f78e ("PCI: Add pcie_get_speed_cap() to find max supported 
> link speed")
> Link: https://bugs.freedesktop.org/show_bug.cgi?id=108704
> Link: https://bugs.freedesktop.org/show_bug.cgi?id=108778
> Signed-off-by: Mikulas Patocka 
> [bhelgaas: update comment, remove use of PCI_EXP_LNKCAP_SLS_8_0GB and
> PCI_EXP_LNKCAP_SLS_16_0GB since those should be covered by 
> PCI_EXP_LNKCAP2]
> Signed-off-by: Bjorn Helgaas 
> Acked-by: Alex Deucher 
> Cc: sta...@vger.kernel.org  # v4.17+
>
> diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
> index d068f11d08a7..8563d1d9b102 100644
> --- a/drivers/pci/pci.c
> +++ b/drivers/pci/pci.c
> @@ -5556,9 +5556,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev 
> *dev)
> u32 lnkcap2, lnkcap;
>
> /*
> -* PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
> -* Speeds Vector in Link Capabilities 2 when 

Re: [PATCH] drm/amdgpu: Remove dead static variable

2018-11-20 Thread Koenig, Christian
Am 20.11.18 um 11:48 schrieb Emil Velikov:
> On Mon, 19 Nov 2018 at 11:19, Christian König
>  wrote:
>> Am 19.11.18 um 12:07 schrieb Rex Zhu:
>>> The static struct drm_driver *driver was
>>> not used because drm_pci_init was deprecated
>>>
>>> Signed-off-by: Rex Zhu 
>> Reviewed-by: Christian König 
>>
>> Can you off hand see what "pdriver" is used for? That looks suspiciously
>> like something deprecated as well.
>>
> Seeming copy/paste from the radeon driver. The latter used to support
> UMS and KMS at some point in the past.

Yeah, thought so. Rex came up with a V2 where both variables are removed.

Thanks,
Christian.

>
> HTH
> -Emil

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Remove dead static variable

2018-11-20 Thread Emil Velikov
On Mon, 19 Nov 2018 at 11:19, Christian König
 wrote:
>
> Am 19.11.18 um 12:07 schrieb Rex Zhu:
> > The static struct drm_driver *driver was
> > not used because drm_pci_init was deprecated
> >
> > Signed-off-by: Rex Zhu 
>
> Reviewed-by: Christian König 
>
> Can you off hand see what "pdriver" is used for? That looks suspiciously
> like something deprecated as well.
>
Seeming copy/paste from the radeon driver. The latter used to support
UMS and KMS at some point in the past.

HTH
-Emil
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/3] drm/amdgpu: enable paging queue based on FW version

2018-11-20 Thread Christian König

Am 19.11.18 um 22:16 schrieb Yang, Philip:

On 2018-11-19 3:57 p.m., Deucher, Alexander wrote:

-Original Message-
From: amd-gfx  On Behalf Of
Yang, Philip
Sent: Monday, November 19, 2018 3:20 PM
To: amd-gfx@lists.freedesktop.org
Cc: Yang, Philip 
Subject: [PATCH 3/3] drm/amdgpu: enable paging queue based on FW
version

Based SDMA fw version to enable has_page_queue support. Have to move
sdma_v4_0_init_microcode from sw_init to early_init, to load firmware and
init fw_version before set_ring/buffer/vm_pte_funcs use it.

Change-Id: Ife5d4659d28bc2a7012b48947b27e929749d87c1
Signed-off-by: Philip Yang 
---
   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 46 +
-
   1 file changed, 30 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4d873fd3242c..0a3b68dd49a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1447,23 +1447,44 @@ static void
sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);  }

+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device
+*adev) {
+   uint fw_version = adev->sdma.instance[0].fw_version;
+
+   switch (adev->asic_type) {
+   case CHIP_VEGA10:
+   return fw_version >= 430;
+   case CHIP_VEGA12:
+   return fw_version >= 31;
+   case CHIP_VEGA20:
+   return fw_version >= 115;
+   default:
+   return false;
+   }
+}
+
   static int sdma_v4_0_early_init(void *handle)  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   int r;

-   if (adev->asic_type == CHIP_RAVEN) {
+   if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
-   adev->sdma.has_page_queue = false;
-   } else {
+   else
adev->sdma.num_instances = 2;
-   /* TODO: Page queue breaks driver reload under SRIOV */
-   if ((adev->asic_type == CHIP_VEGA10) &&
amdgpu_sriov_vf((adev)))
-   adev->sdma.has_page_queue = false;
-   else if (adev->asic_type != CHIP_VEGA20 &&
-   adev->asic_type != CHIP_VEGA12)
-   adev->sdma.has_page_queue = true;
+
+   r = sdma_v4_0_init_microcode(adev);
+   if (r) {
+   DRM_ERROR("Failed to load sdma firmware!\n");
+   return r;

I think this should be ok.  As long as you've verified that 
sdam_v4_0_init_microcode() doesn't depend on any other init from another module 
like psp.  I took a quick look at the code and it seems like we should be ok.
Acked-by: Alex Deucher 

sdma_v4_0_init_microcode() reads fw binary file and setup fw version etc
data structure. It doesn't depend on other init so it is fine to move it
from sw_init() to early_init(). sdma_v4_0_load_microcode() will start fw
from sdma_v4_0_start() which depends on psp, gmc etc. This happens after
early_init()->sw_init()->hw_init().


Yeah, that sounds like it should work.

Patches #1 and #2 are Reviewed-by: Christian König 



Patch #3 is Acked-by: Christian König 

Thanks for taking care of that,
Christian.




Philip

}

+   /* TODO: Page queue breaks driver reload under SRIOV */
+   if ((adev->asic_type == CHIP_VEGA10) &&
amdgpu_sriov_vf((adev)))
+   adev->sdma.has_page_queue = false;
+   else if (sdma_v4_0_fw_support_paging_queue(adev))
+   adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1472,7 +1493,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
   }

-
   static int sdma_v4_0_sw_init(void *handle)  {
struct amdgpu_ring *ring;
@@ -1491,12 +1511,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;

-   r = sdma_v4_0_init_microcode(adev);
-   if (r) {
-   DRM_ERROR("Failed to load sdma firmware!\n");
-   return r;
-   }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = >sdma.instance[i].ring;
ring->ring_obj = NULL;
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: enable paging queue doorbell support v2

2018-11-20 Thread Christian König

Am 19.11.18 um 21:08 schrieb Kuehling, Felix:

Hi Christian,

On 2018-11-19 6:24 a.m., Christian König wrote:

Am 15.11.18 um 20:10 schrieb Yang, Philip:

paging queues doorbell index use existing assignment
sDMA_HI_PRI_ENGINE0/1
index, and increase SDMA_DOORBELL_RANGE size from 2 dwords to 4
dwords to
enable the new doorbell index.

v2: disable paging queue doorbell on Vega10 and Vega12 with SRIOV

Change-Id: I9adb965f16ee4089d261d9a22231337739184e49
Signed-off-by: Philip Yang 

Acked-by: Christian König 

This change was superseded by "drm/amdgpu: enable paging queue doorbell
support v3", which doesn't need to change the doorbell routing and no
special rules for SRIOV. Instead it uses two pages from the doorbell
aperture. That's OK, because KFD only uses page 2 and up.


Ah, yeah hadn't seen that patch yet.

Thanks,
Christian.



Regards,
   Felix



---
   drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c |  2 +-
   drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c |  2 +-
   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 26 ++
   3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6f9c54978cc1..0eb42c29ecac 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -80,7 +80,7 @@ static void nbio_v6_1_sdma_doorbell_range(struct
amdgpu_device *adev, int instan
     if (use_doorbell) {
   doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-    doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+    doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 4);
   } else
   doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
   diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f8cee95d61cc..9342ee03d7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -76,7 +76,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct
amdgpu_device *adev, int instan
     if (use_doorbell) {
   doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
-    doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
+    doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 4);
   } else
   doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
   diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index f4490cdd9804..1f19f15bb171 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct
amdgpu_device *adev, unsigned int i)
   OFFSET, ring->doorbell_index);
   WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
   WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
-    /* TODO: enable doorbell support */
-    /*adev->nbio_funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell,
-  ring->doorbell_index);*/
   -    sdma_v4_0_ring_set_wptr(ring);
+    /* paging queue doorbell index is already enabled at
sdma_v4_0_gfx_resume */
+    sdma_v4_0_page_ring_set_wptr(ring);
     /* set minor_ptr_update to 0 after wptr programed */
   WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
@@ -1504,9 +1502,6 @@ static int sdma_v4_0_sw_init(void *handle)
   ring->ring_obj = NULL;
   ring->use_doorbell = true;
   -    DRM_INFO("use_doorbell being set to: [%s]\n",
-    ring->use_doorbell?"true":"false");
-
   if (adev->asic_type == CHIP_VEGA10)
   ring->doorbell_index = (i == 0) ?
   (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get
DWORD offset
@@ -1516,6 +1511,8 @@ static int sdma_v4_0_sw_init(void *handle)
   (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD
offset
   : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get
DWORD offset
   +    DRM_DEBUG("use_doorbell being set to: [%s] doorbell index
%d\n",
+    ring->use_doorbell?"true":"false", ring->doorbell_index);
     sprintf(ring->name, "sdma%d", i);
   r = amdgpu_ring_init(adev, ring, 1024,
@@ -1529,7 +1526,20 @@ static int sdma_v4_0_sw_init(void *handle)
   if (adev->sdma.has_page_queue) {
   ring = >sdma.instance[i].page;
   ring->ring_obj = NULL;
-    ring->use_doorbell = false;
+    if (!amdgpu_sriov_vf(adev) || adev->asic_type ==
CHIP_VEGA20)
+    ring->use_doorbell = true;
+
+    if (adev->asic_type == CHIP_VEGA10)
+    ring->doorbell_index = (i == 0) ?
+    (AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 << 1)
+   

Re: [PATCH 2/2] drm/atomic: Create and use __drm_atomic_helper_crtc_reset() everywhere

2018-11-20 Thread Philipp Zabel
On Mon, 2018-11-12 at 16:01 +0100, Maarten Lankhorst wrote:
> We already have __drm_atomic_helper_connector_reset() and
> __drm_atomic_helper_plane_reset(), extend this to crtc as well.
> 
> Most drivers already have a gpu reset hook, correct it.
> Nouveau already implemented its own __drm_atomic_helper_crtc_reset(),
> convert it to the common one.
> 
> Signed-off-by: Maarten Lankhorst 
[...]
> diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c 
> b/drivers/gpu/drm/imx/ipuv3-crtc.c
> index 7d4b710b837a..8bc36f0d2b6b 100644
> --- a/drivers/gpu/drm/imx/ipuv3-crtc.c
> +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
> @@ -120,12 +120,9 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
>   memset(state, 0, sizeof(*state));
>   } else {
>   state = kzalloc(sizeof(*state), GFP_KERNEL);
> - if (!state)
> - return;
> - crtc->state = >base;
>   }
>  
> - state->base.crtc = crtc;
> + __drm_atomic_helper_crtc_reset(crtc, >base);
>  }
>  
>  static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc 
> *crtc)
> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c 
> b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> index 92ecb9bf982c..a743e5ed1177 100644
> --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> @@ -119,12 +119,9 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
>   memset(state, 0, sizeof(*state));
>   } else {
>   state = kzalloc(sizeof(*state), GFP_KERNEL);
> - if (!state)
> - return;
> - crtc->state = >base;
>   }
>  
> - state->base.crtc = crtc;
> + __drm_atomic_helper_crtc_reset(crtc, >base);
>  }
>  
>  static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc 
> *crtc)

For the imx-drm and mediatek drivers,

Acked-by: Philipp Zabel 

regards
Philipp
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH v2] drm/amdgpu: Enable HDP memory light sleep

2018-11-20 Thread Quan, Evan
Reviewed-by: Evan Quan 

> -Original Message-
> From: amd-gfx  On Behalf Of
> Kenneth Feng
> Sent: 2018年11月20日 16:06
> To: amd-gfx@lists.freedesktop.org
> Cc: Feng, Kenneth 
> Subject: [PATCH v2] drm/amdgpu: Enable HDP memory light sleep
> 
> Due to the register name and setting change of HDP memory light sleep on
> Vega20,change accordingly in the driver.
> 
> Signed-off-by: Kenneth Feng 
> ---
>  drivers/gpu/drm/amd/amdgpu/soc15.c | 39
> +++---
>  1 file changed, 32 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c
> b/drivers/gpu/drm/amd/amdgpu/soc15.c
> index bf5e6a4..4cc0dcb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
> +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
> @@ -65,6 +65,13 @@
>  #define mmMP0_MISC_LIGHT_SLEEP_CTRL
> 0x01ba
>  #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX
> 0
> 
> +/* for Vega20 register name change */
> +#define mmHDP_MEM_POWER_CTRL 0x00d4
> +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK
>   0x0001L
> +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK
>   0x0002L
> +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK
>   0x0001L
> +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK
>   0x0002L
> +#define mmHDP_MEM_POWER_CTRL_BASE_IDX0
>  /*
>   * Indirect registers accessor
>   */
> @@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct
> amdgpu_device *adev, bool enable  {
>   uint32_t def, data;
> 
> - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_LS));
> + if (adev->asic_type == CHIP_VEGA20) {
> + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_CTRL));
> 
> - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
> - data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
> - else
> - data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
> + if (enable && (adev->cg_flags &
> AMD_CG_SUPPORT_HDP_LS))
> + data |=
> HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
> + else
> + data &=
> ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
> +
>   HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
> 
> - if (def != data)
> - WREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_LS), data);
> + if (def != data)
> + WREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_CTRL), data);
> + } else {
> + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_LS));
> +
> + if (enable && (adev->cg_flags &
> AMD_CG_SUPPORT_HDP_LS))
> + data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
> + else
> + data &=
> ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
> +
> + if (def != data)
> + WREG32(SOC15_REG_OFFSET(HDP, 0,
> mmHDP_MEM_POWER_LS), data);
> + }
>  }
> 
>  static void soc15_update_drm_clock_gating(struct amdgpu_device *adev,
> bool enable)
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH v2] drm/amdgpu: Enable HDP memory light sleep

2018-11-20 Thread Kenneth Feng
Due to the register name and setting change of HDP
memory light sleep on Vega20,change accordingly in
the driver.

Signed-off-by: Kenneth Feng 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 39 +++---
 1 file changed, 32 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bf5e6a4..4cc0dcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -65,6 +65,13 @@
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL
 0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX   
 0
 
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL   0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x0001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK   0x0002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK  0x0001L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK0x0002L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX  0
 /*
  * Indirect registers accessor
  */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct 
amdgpu_device *adev, bool enable
 {
uint32_t def, data;
 
-   def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+   if (adev->asic_type == CHIP_VEGA20) {
+   def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, 
mmHDP_MEM_POWER_CTRL));
 
-   if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-   data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-   else
-   data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+   if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+   data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+   HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+   HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+   HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+   else
+   data &= 
~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+   HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+   HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+   HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
 
-   if (def != data)
-   WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+   if (def != data)
+   WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), 
data);
+   } else {
+   def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, 
mmHDP_MEM_POWER_LS));
+
+   if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+   data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+   else
+   data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+   if (def != data)
+   WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), 
data);
+   }
 }
 
 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool 
enable)
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx