Re: [PATCH 3/4] drm/amdkfd: report pcie bandwidth as number of lanes
On 2021-06-21 3:23 p.m., Jonathan Kim wrote: Similar to xGMI reporting the min/max bandwidth as the number of links between peers, PCIe will report the min/max bandwidth as the number of supported lanes. Signed-off-by: Jonathan Kim --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 24 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3 +++ 3 files changed, 30 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index c84989eda8eb..99c662b70519 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -568,6 +568,30 @@ uint8_t amdgpu_amdkfd_get_xgmi_num_links(struct kgd_dev *dst, struct kgd_dev *sr return (uint8_t)ret; } +uint32_t amdgpu_amdkfd_get_pcie_min_lanes(struct kgd_dev *dev) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; + int min_lane_shift = ffs(adev->pm.pcie_mlw_mask >> + CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT) - 1; + + if (min_lane_shift < 0) + return 0; + + return 1UL << min_lane_shift; +} + +uint32_t amdgpu_amdkfd_get_pcie_max_lanes(struct kgd_dev *dev) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; + int max_lane_shift = fls(adev->pm.pcie_mlw_mask >> + CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT) - 1; + + if (max_lane_shift < 0) + return 0; + + return 1UL << max_lane_shift; +} + uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 20e4bfce62be..88322c72a43d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -31,6 +31,7 @@ #include #include #include +#include "amd_pcie.h" #include "amdgpu_sync.h" #include "amdgpu_vm.h" @@ -227,6 +228,8 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd); int amdgpu_amdkfd_get_noretry(struct kgd_dev 
*kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); uint8_t amdgpu_amdkfd_get_xgmi_num_links(struct kgd_dev *dst, struct kgd_dev *src); +uint32_t amdgpu_amdkfd_get_pcie_min_lanes(struct kgd_dev *dev); +uint32_t amdgpu_amdkfd_get_pcie_max_lanes(struct kgd_dev *dev); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 75047b77649b..f70d69035fe7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1036,6 +1036,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, props->max_latency = iolink->maximum_latency; props->min_bandwidth = iolink->minimum_bandwidth; props->max_bandwidth = iolink->maximum_bandwidth; + props->rec_transfer_size = iolink->recommended_transfer_size; @@ -1993,6 +1994,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->maximum_bandwidth = 1; } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; + sub_type_hdr->minimum_bandwidth = amdgpu_amdkfd_get_pcie_min_lanes(kdev->kgd); + sub_type_hdr->maximum_bandwidth = amdgpu_amdkfd_get_pcie_max_lanes(kdev->kgd); Similar to XGMI, I did not mean to directly report the number of lanes here. Instead, used it to calculate the actual bandwidth in MB/s. Regards, Felix } sub_type_hdr->proximity_domain_from = proximity_domain; ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Re: [PATCH 2/4] drm/amdkfd: report num xgmi links between direct peers to the kfd
On 2021-06-21 3:23 p.m., Jonathan Kim wrote: Since Min/Max bandwidth was never used, it will repurposed to report the number of xgmi links between direct peers to the KFD topology. Signed-off-by: Jonathan Kim --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 12 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 11 +-- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 4 ++-- 6 files changed, 41 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bfab2f9fdd17..c84989eda8eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -553,6 +553,21 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s return (uint8_t)ret; } +uint8_t amdgpu_amdkfd_get_xgmi_num_links(struct kgd_dev *dst, struct kgd_dev *src) +{ + struct amdgpu_device *peer_adev = (struct amdgpu_device *)src; + struct amdgpu_device *adev = (struct amdgpu_device *)dst; + int ret = amdgpu_xgmi_get_num_links(adev, peer_adev); + + if (ret < 0) { + DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. 
ret = %d\n", + adev->gmc.xgmi.physical_node_id, + peer_adev->gmc.xgmi.physical_node_id, ret); + ret = 0; + } + return (uint8_t)ret; +} + uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index fabc68eec36a..20e4bfce62be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -226,6 +226,7 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd); int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); +uint8_t amdgpu_amdkfd_get_xgmi_num_links(struct kgd_dev *dst, struct kgd_dev *src); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 8567d5d77346..258cf86b32f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -486,6 +486,18 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, return -EINVAL; } +int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i; + + for (i = 0 ; i < top->num_nodes; ++i) + if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id) + return top->nodes[i].num_links; + return -EINVAL; +} + int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 12969c0830d5..d2189bf7d428 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ 
-59,6 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev); uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index c6b02aee4993..75047b77649b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1034,8 +1034,8 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, props->min_latency = iolink->minimum_latency; props->max_latency = iolink->maximum_latency; - props->min_bandwidth = iolink->minimum_bandwidth_mbs; - props->max_bandwidth = iolink->maximum_bandwidth_mbs; + props->min_bandwidth = iolink->minimum_bandwidth; + props->max_bandwidth = iolink->maximum_bandwidth; props->rec_transfer_size =
Re: [PATCH 2/2] drm/ttm: Fix COW check
On 2021-07-09 3:37 p.m., Christian König wrote: Am 09.07.21 um 21:31 schrieb Felix Kuehling: On 2021-07-09 2:38 a.m., Christian König wrote: Am 08.07.21 um 21:36 schrieb Alex Deucher: From: Felix Kuehling KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. is_cow_mapping returns true for these mappings. Add a check for vm_flags & VM_WRITE to avoid mmap failures on private read-only or PROT_NONE mappings. I'm pretty sure that this is not working as expected. Not sure what you mean. Debugger access to the memory through the PROT_NONE VMAs is definitely working, with both ptrace and /proc//mem. Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index f56be5bc0861..a75e90c7d4aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -552,7 +552,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { /* Enforce no COW since would have really strange behavior with it. */ - if (is_cow_mapping(vma->vm_flags)) + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) is_cow_mapping() already checks for VM_MAYWRITE, so this here shouldn't be necessary. AFAICT, VM_MAYWRITE is not based on the PROT_... bits used to create the VMA, but based on the permissions of the file. So as long as the render node is writable, VM_MAYWRITE is set for all VMAs that map it. I would agree that it's probably a bad idea for the Thunk to map these VMAs with MAP_PRIVATE. We can try changing the Thunk to use MAP_SHARED for these PROT_NONE mappings. But that doesn't change the fact that this kernel patch broke existing usermode. For the record, changing the Thunk to use MAP_SHARED works. 
Yeah, but see the discussion around MAP_PRIVATE and COW regarding this. What Thunk did here was a really bad idea. Hindsight ... Which part was a bad idea? * Creating a PROT_NONE VMA? That's necessary to let ptrace or /proc//mem access the memory. It's a brilliant idea, IMHO * Making that VMA MAP_PRIVATE? Probably a bad idea in hindsight. The process itself can't access this memory, let alone write to it. So I didn't think too much about whether to make it shared or private. Either way, we are not causing any actual COW on these mappings because they are not writable, and we never make them writable with mprotect either * Introducing a COW check after letting it slide for 15 years? It's a reasonable check. In this case it catches a false positive. Had this check been in place 4 or 5 years ago, it would have easily prevented this mess I think we have only two options here: 1. Accept the breakage of thunk. Really? 2. Add a workaround in amdgpu/amdkfd to force MAP_PRIVATE into MAP_SHARED in the kernel. I tried to do this in amdgpu_gem_object_mmap and spent 4 hours debugging why it doesn't work. It breaks because the mapping->i_mmap_writable gets unbalanced and causes subsequent mappings to fail when that atomic counter becomes negative (indicating DENYWRITE). I could fix it by calling mapping_map_writable. But I don't think this is intended as driver API. I see no precedent of any driver calling this. For the record this works, but it feels fragile and ugly: --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -255,6 +255,20 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; + /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings +* for debugger access to invisible VRAM. Should have used MAP_SHARED +* instead. 
+*/ + if (is_cow_mapping(vma->vm_flags) && + !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) { + int ret; + + ret = mapping_map_writable(vma->vm_file->f_mapping); + if (ret) + return ret; + vma->vm_flags |= VM_SHARED | VM_MAYSHARE; + } + return drm_gem_ttm_mmap(obj, vma); } 3. Improve my previous workaround by adding a similar check for COW in ttm_bo_vm_ops.mprotect. I'll send an updated patch. Regards, Felix Regards, Christian. Regards, Felix Christian. return -EINVAL; ttm_bo_get(bo); ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
[PATCH 1/1] drm/ttm: Fix COW check
KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. is_cow_mapping returns true for these mappings. Add a check for vm_flags & VM_WRITE to avoid mmap failures on private read-only or PROT_NONE mappings. v2: protect against mprotect making a mapping writable after the fact Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 13 - 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index f56be5bc0861..9ad211ede611 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -542,17 +542,28 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(ttm_bo_vm_access); +static int ttm_bo_vm_mprotect(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long newflags) +{ + /* Enforce no COW since would have really strange behavior with it. */ + if (is_cow_mapping(newflags) && (newflags & VM_WRITE)) + return -EINVAL; + + return 0; +} + static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, .access = ttm_bo_vm_access, + .mprotect = ttm_bo_vm_mprotect, }; int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { /* Enforce no COW since would have really strange behavior with it. */ - if (is_cow_mapping(vma->vm_flags)) + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) return -EINVAL; ttm_bo_get(bo); -- 2.32.0 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
RE: [PATCH 4/4] drm/amdkfd: add direct link flag to link properties
[AMD Official Use Only] This series Acked-by: Harish Kasiviswanathan -Original Message- From: amd-gfx On Behalf Of Jonathan Kim Sent: Monday, June 21, 2021 3:24 PM To: amd-gfx@lists.freedesktop.org Cc: Kuehling, Felix ; Kim, Jonathan ; Zhang, Hawking Subject: [PATCH 4/4] drm/amdkfd: add direct link flag to link properties Flag peers as a direct link if over PCIe or over xGMI if they are adjacent in the hive. Signed-off-by: Jonathan Kim --- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 11 +++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index d1f6de5edfb9..0d661d60ece6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -232,8 +232,9 @@ struct crat_subtype_ccompute { #define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT(1 << 2) #define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT(1 << 3) #define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4) +#define CRAT_IOLINK_FLAGS_DIRECT_LINK (1 << 5) #define CRAT_IOLINK_FLAGS_BI_DIRECTIONAL (1 << 31) -#define CRAT_IOLINK_FLAGS_RESERVED_MASK0x7fe0 +#define CRAT_IOLINK_FLAGS_RESERVED_MASK0x7fc0 /* * IO interface types diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index b1ce072aa20b..037fa12ac1bc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1244,6 +1244,15 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev, } } +static void kfd_set_iolink_direct_link(struct kfd_topology_device *dev, + struct kfd_iolink_properties *link) { + if (link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || + (link->iolink_type == CRAT_IOLINK_TYPE_XGMI && + link->max_bandwidth)) + link->flags |= CRAT_IOLINK_FLAGS_DIRECT_LINK; } + static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) { struct kfd_iolink_properties *link, *inbound_link; 
@@ -1256,6 +1265,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) list_for_each_entry(link, &dev->io_link_props, list) { link->flags = CRAT_IOLINK_FLAGS_ENABLED; kfd_set_iolink_no_atomics(dev, NULL, link); + kfd_set_iolink_direct_link(dev, link); peer_dev = kfd_topology_device_by_proximity_domain( link->node_to); @@ -1270,6 +1280,7 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED; kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); kfd_set_iolink_non_coherent(peer_dev, link, inbound_link); + kfd_set_iolink_direct_link(peer_dev, inbound_link); } } } -- 2.25.1 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=04%7C01%7Charish.kasiviswanathan%40amd.com%7C92cd10a23764408da0ec08d934ea2680%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637599002557866754%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=utRekyrx5G9DD4TSayfxKwLFWv%2FnFlgzSErE7m6zdEw%3Dreserved=0 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
RE: [PATCH 00/12] DC Patches June 29, 2021
[Public] Hi all and sorry for the delay, This patchset was tested on the following systems: HP Envy 360, with Ryzen 5 4500U, with the following display types: eDP 1080p 60hz, 4k 60hz (via USB-C to DP/HDMI), 1440p 144hz (via USB-C to DP/HDMI), 1680*1050 60hz (via USB-C to DP and then DP to DVI/VGA) AMD Ryzen 9 5900H, with the following display types: eDP 1080p 60hz, 4k 60hz (via USB-C to DP/HDMI), 1440p 144hz (via USB-C to DP/HDMI), 1680*1050 60hz (via USB-C to DP and then DP to DVI/VGA) Sapphire Pulse RX5700XT with the following display types: 4k 60hz (via DP/HDMI), 1440p 144hz (via DP/HDMI), 1680*1050 60hz (via DP to DVI/VGA) Reference AMD RX6800 with the following display types: 4k 60hz (via DP/HDMI and USB-C to DP/HDMI), 1440p 144hz (via USB-C to DP/HDMI and USB-C to DP/HDMI), 1680*1050 60hz (via DP to DVI/VGA) Included testing using a Startech DP 1.4 MST hub at 2x 4k 60hz, and 3x 1080p 60hz on all systems. Tested-by: Daniel Wheeler Thank you, Dan Wheeler Technologist | AMD SW Display -- 1 Commerce Valley Dr E, Thornhill, ON L3T 7X6 Facebook | Twitter | amd.com -Original Message- From: amd-gfx On Behalf Of Rodrigo Siqueira Sent: June 29, 2021 11:54 AM To: amd-gfx@lists.freedesktop.org Cc: Li, Sun peng (Leo) ; Wentland, Harry ; Zhuo, Qingqing ; Siqueira, Rodrigo ; Jacob, Anson ; Pillai, Aurabindo ; Lakha, Bhawanpreet ; R, Bindu Subject: [PATCH 00/12] DC Patches June 29, 2021 DC version 3.2.142 brings improvements in multiple areas. 
In summary, we highlight: - Freesync improvements - Remove unnecessary assert - Firmware release 0.0.72 - Improve the EDID manipulation and DML calculations Alvin Lee (1): drm/amd/display: Adjust types and formatting for future development Anthony Koo (1): drm/amd/display: [FW Promotion] Release 0.0.72 Aric Cyr (3): drm/amd/display: 3.2.142 drm/amd/display: Round KHz up when calculating clock requests drm/amd/display: increase max EDID size to 2k Chun-Liang Chang (1): drm/amd/display: DMUB Outbound Interrupt Process-X86 Dmytro Laktyushkin (1): drm/amd/display: remove faulty assert Nicholas Kazlauskas (1): drm/amd/display: Fix updating infoframe for DCN3.1 eDP Stylon Wang (1): drm/amd/display: Add Freesync HDMI support to DM with DMUB Wang (1): drm/amd/display: Add null checks Wenjing Liu (1): drm/amd/display: isolate link training setting override to its own function Wesley Chalmers (1): Revert "drm/amd/display: Always write repeater mode regardless of LTTPR" .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 95 +- .../display/dc/clk_mgr/dcn10/rv1_clk_mgr.c| 12 +- .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 4 +- .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 12 +- .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 16 +-- .../display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 25 ++-- .../display/dc/clk_mgr/dcn301/dcn301_smu.c| 10 +- .../amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 10 +- .../gpu/drm/amd/display/dc/core/dc_link_dp.c | 121 +- drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 24 .../gpu/drm/amd/display/dc/core/dc_stream.c | 3 + drivers/gpu/drm/amd/display/dc/dc.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_stat.h | 1 + drivers/gpu/drm/amd/display/dc/dc_types.h | 2 +- .../drm/amd/display/dc/dcn31/dcn31_hwseq.c| 2 +- .../dc/dml/dcn21/display_mode_vba_21.c| 11 +- .../dc/dml/dcn30/display_mode_vba_30.c| 18 +-- .../dc/dml/dcn31/display_mode_vba_31.c| 15 ++- .../amd/display/dc/dml/display_mode_enums.h | 4 +- .../drm/amd/display/dc/dml/display_mode_vba.c | 12 +- 
.../drm/amd/display/dc/dml/display_mode_vba.h | 4 +- .../gpu/drm/amd/display/dc/inc/dc_link_dp.h | 1 - .../amd/display/dc/inc/hw/clk_mgr_internal.h | 5 + drivers/gpu/drm/amd/display/dc/irq_types.h| 2 +- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 18 +++ .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 11 +- .../gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 15 +++ .../gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 13 +- .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 17 +++ .../include/asic_reg/dcn/dcn_3_1_2_sh_mask.h | 4 + 30 files changed, 338 insertions(+), 151 deletions(-) -- 2.25.1 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=04%7C01%7Cdaniel.wheeler%40amd.com%7C277feeb3b4624c5460fc08d93b163dc9%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637605788987674214%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=j6iE3LOdDB2kUkSApf2WIiNWIFStAZwvnvknpiZvbFg%3Dreserved=0 ___
Re: [PATCH 2/2] drm/ttm: Fix COW check
On 2021-07-09 2:38 a.m., Christian König wrote: Am 08.07.21 um 21:36 schrieb Alex Deucher: From: Felix Kuehling KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. is_cow_mapping returns true for these mappings. Add a check for vm_flags & VM_WRITE to avoid mmap failures on private read-only or PROT_NONE mappings. I'm pretty sure that this is not working as expected. Not sure what you mean. Debugger access to the memory through the PROT_NONE VMAs is definitely working, with both ptrace and /proc//mem. Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index f56be5bc0861..a75e90c7d4aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -552,7 +552,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { /* Enforce no COW since would have really strange behavior with it. */ - if (is_cow_mapping(vma->vm_flags)) + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) is_cow_mapping() already checks for VM_MAYWRITE, so this here shouldn't be necessary. AFAICT, VM_MAYWRITE is not based on the PROT_... bits used to create the VMA, but based on the permissions of the file. So as long as the render node is writable, VM_MAYWRITE is set for all VMAs that map it. I would agree that it's probably a bad idea for the Thunk to map these VMAs with MAP_PRIVATE. We can try changing the Thunk to use MAP_SHARED for these PROT_NONE mappings. But that doesn't change the fact that this kernel patch broke existing usermode. Regards, Felix Christian. return -EINVAL; ttm_bo_get(bo); ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Re: [PATCH 2/2] drm/ttm: Fix COW check
Am 09.07.21 um 21:31 schrieb Felix Kuehling: On 2021-07-09 2:38 a.m., Christian König wrote: Am 08.07.21 um 21:36 schrieb Alex Deucher: From: Felix Kuehling KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. is_cow_mapping returns true for these mappings. Add a check for vm_flags & VM_WRITE to avoid mmap failures on private read-only or PROT_NONE mappings. I'm pretty sure that this is not working as expected. Not sure what you mean. Debugger access to the memory through the PROT_NONE VMAs is definitely working, with both ptrace and /proc//mem. Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index f56be5bc0861..a75e90c7d4aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -552,7 +552,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { /* Enforce no COW since would have really strange behavior with it. */ - if (is_cow_mapping(vma->vm_flags)) + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) is_cow_mapping() already checks for VM_MAYWRITE, so this here shouldn't be necessary. AFAICT, VM_MAYWRITE is not based on the PROT_... bits used to create the VMA, but based on the permissions of the file. So as long as the render node is writable, VM_MAYWRITE is set for all VMAs that map it. I would agree that it's probably a bad idea for the Thunk to map these VMAs with MAP_PRIVATE. We can try changing the Thunk to use MAP_SHARED for these PROT_NONE mappings. But that doesn't change the fact that this kernel patch broke existing usermode. Yeah, but see the discussion around MAP_PRIVATE and COW regarding this. What Thunk did here was a really bad idea. 
I think we have only two options here: 1. Accept the breakage of thunk. 2. Add a workaround in amdgpu/amdkfd to force MAP_PRIVATE into MAP_SHARED in the kernel. Regards, Christian. Regards, Felix Christian. return -EINVAL; ttm_bo_get(bo); ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
RE: [PATCH] drm/amd/pm: Fix BACO state setting for Beige_Goby
[Public] Reviewed-by: Guchun Chen Regards, Guchun -Original Message- From: Chengming Gui Sent: Friday, July 9, 2021 4:29 PM To: amd-gfx@lists.freedesktop.org Cc: Zhou1, Tao ; Chen, Jiansong (Simon) ; Chen, Guchun ; Feng, Kenneth ; Zhang, Hawking ; Gui, Jack Subject: [PATCH] drm/amd/pm: Fix BACO state setting for Beige_Goby Correct BACO state setting for Beige_Goby Signed-off-by: Chengming Gui Change-Id: I28b9a526f1b353c3986225f075c613ba88b6ae2b --- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 388c5cb5c647..0a5d46ac9ccd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, -- 2.17.1 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
RE: [PATCH] drm/amd/pm: Fix BACO state setting for Beige_Goby
[AMD Official Use Only] Reviewed-by: Jiansong Chen -Original Message- From: Chengming Gui Sent: Friday, July 9, 2021 4:29 PM To: amd-gfx@lists.freedesktop.org Cc: Zhou1, Tao ; Chen, Jiansong (Simon) ; Chen, Guchun ; Feng, Kenneth ; Zhang, Hawking ; Gui, Jack Subject: [PATCH] drm/amd/pm: Fix BACO state setting for Beige_Goby Correct BACO state setting for Beige_Goby Signed-off-by: Chengming Gui Change-Id: I28b9a526f1b353c3986225f075c613ba88b6ae2b --- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 388c5cb5c647..0a5d46ac9ccd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, -- 2.17.1 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
[PATCH] drm/amdgpu: return error type when programing registers fails
Signed-off-by: Roy Sun --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 22 -- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index bc4347a72301..af92c6f63dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -56,6 +56,15 @@ #define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1 #define GFX10_MEC_HPD_SIZE 2048 +#define INTERFACE_NOT_ENABLED_FLAG 0x400 +#define WRONG_OPERATION_TYPE_FLAG 0x200 +#define NOT_IN_RANGE_FLAG 0x100 + +#define RLCG_UNKNOWN_TYPE 0 +#define RLCG_INTERFACE_NOT_ENABLED 1 +#define RLCG_WRONG_OPERATION_TYPE 2 +#define RLCG_NOT_IN_RANGE 3 + #define F32_CE_PROGRAM_RAM_SIZE65536 #define RLCG_UCODE_LOADING_START_ADDRESS 0x2000L @@ -1533,8 +1542,17 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32 udelay(10); } - if (i >= retries) - pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); + if (i >= retries) { + int error_type = RLCG_UNKNOWN_TYPE; + + if (tmp & INTERFACE_NOT_ENABLED_FLAG && !error_type) + error_type = RLCG_INTERFACE_NOT_ENABLED; + if (tmp & WRONG_OPERATION_TYPE_FLAG && !error_type) + error_type = RLCG_WRONG_OPERATION_TYPE; + if (tmp & NOT_IN_RANGE_FLAG && !error_type) + error_type = RLCG_NOT_IN_RANGE; + pr_err("timeout: rlcg program reg:0x%05x failed! Error type: %d.\n", offset, error_type); + } } ret = readl(scratch_reg0); -- 2.32.0 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
RE: [PATCH] drm/amdkfd: Only apply heavy-weight TLB flush on Aldebaran
[Public] Original patch will cause regressions on Aldebaran as well, so this workaround is still invalid. Regards, Guchun -Original Message- From: amd-gfx On Behalf Of Eric Huang Sent: Friday, July 9, 2021 3:54 AM To: amd-gfx@lists.freedesktop.org Cc: Huang, JinHuiEric ; Kuehling, Felix Subject: [PATCH] drm/amdkfd: Only apply heavy-weight TLB flush on Aldebaran It is to workaround HW bug on other Asics and based on reverting two commits: drm/amdkfd: Add heavy-weight TLB flush after unmapping drm/amdkfd: Add memory sync before TLB flush on unmap Signed-off-by: Eric Huang --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 37 +--- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ebb4872c5a9d..5f2655cf0162 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1773,26 +1773,29 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); - if (err) { - pr_debug("Sync memory failed, wait interrupted by user signal\n"); - goto sync_memory_failed; - } + if (dev->device_info->asic_family == CHIP_ALDEBARAN) { + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, + (struct kgd_mem *) mem, true); - /* Flush TLBs after waiting for the page table updates to complete */ - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - if (!amdgpu_read_lock(peer->ddev, true)) { - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); - amdgpu_read_unlock(peer->ddev); + if (err) { + pr_debug("Sync memory failed, wait interrupted by user signal\n"); + goto sync_memory_failed; } - } + /* Flush TLBs after waiting for the page table updates to complete */ + for (i = 0; i < args->n_devices; 
i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (WARN_ON_ONCE(!peer)) + continue; + peer_pdd = kfd_get_process_device_data(peer, p); + if (WARN_ON_ONCE(!peer_pdd)) + continue; + if (!amdgpu_read_lock(peer->ddev, true)) { + kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); + amdgpu_read_unlock(peer->ddev); + } + } + } kfree(devices_arr); return 0; -- 2.25.1 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=04%7C01%7Cguchun.chen%40amd.com%7C69113cf367eb450a8f8808d9424a23fe%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637613708477013366%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000sdata=0ESYvG5kCSJaFT9dR4jW5VacL8x7TghGw1aKWTRa9R4%3Dreserved=0 ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Re: [PATCH 2/2] drm/ttm: Fix COW check
On Fri, Jul 9, 2021 at 8:38 AM Christian König wrote: > > > > Am 08.07.21 um 21:36 schrieb Alex Deucher: > > From: Felix Kuehling > > > > KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. > > is_cow_mapping returns true for these mappings. Add a check for > > vm_flags & VM_WRITE to avoid mmap failures on private read-only or > > PROT_NONE mappings. > > I'm pretty sure that this is not working as expected. > > > > > Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") > > Signed-off-by: Felix Kuehling > > Signed-off-by: Alex Deucher > > --- > > drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c > > b/drivers/gpu/drm/ttm/ttm_bo_vm.c > > index f56be5bc0861..a75e90c7d4aa 100644 > > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c > > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c > > @@ -552,7 +552,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops > > = { > > int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object > > *bo) > > { > > /* Enforce no COW since would have really strange behavior with it. */ > > - if (is_cow_mapping(vma->vm_flags)) > > + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) > > is_cow_mapping() already checks for VM_MAYWRITE, so this here shouldn't > be necessary. MAYWRITE != WRITE But then you need to make sure you do catch mprotect() calls to catch the cow, and I'm not sure that's even possible. Otherwise it'll go boom again on the page refcount. -Daniel > > Christian. > > > return -EINVAL; > > > > ttm_bo_get(bo); > -- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Re: [PATCH 2/2] drm/ttm: Fix COW check
Am 08.07.21 um 21:36 schrieb Alex Deucher: From: Felix Kuehling KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE. is_cow_mapping returns true for these mappings. Add a check for vm_flags & VM_WRITE to avoid mmap failures on private read-only or PROT_NONE mappings. I'm pretty sure that this is not working as expected. Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3") Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index f56be5bc0861..a75e90c7d4aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -552,7 +552,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { /* Enforce no COW since would have really strange behavior with it. */ - if (is_cow_mapping(vma->vm_flags)) + if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE)) is_cow_mapping() already checks for VM_MAYWRITE, so this here shouldn't be necessary. Christian. return -EINVAL; ttm_bo_get(bo); ___ amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx