Re: [Intel-gfx] [PATCH 2/2] drm/i915: Fix i915_vma_pin_iomap()

2022-05-11 Thread Matthew Auld
On Fri, 6 May 2022 at 14:11, Juha-Pekka Heikkila wrote:
>
> From: CQ Tang 
>
> Display might allocate an smem object and call
> i915_vma_pin_iomap(); the existing code will fail in that case.
>
> This fix was suggested by Chris P Wilson: pin the smem
> object with i915_gem_object_pin_map_unlocked().
>
> Signed-off-by: CQ Tang 
> Signed-off-by: Juha-Pekka Heikkila 
> Cc: Chris Wilson 
> Cc: Jari Tahvanainen 
> ---
>  drivers/gpu/drm/i915/i915_vma.c | 34 ++---
>  1 file changed, 23 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 162e8d83691b..8ce016ef3dba 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -550,13 +550,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
> if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
> return IO_ERR_PTR(-EINVAL);
>
> -   if (!i915_gem_object_is_lmem(vma->obj)) {
> -   if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
> -   err = -ENODEV;
> -   goto err;
> -   }
> -   }
> -
> GEM_BUG_ON(!i915_vma_is_ggtt(vma));
> GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
> GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
> @@ -572,17 +565,31 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
> if (i915_gem_object_is_lmem(vma->obj))
> ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
> vma->obj->base.size);
> -   else
> +   else if (i915_vma_is_map_and_fenceable(vma))
> ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
> vma->node.start,
> vma->node.size);
> +   else {
> +   ptr = (void __iomem *)
> +   i915_gem_object_pin_map_unlocked(vma->obj,
> +   I915_MAP_WC);

Note that with this patch we could now also get rid of lmem_io_map()
and just use this path instead. We just need to ensure we always have
the object lock held when calling pin_iomap, and drop the _unlocked
here, or maybe add a pin_iomap_unlocked if that is easier. If we are
always holding the object lock, we can then also maybe drop the
lockless tricks below...
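
A rough sketch of the kind of wrapper I mean, assuming pin_iomap() is
first switched over to the locked i915_gem_object_pin_map() internally
(illustrative only, the helper below does not exist today):

	void __iomem *i915_vma_pin_iomap_unlocked(struct i915_vma *vma)
	{
		void __iomem *ptr;
		int err;

		/* Hypothetical: take the object lock around pin_iomap() so
		 * the pin_map() path inside it no longer needs _unlocked.
		 */
		err = i915_gem_object_lock_interruptible(vma->obj, NULL);
		if (err)
			return IO_ERR_PTR(err);

		ptr = i915_vma_pin_iomap(vma);
		i915_gem_object_unlock(vma->obj);

		return ptr;
	}

Callers that already hold the object lock would keep calling
i915_vma_pin_iomap() directly.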

> +   if (IS_ERR(ptr)) {
> +   err = PTR_ERR(ptr);
> +   goto err;
> +   }
> +   ptr = page_pack_bits(ptr, 1);

We should try to avoid pointer packing, but I guess here this is just
re-using the mm.mapping stuff.
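
For reference, the page_*_bits() helpers from i915_utils.h just stash a
flag in the low, page-offset bits of a page-aligned pointer, along these
lines (illustrative fragment, with ptr being the page-aligned mapping
from above):

	void __iomem *tagged = page_pack_bits(ptr, 1);        /* OR the flag into the low bits  */
	unsigned long from_pin_map = page_unmask_bits(tagged); /* read the flag back, here 1     */
	void __iomem *clean = page_mask_bits(tagged);          /* strip the flag, original ptr   */

So the low bit here just records "release this via
__i915_gem_object_release_map() rather than io_mapping_unmap()".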

> +   }
> +
> if (ptr == NULL) {
> err = -ENOMEM;
> goto err;
> }
>
> if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
> -   io_mapping_unmap(ptr);
> +   if (page_unmask_bits(ptr))
> +   __i915_gem_object_release_map(vma->obj);
> +   else
> +   io_mapping_unmap(ptr);
> ptr = vma->iomap;
> }
> }
> @@ -596,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
> i915_vma_set_ggtt_write(vma);
>
> /* NB Access through the GTT requires the device to be awake. */
> -   return ptr;
> +   return page_mask_bits(ptr);
>
>  err_unpin:
> __i915_vma_unpin(vma);
> @@ -614,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
>  {
> GEM_BUG_ON(vma->iomap == NULL);
>
> +   /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
> +
> i915_vma_flush_writes(vma);
>
> i915_vma_unpin_fence(vma);
> @@ -1761,7 +1770,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
> if (vma->iomap == NULL)
> return;
>
> -   io_mapping_unmap(vma->iomap);
> +   if (page_unmask_bits(vma->iomap))
> +   __i915_gem_object_release_map(vma->obj);
> +   else
> +   io_mapping_unmap(vma->iomap);
> vma->iomap = NULL;

I don't suppose you want to type a patch that also moves the
__i915_vma_iounmap() in vma_evict out from under the
is_map_and_fenceable check, since we are currently leaking that lmem
mapping, and now also that pin_map? I could have sworn that internal
fixed that somewhere...
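
Roughly like this in __i915_vma_evict(), I mean (untested sketch, the
surrounding lines are from memory):

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);
		i915_vma_flush_writes(vma);
		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	/* Unconditionally, so the lmem io_map and the new pin_map mapping
	 * are also released on unbind, not just the GGTT aperture one.
	 */
	__i915_vma_iounmap(vma);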

With that "leak" fixed,
Reviewed-by: Matthew Auld 

>  }

>
> --
> 2.25.1
>


[Intel-gfx] [PATCH 2/2] drm/i915: Fix i915_vma_pin_iomap()

2022-05-06 Thread Juha-Pekka Heikkila
From: CQ Tang 

Display might allocate an smem object and call
i915_vma_pin_iomap(); the existing code will fail in that case.

This fix was suggested by Chris P Wilson: pin the smem
object with i915_gem_object_pin_map_unlocked().

Signed-off-by: CQ Tang 
Signed-off-by: Juha-Pekka Heikkila 
Cc: Chris Wilson 
Cc: Jari Tahvanainen 
---
 drivers/gpu/drm/i915/i915_vma.c | 34 ++---
 1 file changed, 23 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 162e8d83691b..8ce016ef3dba 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -550,13 +550,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
return IO_ERR_PTR(-EINVAL);
 
-   if (!i915_gem_object_is_lmem(vma->obj)) {
-   if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
-   err = -ENODEV;
-   goto err;
-   }
-   }
-
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -572,17 +565,31 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (i915_gem_object_is_lmem(vma->obj))
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
  vma->obj->base.size);
-   else
+   else if (i915_vma_is_map_and_fenceable(vma))
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
+   else {
+   ptr = (void __iomem *)
+   i915_gem_object_pin_map_unlocked(vma->obj,
+   I915_MAP_WC);
+   if (IS_ERR(ptr)) {
+   err = PTR_ERR(ptr);
+   goto err;
+   }
+   ptr = page_pack_bits(ptr, 1);
+   }
+
if (ptr == NULL) {
err = -ENOMEM;
goto err;
}
 
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
-   io_mapping_unmap(ptr);
+   if (page_unmask_bits(ptr))
+   __i915_gem_object_release_map(vma->obj);
+   else
+   io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
@@ -596,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
i915_vma_set_ggtt_write(vma);
 
/* NB Access through the GTT requires the device to be awake. */
-   return ptr;
+   return page_mask_bits(ptr);
 
 err_unpin:
__i915_vma_unpin(vma);
@@ -614,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
GEM_BUG_ON(vma->iomap == NULL);
 
+   /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
i915_vma_flush_writes(vma);
 
i915_vma_unpin_fence(vma);
@@ -1761,7 +1770,10 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
if (vma->iomap == NULL)
return;
 
-   io_mapping_unmap(vma->iomap);
+   if (page_unmask_bits(vma->iomap))
+   __i915_gem_object_release_map(vma->obj);
+   else
+   io_mapping_unmap(vma->iomap);
vma->iomap = NULL;
 }
 
-- 
2.25.1