Re: [PATCH 3/5] drm/i915: Add a function to mmap framebuffer obj

2023-04-04 Thread Das, Nirmoy

Hi Andi,

On 4/4/2023 6:57 PM, Andi Shyti wrote:

> Hi Nirmoy,
>
> [...]


> > +int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
> > +{
> > +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > +	struct drm_device *dev = &i915->drm;
> > +	struct i915_mmap_offset *mmo = NULL;
> > +	enum i915_mmap_type mmap_type;
> > +	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
> > +
> > +	if (drm_dev_is_unplugged(dev))
> > +		return -ENODEV;
> > +
> > +	/* handle ttm object */
> > +	if (obj->ops->mmap_ops) {
> > +		/*
> > +		 * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
> > +		 * to calculate page offset so set that up.
> > +		 */
> > +		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);

> you could have kept my r-b.



I wasn't sure, so I removed it :)



> Good work here!
>
> Reviewed-by: Andi Shyti



Thanks,

Nirmoy



> Thanks,
> Andi


> > +	} else {
> > +		/* handle stolen and smem objects */
> > +		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
> > +		mmo = mmap_offset_attach(obj, mmap_type, NULL);
> > +		if (!mmo)
> > +			return -ENODEV;
> > +	}
> > +
> > +	/*
> > +	 * When we install vm_ops for mmap we are too late for
> > +	 * the vm_ops->open() which increases the ref_count of
> > +	 * this obj and then it gets decreased by the vm_ops->close().
> > +	 * To balance this increase the obj ref_count here.
> > +	 */
> > +	obj = i915_gem_object_get(obj);
> > +	return i915_gem_object_mmap(obj, mmo, vma);
> > +}
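
The ref_count comment in the quoted code follows the usual DRM vm_ops pattern: ->open() runs when a VMA is duplicated (fork, split) and takes a reference, while ->close() runs on every unmap and drops one. The initial mmap() installs the vm_ops too late for ->open() to run, so the driver has to take that first reference by hand. A minimal sketch of the pattern, with hypothetical helper names rather than the actual i915 ones:

	/*
	 * Sketch only: one object reference per VMA.  ->open() covers VMA
	 * duplication; the reference for the initial mmap() must be taken
	 * manually, as i915_gem_fb_mmap() does above with
	 * i915_gem_object_get().  Simplification: this assumes
	 * vm_private_data points at the object itself.
	 */
	static void sketch_vm_open(struct vm_area_struct *vma)
	{
		struct drm_i915_gem_object *obj = vma->vm_private_data;

		i915_gem_object_get(obj);
	}

	static void sketch_vm_close(struct vm_area_struct *vma)
	{
		struct drm_i915_gem_object *obj = vma->vm_private_data;

		i915_gem_object_put(obj);
	}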


Re: [PATCH 3/5] drm/i915: Add a function to mmap framebuffer obj

2023-04-04 Thread Andi Shyti
Hi Nirmoy,

[...]

> +int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
> +{
> +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	struct drm_device *dev = &i915->drm;
> +	struct i915_mmap_offset *mmo = NULL;
> +	enum i915_mmap_type mmap_type;
> +	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
> +
> +	if (drm_dev_is_unplugged(dev))
> +		return -ENODEV;
> +
> +	/* handle ttm object */
> +	if (obj->ops->mmap_ops) {
> +		/*
> +		 * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
> +		 * to calculate page offset so set that up.
> +		 */
> +		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);

you could have kept my r-b. Good work here!

Reviewed-by: Andi Shyti  

Thanks,
Andi

> +	} else {
> +		/* handle stolen and smem objects */
> +		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
> +		mmo = mmap_offset_attach(obj, mmap_type, NULL);
> +		if (!mmo)
> +			return -ENODEV;
> +	}
> +
> +	/*
> +	 * When we install vm_ops for mmap we are too late for
> +	 * the vm_ops->open() which increases the ref_count of
> +	 * this obj and then it gets decreased by the vm_ops->close().
> +	 * To balance this increase the obj ref_count here.
> +	 */
> +	obj = i915_gem_object_get(obj);
> +	return i915_gem_object_mmap(obj, mmo, vma);
> +}
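
The "fake offset" mentioned in the quoted comment is the object's start in the DRM vma offset manager's address space. TTM's fault handler subtracts it again to recover the page index inside the object, which is why i915_gem_fb_mmap() has to add it to vm_pgoff first. A simplified sketch of that arithmetic, modelled on ttm_bo_vm_fault_reserved() (the function name here is illustrative):

	/*
	 * Sketch of how the TTM fault path turns a faulting address back
	 * into a page index within the object.  vm_pgoff carries the fake
	 * offset (drm_vma_node_start()), so the subtraction below only
	 * comes out right if the mmap path added it in the first place.
	 */
	static pgoff_t sketch_fault_page_offset(struct vm_fault *vmf,
						struct drm_gem_object *gem)
	{
		struct vm_area_struct *vma = vmf->vma;

		return ((vmf->address - vma->vm_start) >> PAGE_SHIFT) +
		       vma->vm_pgoff - drm_vma_node_start(&gem->vma_node);
	}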


[PATCH 3/5] drm/i915: Add a function to mmap framebuffer obj

2023-04-04 Thread Nirmoy Das
Implement i915_gem_fb_mmap() to enable the fb_ops.fb_mmap()
callback for i915's framebuffer objects.

v2: add a comment explaining why i915_gem_object_get() is needed (Andi).
v3: also mmap ttm objects.
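
For context: fb_ops.fb_mmap() has the signature int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma), so a later patch in the series can delegate it to this helper. A hypothetical sketch of such a callback (the callback name and object lookup are illustrative; only i915_gem_fb_mmap() comes from this patch):

	/* Sketch: resolve the GEM object backing plane 0 of the emulated fbdev and mmap it. */
	static int example_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
	{
		struct drm_fb_helper *fb_helper = info->par;
		struct drm_gem_object *bo = drm_gem_fb_get_obj(fb_helper->fb, 0);

		return i915_gem_fb_mmap(to_intel_bo(bo), vma);
	}

	static const struct fb_ops example_fb_ops = {
		.owner = THIS_MODULE,
		/* ... other fbdev ops ... */
		.fb_mmap = example_fbdev_mmap,
	};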

Cc: Matthew Auld 
Cc: Andi Shyti 
Cc: Ville Syrjälä 
Cc: Jani Nikula 
Cc: Imre Deak 
Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 137 +++
 drivers/gpu/drm/i915/gem/i915_gem_mman.h |   2 +-
 2 files changed, 93 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d3c1dee16af2..3dbacdf0911a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -927,53 +927,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
 	return file;
 }
 
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+i915_gem_object_mmap(struct drm_i915_gem_object *obj,
+		     struct i915_mmap_offset *mmo,
+		     struct vm_area_struct *vma)
 {
-	struct drm_vma_offset_node *node;
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_i915_gem_object *obj = NULL;
-	struct i915_mmap_offset *mmo = NULL;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_device *dev = &i915->drm;
 	struct file *anon;
 
-	if (drm_dev_is_unplugged(dev))
-		return -ENODEV;
-
-	rcu_read_lock();
-	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-						  vma->vm_pgoff,
-						  vma_pages(vma));
-	if (node && drm_vma_node_is_allowed(node, priv)) {
-		/*
-		 * Skip 0-refcnted objects as it is in the process of being
-		 * destroyed and will be invalid when the vma manager lock
-		 * is released.
-		 */
-		if (!node->driver_private) {
-			mmo = container_of(node, struct i915_mmap_offset, vma_node);
-			obj = i915_gem_object_get_rcu(mmo->obj);
-
-			GEM_BUG_ON(obj && obj->ops->mmap_ops);
-		} else {
-			obj = i915_gem_object_get_rcu
-				(container_of(node, struct drm_i915_gem_object,
-					      base.vma_node));
-
-			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
-		}
-	}
-	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-	rcu_read_unlock();
-	if (!obj)
-		return node ? -EACCES : -EINVAL;
-
 	if (i915_gem_object_is_readonly(obj)) {
 		if (vma->vm_flags & VM_WRITE) {
 			i915_gem_object_put(obj);
@@ -1005,7 +967,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (obj->ops->mmap_ops) {
 		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
 		vma->vm_ops = obj->ops->mmap_ops;
-		vma->vm_private_data = node->driver_private;
+		vma->vm_private_data = obj->base.vma_node.driver_private;
 		return 0;
 	}
 
@@ -1043,6 +1005,91 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_i915_gem_object *obj = NULL;
+	struct i915_mmap_offset *mmo = NULL;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	rcu_read_lock();
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (node && drm_vma_node_is_allowed(node, priv)) {
+		/*
+		 * Skip 0-refcnted objects as it is in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+		 */
+		if (!node->driver_private) {
+
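The hunk is cut off here, but the "0-refcnted" comment above is worth unpacking: the lookup runs under RCU, so it can race with object destruction, and the reference must therefore be taken with kref_get_unless_zero() semantics. A sketch of the idea, assuming i915_gem_object_get_rcu() follows the standard pattern:

	/*
	 * Sketch, assuming the standard RCU-safe get: if the refcount has
	 * already dropped to zero the object is mid-destruction and the
	 * lookup must treat it as not found.
	 */
	static struct drm_i915_gem_object *
	sketch_object_get_rcu(struct drm_i915_gem_object *obj)
	{
		if (obj && !kref_get_unless_zero(&obj->base.refcount))
			obj = NULL;	/* lost the race with destruction */

		return obj;
	}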