Re: [RFC PATCH 1/2] drm/i915: Add a function to mmap framebuffer obj

2023-03-20 Thread Das, Nirmoy



On 3/20/2023 1:38 AM, Andi Shyti wrote:

Hi Nirmoy,

On Thu, Mar 16, 2023 at 06:22:19PM +0100, Nirmoy Das wrote:

Implement i915_gem_fb_mmap() to enable fb_ops.fb_mmap()
callback for i915's framebuffer objects.

v2: add a comment explaining why i915_gem_object_get() is needed (Andi).

Cc: Matthew Auld 
Cc: Andi Shyti 
Cc: Ville Syrjälä 
Cc: Jani Nikula 
Cc: Imre Deak 
Signed-off-by: Nirmoy Das 

I think you can fire the PATCH here instead of the RFC. Looks
good to me.

Reviewed-by: Andi Shyti 



Thanks, I will do that.

Nirmoy



Andi


Re: [RFC PATCH 1/2] drm/i915: Add a function to mmap framebuffer obj

2023-03-19 Thread Andi Shyti
Hi Nirmoy,

On Thu, Mar 16, 2023 at 06:22:19PM +0100, Nirmoy Das wrote:
> Implement i915_gem_fb_mmap() to enable fb_ops.fb_mmap()
> callback for i915's framebuffer objects.
> 
> v2: add a comment explaining why i915_gem_object_get() is needed (Andi).
> 
> Cc: Matthew Auld 
> Cc: Andi Shyti 
> Cc: Ville Syrjälä 
> Cc: Jani Nikula 
> Cc: Imre Deak 
> Signed-off-by: Nirmoy Das 

I think you can fire the PATCH here instead of the RFC. Looks
good to me.

Reviewed-by: Andi Shyti 

Andi


[RFC PATCH 1/2] drm/i915: Add a function to mmap framebuffer obj

2023-03-16 Thread Nirmoy Das
Implement i915_gem_fb_mmap() to enable fb_ops.fb_mmap()
callback for i915's framebuffer objects.

v2: add a comment explaining why i915_gem_object_get() is needed (Andi).

Cc: Matthew Auld 
Cc: Andi Shyti 
Cc: Ville Syrjälä 
Cc: Jani Nikula 
Cc: Imre Deak 
Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 127 +++
 drivers/gpu/drm/i915/gem/i915_gem_mman.h |   2 +-
 2 files changed, 83 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c 
b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d3c1dee16af2..341e952d3510 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -927,53 +927,15 @@ static struct file *mmap_singleton(struct 
drm_i915_private *i915)
return file;
 }
 
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+i915_gem_object_mmap(struct drm_i915_gem_object *obj,
+struct i915_mmap_offset *mmo,
+struct vm_area_struct *vma)
 {
-   struct drm_vma_offset_node *node;
-   struct drm_file *priv = filp->private_data;
-   struct drm_device *dev = priv->minor->dev;
-   struct drm_i915_gem_object *obj = NULL;
-   struct i915_mmap_offset *mmo = NULL;
+   struct drm_i915_private *i915 = to_i915(obj->base.dev);
+   struct drm_device *dev = &i915->drm;
struct file *anon;
 
-   if (drm_dev_is_unplugged(dev))
-   return -ENODEV;
-
-   rcu_read_lock();
-   drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-   node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
-   if (node && drm_vma_node_is_allowed(node, priv)) {
-   /*
-* Skip 0-refcnted objects as it is in the process of being
-* destroyed and will be invalid when the vma manager lock
-* is released.
-*/
-   if (!node->driver_private) {
-   mmo = container_of(node, struct i915_mmap_offset, 
vma_node);
-   obj = i915_gem_object_get_rcu(mmo->obj);
-
-   GEM_BUG_ON(obj && obj->ops->mmap_ops);
-   } else {
-   obj = i915_gem_object_get_rcu
-   (container_of(node, struct drm_i915_gem_object,
- base.vma_node));
-
-   GEM_BUG_ON(obj && !obj->ops->mmap_ops);
-   }
-   }
-   drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-   rcu_read_unlock();
-   if (!obj)
-   return node ? -EACCES : -EINVAL;
-
if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
i915_gem_object_put(obj);
@@ -1005,7 +967,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct 
*vma)
if (obj->ops->mmap_ops) {
vma->vm_page_prot = 
pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
-   vma->vm_private_data = node->driver_private;
+   vma->vm_private_data = obj->base.vma_node.driver_private;
return 0;
}
 
@@ -1043,6 +1005,81 @@ int i915_gem_mmap(struct file *filp, struct 
vm_area_struct *vma)
return 0;
 }
 
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+   struct drm_vma_offset_node *node;
+   struct drm_file *priv = filp->private_data;
+   struct drm_device *dev = priv->minor->dev;
+   struct drm_i915_gem_object *obj = NULL;
+   struct i915_mmap_offset *mmo = NULL;
+
+   if (drm_dev_is_unplugged(dev))
+   return -ENODEV;
+
+   rcu_read_lock();
+   drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+   node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+   if (node && drm_vma_node_is_allowed(node, priv)) {
+   /*
+* Skip 0-refcnted objects as it is in the process of being
+* destroyed and will be invalid when the vma manager lock
+* is released.
+*/
+   if (!node->driver_private) {
+   mmo = container_of(node,