Hi Nirmoy,

[...]

> +int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
> +{
> +     struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +     struct drm_device *dev = &i915->drm;
> +     struct i915_mmap_offset *mmo = NULL;
> +     enum i915_mmap_type mmap_type;
> +     struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
> +
> +     if (drm_dev_is_unplugged(dev))
> +             return -ENODEV;
> +
> +     /* handle ttm object */
> +     if (obj->ops->mmap_ops) {
> +             /*
> +              * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
> +              * to calculate page offset so set that up.
> +              */
> +             vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);

You could have kept my Reviewed-by tag from the previous revision. Good work here!

Reviewed-by: Andi Shyti <andi.sh...@linux.intel.com> 

Thanks,
Andi

> +     } else {
> +             /* handle stolen and smem objects */
> +             mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
> +             mmo = mmap_offset_attach(obj, mmap_type, NULL);
> +             if (!mmo)
> +                     return -ENODEV;
> +     }
> +
> +     /*
> +      * When we install vm_ops for mmap we are too late for
> +      * the vm_ops->open() which increases the ref_count of
> +      * this obj and then it gets decreased by the vm_ops->close().
> +      * To balance this increase the obj ref_count here.
> +      */
> +     obj = i915_gem_object_get(obj);
> +     return i915_gem_object_mmap(obj, mmo, vma);
> +}

Reply via email to