Re: [Intel-gfx] [PATCH 1/2] drm/i915/dmabuf: dmabuf cleanup

2022-05-31 Thread Ruhl, Michael J
ping?

Any comments on this cleanup and the open-coding fix (patch 2)?

Thanks,

Mike


>-----Original Message-----
>From: Ruhl, Michael J 
>Sent: Friday, May 6, 2022 3:09 PM
>To: intel-gfx@lists.freedesktop.org; airl...@linux.ie; dan...@ffwll.ch
>Cc: tvrtko.ursu...@linux.intel.com; De Marchi, Lucas
>; Ruhl, Michael J ;
>Ursulin, Tvrtko 
>Subject: [PATCH 1/2] drm/i915/dmabuf: dmabuf cleanup
>
>Some minor cleanup of some variables for consistency.
>
>Normalize struct sg_table to sgt.
>Normalize struct dma_buf_attachment to attach.
>Fix checkpatch issues: sizeof() usage and !NULL comparison updates.
>
>Cc: Tvrtko Ursulin 
>Signed-off-by: Michael J. Ruhl 
>---
> drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 47 --
> 1 file changed, 25 insertions(+), 22 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>index f5062d0c6333..5f327eac26e6 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
>@@ -25,43 +25,46 @@ static struct drm_i915_gem_object
>*dma_buf_to_obj(struct dma_buf *buf)
>   return to_intel_bo(buf->priv);
> }
>
>-static struct sg_table *i915_gem_map_dma_buf(struct
>dma_buf_attachment *attachment,
>+static struct sg_table *i915_gem_map_dma_buf(struct
>dma_buf_attachment *attach,
>enum dma_data_direction dir)
> {
>-  struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment-
>>dmabuf);
>-  struct sg_table *st;
>+  struct drm_i915_gem_object *obj = dma_buf_to_obj(attach-
>>dmabuf);
>+  struct sg_table *sgt;
>   struct scatterlist *src, *dst;
>   int ret, i;
>
>-  /* Copy sg so that we make an independent mapping */
>-  st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
>-  if (st == NULL) {
>+  /*
>+   * Make a copy of the object's sgt, so that we can make an
>independent
>+   * mapping
>+   */
>+  sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
>+  if (!sgt) {
>   ret = -ENOMEM;
>   goto err;
>   }
>
>-  ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
>+  ret = sg_alloc_table(sgt, obj->mm.pages->nents, GFP_KERNEL);
>   if (ret)
>   goto err_free;
>
>   src = obj->mm.pages->sgl;
>-  dst = st->sgl;
>+  dst = sgt->sgl;
>   for (i = 0; i < obj->mm.pages->nents; i++) {
>   sg_set_page(dst, sg_page(src), src->length, 0);
>   dst = sg_next(dst);
>   src = sg_next(src);
>   }
>
>-  ret = dma_map_sgtable(attachment->dev, st, dir,
>DMA_ATTR_SKIP_CPU_SYNC);
>+  ret = dma_map_sgtable(attach->dev, sgt, dir,
>DMA_ATTR_SKIP_CPU_SYNC);
>   if (ret)
>   goto err_free_sg;
>
>-  return st;
>+  return sgt;
>
> err_free_sg:
>-  sg_free_table(st);
>+  sg_free_table(sgt);
> err_free:
>-  kfree(st);
>+  kfree(sgt);
> err:
>   return ERR_PTR(ret);
> }
>@@ -236,15 +239,15 @@ struct dma_buf *i915_gem_prime_export(struct
>drm_gem_object *gem_obj, int flags)
> static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object
>*obj)
> {
>   struct drm_i915_private *i915 = to_i915(obj->base.dev);
>-  struct sg_table *pages;
>+  struct sg_table *sgt;
>   unsigned int sg_page_sizes;
>
>   assert_object_held(obj);
>
>-  pages = dma_buf_map_attachment(obj->base.import_attach,
>- DMA_BIDIRECTIONAL);
>-  if (IS_ERR(pages))
>-  return PTR_ERR(pages);
>+  sgt = dma_buf_map_attachment(obj->base.import_attach,
>+   DMA_BIDIRECTIONAL);
>+  if (IS_ERR(sgt))
>+  return PTR_ERR(sgt);
>
>   /*
>* DG1 is special here since it still snoops transactions even with
>@@ -261,16 +264,16 @@ static int
>i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
>   (!HAS_LLC(i915) && !IS_DG1(i915)))
>   wbinvd_on_all_cpus();
>
>-  sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
>-  __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
>+  sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
>+  __i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
>
>   return 0;
> }
>
> static void i915_gem_object_put_pages_dmabuf(struct
>drm_i915_gem_object *obj,
>-   struct sg_table *pages)
>+   struct sg_table *sgt)
> {
>-  dma_buf_unmap_attachment(obj->base.import_attach, pages,
>+  dma_buf_unmap_attachment(obj->base.import_attach, sgt,
>DMA_BIDIRECTIONAL);
> }
>
>@@ -313,7 +316,7 @@ struct drm_gem_object
>*i915_gem_prime_import(struct drm_device *dev,
>   get_dma_buf(dma_buf);
>
>   obj = i915_gem_object_alloc();
>-  if (obj == NULL) {
>+  if (!obj) {
>   ret = -ENOMEM;
>   goto fail_detach;
>   }
>--
>2.31.1



[Intel-gfx] [PATCH 1/2] drm/i915/dmabuf: dmabuf cleanup

2022-05-06 Thread Michael J. Ruhl
Some minor cleanup of some variables for consistency.

Normalize struct sg_table to sgt.
Normalize struct dma_buf_attachment to attach.
Fix checkpatch issues: sizeof() usage and !NULL comparison updates.

Cc: Tvrtko Ursulin 
Signed-off-by: Michael J. Ruhl 
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 47 --
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c 
b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index f5062d0c6333..5f327eac26e6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -25,43 +25,46 @@ static struct drm_i915_gem_object *dma_buf_to_obj(struct 
dma_buf *buf)
return to_intel_bo(buf->priv);
 }
 
-static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment 
*attachment,
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
 enum dma_data_direction dir)
 {
-   struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-   struct sg_table *st;
+   struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
+   struct sg_table *sgt;
struct scatterlist *src, *dst;
int ret, i;
 
-   /* Copy sg so that we make an independent mapping */
-   st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-   if (st == NULL) {
+   /*
+* Make a copy of the object's sgt, so that we can make an independent
+* mapping
+*/
+   sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+   if (!sgt) {
ret = -ENOMEM;
goto err;
}
 
-   ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+   ret = sg_alloc_table(sgt, obj->mm.pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
 
src = obj->mm.pages->sgl;
-   dst = st->sgl;
+   dst = sgt->sgl;
for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
 
-   ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
+   ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
if (ret)
goto err_free_sg;
 
-   return st;
+   return sgt;
 
 err_free_sg:
-   sg_free_table(st);
+   sg_free_table(sgt);
 err_free:
-   kfree(st);
+   kfree(sgt);
 err:
return ERR_PTR(ret);
 }
@@ -236,15 +239,15 @@ struct dma_buf *i915_gem_prime_export(struct 
drm_gem_object *gem_obj, int flags)
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
-   struct sg_table *pages;
+   struct sg_table *sgt;
unsigned int sg_page_sizes;
 
assert_object_held(obj);
 
-   pages = dma_buf_map_attachment(obj->base.import_attach,
-  DMA_BIDIRECTIONAL);
-   if (IS_ERR(pages))
-   return PTR_ERR(pages);
+   sgt = dma_buf_map_attachment(obj->base.import_attach,
+DMA_BIDIRECTIONAL);
+   if (IS_ERR(sgt))
+   return PTR_ERR(sgt);
 
/*
 * DG1 is special here since it still snoops transactions even with
@@ -261,16 +264,16 @@ static int i915_gem_object_get_pages_dmabuf(struct 
drm_i915_gem_object *obj)
(!HAS_LLC(i915) && !IS_DG1(i915)))
wbinvd_on_all_cpus();
 
-   sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
-   __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+   sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
+   __i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
 
return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
-struct sg_table *pages)
+struct sg_table *sgt)
 {
-   dma_buf_unmap_attachment(obj->base.import_attach, pages,
+   dma_buf_unmap_attachment(obj->base.import_attach, sgt,
 DMA_BIDIRECTIONAL);
 }
 
@@ -313,7 +316,7 @@ struct drm_gem_object *i915_gem_prime_import(struct 
drm_device *dev,
get_dma_buf(dma_buf);
 
obj = i915_gem_object_alloc();
-   if (obj == NULL) {
+   if (!obj) {
ret = -ENOMEM;
goto fail_detach;
}
-- 
2.31.1