There is no reason to disable pagefaults and preemption as a side effect of
kmap_atomic_prot().

Use kmap_local_page_prot() instead and document why the mapping is
established with the given pgprot.

Remove the NULL pointer check on the returned address. These functions
return a valid address for valid pages, and the error return was bogus
anyway as it would have returned with preemption and pagefaults still
disabled.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: VMware Graphics <linux-graphics-maintai...@vmware.com>
Cc: Roland Scheidegger <srol...@vmware.com>
Cc: David Airlie <airl...@linux.ie>
Cc: Daniel Vetter <dan...@ffwll.ch>
Cc: dri-de...@lists.freedesktop.org
---
V3: New patch
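
For reference, a minimal sketch of the conversion pattern (illustration
only, not part of the patch; the function and argument names are
stand-ins for the blit code's own):

  #include <linux/highmem.h>
  #include <linux/string.h>

  /* Copy data into a page mapped with a caller-supplied pgprot. */
  static void blit_one_page(struct page *page, pgprot_t prot, const void *src)
  {
          void *addr;

          /*
           * Old: kmap_atomic_prot() disabled pagefaults and preemption
           * as a side effect, and the NULL check on its return value was
           * pointless as it never fails for a valid page.
           *
           * New: kmap_local_page_prot() creates a CPU-local mapping,
           * keeps pagefaults and preemption enabled and cannot fail for
           * a valid page, so no NULL check is required.
           */
          addr = kmap_local_page_prot(page, prot);
          memcpy(addr, src, PAGE_SIZE);
          kunmap_local(addr);
  }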
---
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c |   30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct v
                copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
 
                if (unmap_src) {
-                       kunmap_atomic(d->src_addr);
+                       kunmap_local(d->src_addr);
                        d->src_addr = NULL;
                }
 
                if (unmap_dst) {
-                       kunmap_atomic(d->dst_addr);
+                       kunmap_local(d->dst_addr);
                        d->dst_addr = NULL;
                }
 
@@ -388,12 +388,8 @@ static int vmw_bo_cpu_blit_line(struct v
                        if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
                                return -EINVAL;
 
-                       d->dst_addr =
-                               kmap_atomic_prot(d->dst_pages[dst_page],
-                                                d->dst_prot);
-                       if (!d->dst_addr)
-                               return -ENOMEM;
-
+                       d->dst_addr = kmap_local_page_prot(d->dst_pages[dst_page],
+                                                          d->dst_prot);
                        d->mapped_dst = dst_page;
                }
 
@@ -401,12 +397,8 @@ static int vmw_bo_cpu_blit_line(struct v
                        if (WARN_ON_ONCE(src_page >= d->src_num_pages))
                                return -EINVAL;
 
-                       d->src_addr =
-                               kmap_atomic_prot(d->src_pages[src_page],
-                                                d->src_prot);
-                       if (!d->src_addr)
-                               return -ENOMEM;
-
+                       d->src_addr = kmap_local_page_prot(d->src_pages[src_page],
+                                                          d->src_prot);
                        d->mapped_src = src_page;
                }
                diff->do_cpy(diff, d->dst_addr + dst_page_offset,
@@ -436,8 +428,10 @@ static int vmw_bo_cpu_blit_line(struct v
  *
  * Performs a CPU blit from one buffer object to another avoiding a full
  * bo vmap which may exhaust- or fragment vmalloc space.
- * On supported architectures (x86), we're using kmap_atomic which avoids
- * cross-processor TLB- and cache flushes and may, on non-HIGHMEM systems
+ *
+ * On supported architectures (x86), we're using kmap_local_page_prot()
+ * which avoids cross-processor TLB- and cache flushes. kmap_local_page_prot()
+ * will either map a highmem page with the proper pgprot on HIGHMEM=y systems or
  * reference already set-up mappings.
  *
  * Neither of the buffer objects may be placed in PCI memory
@@ -500,9 +494,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
        }
 out:
        if (d.src_addr)
-               kunmap_atomic(d.src_addr);
+               kunmap_local(d.src_addr);
        if (d.dst_addr)
-               kunmap_atomic(d.dst_addr);
+               kunmap_local(d.dst_addr);
 
        return ret;
 }
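
A usage note on the out: path above (sketch only; the src/dst names are
illustrative, not from the patch): kmap_local mappings nest and, per the
kernel's highmem documentation, have to be released in reverse (LIFO)
order when several are held at once:

  src = kmap_local_page_prot(src_page, src_prot);
  dst = kmap_local_page_prot(dst_page, dst_prot);
  memcpy(dst, src, PAGE_SIZE);
  kunmap_local(dst);      /* acquired last, released first */
  kunmap_local(src);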
