As with a patch earlier in this series, we can simply map and unmap page
directories as part of their allocation and teardown, rather than in a
separate pass. Page directory pages only exist on gen8+, so this should
only affect behavior on those platforms.

Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
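Purely illustrative, not part of the patch: a minimal, self-contained sketch
of the pattern the commit message describes, where the DMA mapping is done
inside the allocator and the unmap inside the free helper. The struct and
helper names below (sketch_pagedir, sketch_alloc_pd, sketch_free_pd) are
hypothetical stand-ins, and a plain struct device * is taken directly instead
of the driver's drm_device, so this is not the real i915_pagedir /
i915_dma_map_px_single() API used in the diff.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Simplified stand-in for i915_pagedir: one backing page plus its DMA
 * address, which is valid for the whole lifetime of the object. */
struct sketch_pagedir {
	struct page *page;
	dma_addr_t daddr;
};

static struct sketch_pagedir *sketch_alloc_pd(struct device *dev)
{
	struct sketch_pagedir *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->page = alloc_page(GFP_KERNEL);
	if (!pd->page) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	/* Map at allocation time and unwind fully on failure, so callers
	 * only ever see a mapped page directory or an error pointer. */
	pd->daddr = dma_map_page(dev, pd->page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pd->daddr)) {
		__free_page(pd->page);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

static void sketch_free_pd(struct sketch_pagedir *pd, struct device *dev)
{
	/* Unmap at teardown, mirroring the allocation path above; no
	 * separate unmap pass over the page directories is needed. */
	dma_unmap_page(dev, pd->daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(pd->page);
	kfree(pd);
}

With allocation and teardown paired like this, the separate
gen8_ppgtt_dma_unmap_pages()/gen8_ppgtt_setup_page_directories() passes
become redundant, which is why the diff below deletes them.
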
 drivers/gpu/drm/i915/i915_gem_gtt.c | 79 +++++++++----------------------------
 1 file changed, 19 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index abef33dd..ad2f2c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -283,21 +283,23 @@ err_out:
        return ret;
 }
 
-static void __free_pd_single(struct i915_pagedir *pd)
+static void __free_pd_single(struct i915_pagedir *pd, struct drm_device *dev)
 {
+       i915_dma_unmap_single(pd, dev);
        __free_page(pd->page);
        kfree(pd);
 }
 
-#define free_pd_single(pd) do { \
+#define free_pd_single(pd, dev) do { \
        if ((pd)->page) { \
-               __free_pd_single(pd); \
+               __free_pd_single(pd, dev); \
        } \
 } while (0)
 
-static struct i915_pagedir *alloc_pd_single(void)
+static struct i915_pagedir *alloc_pd_single(struct drm_device *dev)
 {
        struct i915_pagedir *pd;
+       int ret;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
@@ -309,6 +311,13 @@ static struct i915_pagedir *alloc_pd_single(void)
                return ERR_PTR(-ENOMEM);
        }
 
+       ret = i915_dma_map_px_single(pd, dev);
+       if (ret) {
+               __free_page(pd->page);
+               kfree(pd);
+               return ERR_PTR(ret);
+       }
+
        return pd;
 }
 
@@ -466,30 +475,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
        for (i = 0; i < ppgtt->num_pd_pages; i++) {
                gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
-               free_pd_single(ppgtt->pdp.pagedir[i]);
-       }
-}
-
-static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-       struct drm_device *dev = ppgtt->base.dev;
-       int i, j;
-
-       for (i = 0; i < ppgtt->num_pd_pages; i++) {
-               /* TODO: In the future we'll support sparse mappings, so this
-                * will have to change. */
-               if (!ppgtt->pdp.pagedir[i]->daddr)
-                       continue;
-
-               i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
-
-               for (j = 0; j < I915_PDES_PER_PD; j++) {
-                       struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
-                       struct i915_pagetab *pt =  pd->page_tables[j];
-                       dma_addr_t addr = pt->daddr;
-                       if (addr)
-                               i915_dma_unmap_single(pt, dev);
-               }
+               free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
        }
 }
 
@@ -501,7 +487,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
        list_del(&vm->global_link);
        drm_mm_takedown(&vm->mm);
 
-       gen8_ppgtt_dma_unmap_pages(ppgtt);
        gen8_ppgtt_free(ppgtt);
 }
 
@@ -531,7 +516,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
        int i;
 
        for (i = 0; i < max_pdp; i++) {
-               ppgtt->pdp.pagedir[i] = alloc_pd_single();
+               ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
                if (IS_ERR(ppgtt->pdp.pagedir[i]))
                        goto unwind_out;
        }
@@ -543,7 +528,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
        while (i--)
-               free_pd_single(ppgtt->pdp.pagedir[i]);
+               free_pd_single(ppgtt->pdp.pagedir[i],
+                              ppgtt->base.dev);
 
        return -ENOMEM;
 }
@@ -571,19 +557,6 @@ err_out:
        return ret;
 }
 
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
-                                            const int pdpe)
-{
-       int ret;
-
-       ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
-                                    ppgtt->base.dev);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -609,16 +582,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                return ret;
 
        /*
-        * 2. Create DMA mappings for the page directories and page tables.
-        */
-       for (i = 0; i < max_pdp; i++) {
-               ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
-               if (ret)
-                       goto bail;
-       }
-
-       /*
-        * 3. Map all the page directory entires to point to the page tables
+        * 2. Map all the page directory entires to point to the page tables
         * we've allocated.
         *
         * For now, the PPGTT helper functions all require that the PDEs are
@@ -652,11 +616,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                         ppgtt->num_pd_entries,
                         (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
        return 0;
-
-bail:
-       gen8_ppgtt_dma_unmap_pages(ppgtt);
-       gen8_ppgtt_free(ppgtt);
-       return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1034,7 +993,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
 
-       free_pd_single(&ppgtt->pd);
+       free_pd_single(&ppgtt->pd, ppgtt->base.dev);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
-- 
1.9.0
