Up until now, ppgtt->pdp has always been the root of our page tables.
Legacy 32b addresses acted as if there were 1 PDP with 4 PDPEs.

In preparation for 4 level page tables, we need to stop using ppgtt->pdp
directly unless we know it's what we want. The future structure will use
ppgtt->pml4 for the top level, and the pdp is just one of the entries
being pointed to by a pml4e.

This patch addresses some careless assumptions about the root page
tables that were made throughout development.

Signed-off-by: Ben Widawsky <[email protected]>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 129 ++++++++++++++++++++++++------------
 1 file changed, 85 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df3cd41..c4b53ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -499,6 +499,7 @@ static void gen8_ppgtt_clear_range(struct 
i915_address_space *vm,
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
        gen8_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
@@ -510,7 +511,7 @@ static void gen8_ppgtt_clear_range(struct 
i915_address_space *vm,
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
-               struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+               struct i915_pagedir *pd = pdp->pagedirs[pdpe];
                struct i915_pagetab *pt = pd->page_tables[pde];
                struct page *page_table = pt->page;
 
@@ -544,6 +545,7 @@ static void gen8_ppgtt_insert_entries(struct 
i915_address_space *vm,
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
        gen8_gtt_pte_t *pt_vaddr;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
@@ -554,7 +556,7 @@ static void gen8_ppgtt_insert_entries(struct 
i915_address_space *vm,
 
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
                if (pt_vaddr == NULL) {
-                       struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+                       struct i915_pagedir *pd = pdp->pagedirs[pdpe];
                        struct i915_pagetab *pt = pd->page_tables[pde];
                        struct page *page_table = pt->page;
                        pt_vaddr = kmap_atomic(page_table);
@@ -636,23 +638,22 @@ static void gen8_unmap_pagetable(struct i915_hw_ppgtt 
*ppgtt,
        gen8_map_pagedir(pd, ppgtt->scratch_pt, pde, ppgtt->base.dev);
 }
 
-static void gen8_teardown_va_range(struct i915_address_space *vm,
-                                  uint64_t start, uint64_t length)
+static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
+                                       struct i915_pagedirpo *pdp,
+                                       uint64_t start, uint64_t length)
 {
-       struct i915_hw_ppgtt *ppgtt =
-                       container_of(vm, struct i915_hw_ppgtt, base);
        struct drm_device *dev = vm->dev;
        struct i915_pagedir *pd;
        struct i915_pagetab *pt;
        uint64_t temp;
        uint32_t pdpe, pde, orig_start = start;
 
-       if (!ppgtt->pdp.pagedirs) {
+       if (!pdp || !pdp->pagedirs) {
                /* If pagedirs are already free, there is nothing to do.*/
                return;
        }
 
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                uint64_t pd_len = gen8_clamp_pd(start, length);
                uint64_t pd_start = start;
 
@@ -660,12 +661,12 @@ static void gen8_teardown_va_range(struct 
i915_address_space *vm,
                 * down, and up.
                 */
                if (!pd) {
-                       WARN(test_bit(pdpe, ppgtt->pdp.used_pdpes),
+                       WARN(test_bit(pdpe, pdp->used_pdpes),
                             "PDPE %d is not allocated, but is reserved (%p)\n",
                             pdpe, vm);
                        continue;
                } else {
-                       WARN(!test_bit(pdpe, ppgtt->pdp.used_pdpes),
+                       WARN(!test_bit(pdpe, pdp->used_pdpes),
                             "PDPE %d not reserved, but is allocated (%p)",
                             pdpe, vm);
                }
@@ -691,6 +692,8 @@ static void gen8_teardown_va_range(struct 
i915_address_space *vm,
                                     gen8_pte_count(pd_start, pd_len));
 
                        if (bitmap_empty(pt->used_ptes, GEN8_PTES_PER_PT)) {
+                               struct i915_hw_ppgtt *ppgtt =
+                                       container_of(vm, struct i915_hw_ppgtt, 
base);
                                trace_i915_pagetable_destroy(vm,
                                                             pde,
                                                             pd_start & 
GENMASK_ULL(64, GEN8_PDE_SHIFT),
@@ -705,23 +708,42 @@ static void gen8_teardown_va_range(struct 
i915_address_space *vm,
 
                if (bitmap_empty(pd->used_pdes, I915_PDES_PER_PD)) {
                        free_pd_single(pd, dev);
-                       ppgtt->pdp.pagedirs[pdpe] = NULL;
+                       pdp->pagedirs[pdpe] = NULL;
                        trace_i915_pagedirectory_destroy(vm, pdpe,
                                                         start & 
GENMASK_ULL(64, GEN8_PDPE_SHIFT),
                                                         GEN8_PDPE_SHIFT);
-                       WARN_ON(!test_and_clear_bit(pdpe, 
ppgtt->pdp.used_pdpes));
+                       WARN_ON(!test_and_clear_bit(pdpe, pdp->used_pdpes));
                }
        }
 
-       if (bitmap_empty(ppgtt->pdp.used_pdpes, I915_PDPES_PER_PDP(dev))) {
+       if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev))) {
                /* TODO: When pagetables are fully dynamic:
-               free_pdp_single(&ppgtt->pdp, dev); */
+               free_pdp_single(pdp, dev); */
                trace_i915_pagedirpo_destroy(vm, 0,
                                             orig_start & GENMASK_ULL(64, 
GEN8_PML4E_SHIFT),
                                             GEN8_PML4E_SHIFT);
        }
 }
 
+static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
+                                       struct i915_pml4 *pml4,
+                                       uint64_t start, uint64_t length)
+{
+       BUG();
+}
+
+static void gen8_teardown_va_range(struct i915_address_space *vm,
+                                  uint64_t start, uint64_t length)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       if (!HAS_48B_PPGTT(vm->dev))
+               gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+       else
+               gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+}
+
 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
        trace_i915_va_teardown(&ppgtt->base,
@@ -747,7 +769,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space 
*vm)
 
 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt:     Master ppgtt structure.
+ * @vm:                Master vm structure.
  * @pd:                Page directory for this address range.
  * @start:     Starting virtual address to begin allocations.
  * @length     Size of the allocations.
@@ -763,13 +785,13 @@ static void gen8_ppgtt_cleanup(struct i915_address_space 
*vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
                                     struct i915_pagedir *pd,
                                     uint64_t start,
                                     uint64_t length,
                                     unsigned long *new_pts)
 {
-       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_device *dev = vm->dev;
        struct i915_pagetab *unused;
        uint64_t temp;
        uint32_t pde;
@@ -784,7 +806,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt 
*ppgtt,
                        goto unwind_out;
 
                set_bit(pde, new_pts);
-               trace_i915_pagetable_alloc(&ppgtt->base, pde, start, 
GEN8_PDE_SHIFT);
+               trace_i915_pagetable_alloc(vm, pde, start, GEN8_PDE_SHIFT);
        }
 
        return 0;
@@ -798,7 +820,7 @@ unwind_out:
 
 /**
  * gen8_ppgtt_alloc_pagedirs() - Allocate page directories for VA range.
- * @ppgtt:     Master ppgtt structure.
+ * @vm:                Master vm structure.
  * @pdp:       Page directory pointer for this address range.
  * @start:     Starting virtual address to begin allocations.
  * @length     Size of the allocations.
@@ -819,17 +841,17 @@ unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagedirs(struct i915_address_space *vm,
                                     struct i915_pagedirpo *pdp,
                                     uint64_t start,
                                     uint64_t length,
                                     unsigned long *new_pds)
 {
-       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_device *dev = vm->dev;
        struct i915_pagedir *unused;
        uint64_t temp;
        uint32_t pdpe;
-       size_t pdpes =  I915_PDPES_PER_PDP(ppgtt->base.dev);
+       size_t pdpes =  I915_PDPES_PER_PDP(vm->dev);
 
        BUG_ON(!bitmap_empty(new_pds, pdpes));
 
@@ -841,13 +863,13 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt 
*ppgtt,
                if (unused)
                        continue;
 
-               pd = alloc_pd_single(ppgtt->base.dev);
+               pd = alloc_pd_single(dev);
                if (IS_ERR(pd))
                        goto unwind_out;
 
                pdp->pagedirs[pdpe] = pd;
                set_bit(pdpe, new_pds);
-               trace_i915_pagedirectory_alloc(&ppgtt->base, pdpe, start,
+               trace_i915_pagedirectory_alloc(vm, pdpe, start,
                                               GEN8_PDPE_SHIFT);
        }
 
@@ -855,7 +877,7 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt 
*ppgtt,
 
 unwind_out:
        for_each_set_bit(pdpe, new_pds, pdpes)
-               free_pd_single(pdp->pagedirs[pdpe], ppgtt->base.dev);
+               free_pd_single(pdp->pagedirs[pdpe], dev);
 
        return -ENOMEM;
 }
@@ -910,12 +932,11 @@ err_out:
        return -ENOMEM;
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-                              uint64_t start,
-                              uint64_t length)
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+                                   struct i915_pagedirpo *pdp,
+                                   uint64_t start,
+                                   uint64_t length)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
        unsigned long *new_page_dirs, **new_page_tables;
        struct drm_device *dev = vm->dev;
        struct i915_pagedir *pd;
@@ -945,17 +966,15 @@ static int gen8_alloc_va_range(struct i915_address_space 
*vm,
                return ret;
 
        /* Do the allocations first so we can easily bail out */
-       ret = gen8_ppgtt_alloc_pagedirs(ppgtt, &ppgtt->pdp, start, length,
-                                       new_page_dirs);
+       ret = gen8_ppgtt_alloc_pagedirs(vm, pdp, start, length, new_page_dirs);
        if (ret) {
                free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
                return ret;
        }
 
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                bitmap_zero(new_page_tables[pdpe], I915_PDES_PER_PD);
-               ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
-                                               new_page_tables[pdpe]);
+               ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, 
new_page_tables[pdpe]);
                if (ret)
                        goto err_out;
        }
@@ -963,7 +982,7 @@ static int gen8_alloc_va_range(struct i915_address_space 
*vm,
        start = orig_start;
        length = orig_length;
 
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                struct i915_pagetab *pt;
                uint64_t pd_len = gen8_clamp_pd(start, length);
                uint64_t pd_start = start;
@@ -981,7 +1000,7 @@ static int gen8_alloc_va_range(struct i915_address_space 
*vm,
                        set_bit(pde, pd->used_pdes);
                }
 
-               set_bit(pdpe, ppgtt->pdp.used_pdpes);
+               set_bit(pdpe, pdp->used_pdpes);
 
                gen8_map_pagetable_range(vm, pd, start, length);
        }
@@ -992,16 +1011,36 @@ static int gen8_alloc_va_range(struct i915_address_space 
*vm,
 err_out:
        while (pdpe--) {
                for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES_PER_PD)
-                       free_pt_single(pd->page_tables[temp], ppgtt->base.dev);
+                       free_pt_single(pd->page_tables[temp], dev);
        }
 
        for_each_set_bit(pdpe, new_page_dirs, pdpes)
-               free_pd_single(ppgtt->pdp.pagedirs[pdpe], ppgtt->base.dev);
+               free_pd_single(pdp->pagedirs[pdpe], dev);
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
        return ret;
 }
 
+static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+                                              struct i915_pml4 *pml4,
+                                              uint64_t start,
+                                              uint64_t length)
+{
+       BUG();
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+                              uint64_t start, uint64_t length)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       if (!HAS_48B_PPGTT(vm->dev))
+               return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+       else
+               return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, 
length);
+}
+
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
        free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1046,12 +1085,13 @@ static int gen8_aliasing_ppgtt_init(struct 
i915_hw_ppgtt *ppgtt)
 {
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
        struct i915_pagedir *pd;
        uint64_t temp, start = 0, size = dev_priv->gtt.base.total;
        uint32_t pdpe;
        int ret;
 
-       ret = gen8_ppgtt_init_common(ppgtt, dev_priv->gtt.base.total);
+       ret = gen8_ppgtt_init_common(ppgtt, size);
        if (ret)
                return ret;
 
@@ -1061,7 +1101,7 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt 
*ppgtt)
                return ret;
        }
 
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
+       gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
                gen8_map_pagetable_range(&ppgtt->base, pd, start, size);
 
        ppgtt->base.allocate_va_range = NULL;
@@ -2101,6 +2141,7 @@ void gen8_for_every_pdpe_pde(struct i915_hw_ppgtt *ppgtt,
                             void *data)
 {
        struct drm_device *dev = ppgtt->base.dev;
+       struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
        uint64_t start = ppgtt->base.start;
        uint64_t length = ppgtt->base.total;
        uint64_t pdpe, pde, temp;
@@ -2108,18 +2149,18 @@ void gen8_for_every_pdpe_pde(struct i915_hw_ppgtt 
*ppgtt,
        struct i915_pagedir *pd;
        struct i915_pagetab *pt;
 
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                uint64_t pd_start = start, pd_length = length;
                int i;
 
                if (pd == NULL) {
                        for (i = 0; i < I915_PDES_PER_PD; i++)
-                               callback(&ppgtt->pdp, NULL, NULL, pdpe, i, 
data);
+                               callback(pdp, NULL, NULL, pdpe, i, data);
                        continue;
                }
 
                gen8_for_each_pde(pt, pd, pd_start, pd_length, temp, pde) {
-                       callback(&ppgtt->pdp, pd, pt, pdpe, pde, data);
+                       callback(pdp, pd, pt, pdpe, pde, data);
                }
        }
 }
-- 
1.9.2

_______________________________________________
Intel-gfx mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to