On Fri, Nov 24, 2017 at 10:14:30AM +0100, Ingo Molnar wrote:
> +static pte_t *kaiser_shadow_pagetable_walk(unsigned long address,
> +                                        unsigned long flags)
> +{
> +     pte_t *pte;
> +     pmd_t *pmd;
> +     pud_t *pud;
> +     p4d_t *p4d;
> +     pgd_t *pgd = kernel_to_shadow_pgdp(pgd_offset_k(address));
> +     gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
> +
> +     if (flags & KAISER_WALK_ATOMIC) {
> +             gfp &= ~GFP_KERNEL;
> +             gfp |= __GFP_HIGH | __GFP_ATOMIC;
> +     }
> +
> +     if (address < PAGE_OFFSET) {
> +             WARN_ONCE(1, "attempt to walk user address\n");
> +             return NULL;
> +     }
> +
> +     if (pgd_none(*pgd)) {
> +             WARN_ONCE(1, "All shadow pgds should have been populated\n");
> +             return NULL;
> +     }
> +     BUILD_BUG_ON(pgd_large(*pgd) != 0);
> +
> +     p4d = p4d_offset(pgd, address);
> +     BUILD_BUG_ON(p4d_large(*p4d) != 0);
> +     if (p4d_none(*p4d)) {
> +             unsigned long new_pud_page = __get_free_page(gfp);
> +             if (!new_pud_page)
> +                     return NULL;
> +
> +             spin_lock(&shadow_table_allocation_lock);
> +             if (p4d_none(*p4d))
> +                     set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
> +             else
> +                     free_page(new_pud_page);
> +             spin_unlock(&shadow_table_allocation_lock);

So mm/memory.c has two patterns here... I prefer the other one:

                spin_lock(&shadow_table_allocation_lock);
                if (p4d_none(*p4d)) {
                        set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
                        new_pud_page = 0;
                }
                spin_unlock(&shadow_table_allocation_lock);
                if (new_pud_page)
                        free_page(new_pud_page);

> +     }
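
For reference, the whole branch with that pattern would look something like
this (an untested sketch; it reuses the patch's shadow_table_allocation_lock,
gfp mask and helpers, the only difference being that the losing CPU frees its
page after dropping the lock rather than under it):

	if (p4d_none(*p4d)) {
		/* Allocate outside the lock; may sleep unless KAISER_WALK_ATOMIC. */
		unsigned long new_pud_page = __get_free_page(gfp);

		if (!new_pud_page)
			return NULL;

		spin_lock(&shadow_table_allocation_lock);
		if (p4d_none(*p4d)) {
			/* Still empty: we won the race, install the new pud page. */
			set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
			new_pud_page = 0;
		}
		spin_unlock(&shadow_table_allocation_lock);

		/* Somebody else populated it first: drop the unused page. */
		if (new_pud_page)
			free_page(new_pud_page);
	}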
