On Wed, May 29, 2019 at 02:20:46PM -0700, Song Liu wrote:
> @@ -2133,10 +2133,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>       VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
>       VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
>                               && !pmd_devmap(*pmd));
> +     /* only file-backed VMAs need a preallocated page table */
> +     VM_BUG_ON(vma_is_anonymous(vma) && prealloc_pgtable);
>  
>       count_vm_event(THP_SPLIT_PMD);
>  
> -     if (!vma_is_anonymous(vma)) {
> +     if (prealloc_pgtable) {
> +             pgtable_trans_huge_deposit(mm, pmd, prealloc_pgtable);
> +             mm_inc_nr_pmds(mm);
> +     } else if (!vma_is_anonymous(vma)) {
>               _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
>               /*
>                * We are going to unmap this huge page. So

Nope. This is going to leak a page table on architectures where
arch_needs_pgtable_deposit() is true: the table that was deposited for
the file-backed PMD at fault time is never withdrawn before the new one
is deposited.
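
Something along these lines would be needed first (a rough, untested
sketch; zap_deposited_table() is the existing helper in mm/huge_memory.c
that withdraws and frees a deposited table):

	if (prealloc_pgtable) {
		/*
		 * On architectures that deposit a page table for
		 * file-backed huge PMDs, withdraw and free the table
		 * deposited at fault time before depositing the new
		 * one, otherwise it is leaked.
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		pgtable_trans_huge_deposit(mm, pmd, prealloc_pgtable);
		mm_inc_nr_pmds(mm);
	} else if (!vma_is_anonymous(vma)) {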

-- 
 Kirill A. Shutemov
