Jérôme Glisse <[email protected]> writes:

> This patch adds new memory migration helpers, which migrate the memory
> backing a range of virtual addresses of a process to different memory
> (which can be allocated through a special allocator). It differs from
> NUMA migration by working on a range of virtual addresses and thus by
> doing the migration in chunks that can be large enough to use a DMA
> engine or a special copy-offloading engine.
>
> Expected users are anyone with heterogeneous memory where different
> memories have different characteristics (latency, bandwidth, ...). As
> an example, IBM platforms with the CAPI bus can make use of this
> feature to migrate between regular memory and CAPI device memory. New
> CPU architectures with a pool of high-performance memory not managed
> as a cache but presented as regular memory (while being faster and
> having lower latency than DDR) will also be prime users of this patch.
>
> Migration to private device memory will be useful for devices that
> have a large pool of such memory, like GPUs; NVidia plans to use HMM
> for that.
>
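
For readers following along, here is roughly how I picture a driver
using these helpers, as a minimal sketch only. The names below
(hmm_migrate_range(), hmm_migrate_finalize(), my_dma_copy()) are
placeholders of mine, not necessarily the API this series exposes:

static int my_migrate_to_device(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	hmm_pfn_t *pfns;
	int ret;

	pfns = kcalloc(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;

	/* Walk the page tables, collect and lock the source pages. */
	ret = hmm_migrate_range(vma, start, end, pfns);		/* placeholder */
	if (ret)
		goto out;

	/* One large copy, so a DMA engine is actually worth using. */
	ret = my_dma_copy(pfns, npages);			/* placeholder */

	/* Install the new pages, or restore the old ones on error. */
	hmm_migrate_finalize(vma, start, end, pfns, ret);	/* placeholder */
out:
	kfree(pfns);
	return ret;
}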



[...]


> +
> +static int hmm_collect_walk_pmd(pmd_t *pmdp,
> +                             unsigned long start,
> +                             unsigned long end,
> +                             struct mm_walk *walk)
> +{
> +     struct hmm_migrate *migrate = walk->private;
> +     struct mm_struct *mm = walk->vma->vm_mm;
> +     unsigned long addr = start;
> +     spinlock_t *ptl;
> +     hmm_pfn_t *pfns;
> +     int pages = 0;
> +     pte_t *ptep;
> +
> +again:
> +     if (pmd_none(*pmdp))
> +             return 0;
> +
> +     split_huge_pmd(walk->vma, pmdp, addr);
> +     if (pmd_trans_unstable(pmdp))
> +             goto again;
> +
> +     pfns = &migrate->pfns[(addr - migrate->start) >> PAGE_SHIFT];
> +     ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
> +     arch_enter_lazy_mmu_mode();
> +
> +     for (; addr < end; addr += PAGE_SIZE, pfns++, ptep++) {
> +             unsigned long pfn;
> +             swp_entry_t entry;
> +             struct page *page;
> +             hmm_pfn_t flags;
> +             bool write;
> +             pte_t pte;
> +
> +             pte = ptep_get_and_clear(mm, addr, ptep);
> +             if (!pte_present(pte)) {
> +                     if (pte_none(pte))
> +                             continue;
> +
> +                     entry = pte_to_swp_entry(pte);
> +                     if (!is_device_entry(entry)) {
> +                             set_pte_at(mm, addr, ptep, pte);
> +                             continue;
> +                     }
> +
> +                     flags = HMM_PFN_DEVICE | HMM_PFN_UNADDRESSABLE;
> +                     page = device_entry_to_page(entry);
> +                     write = is_write_device_entry(entry);
> +                     pfn = page_to_pfn(page);
> +
> +                     if (!(page->pgmap->flags & MEMORY_MOVABLE)) {
> +                             set_pte_at(mm, addr, ptep, pte);
> +                             continue;
> +                     }
> +
> +             } else {
> +                     pfn = pte_pfn(pte);
> +                     page = pfn_to_page(pfn);
> +                     write = pte_write(pte);
> +                     flags = is_zone_device_page(page) ? HMM_PFN_DEVICE : 0;
> +             }
> +
> +             /* FIXME support THP see hmm_migrate_page_check() */
> +             if (PageTransCompound(page))
> +                     continue;
> +
> +             *pfns = hmm_pfn_from_pfn(pfn) | HMM_PFN_MIGRATE | flags;
> +             *pfns |= write ? HMM_PFN_WRITE : 0;
> +             migrate->npages++;
> +             get_page(page);
> +
> +             if (!trylock_page(page)) {
> +                     set_pte_at(mm, addr, ptep, pte);
> +             } else {
> +                     pte_t swp_pte;
> +
> +                     *pfns |= HMM_PFN_LOCKED;
> +
> +                     entry = make_migration_entry(page, write);
> +                     swp_pte = swp_entry_to_pte(entry);
> +                     if (pte_soft_dirty(pte))
> +                             swp_pte = pte_swp_mksoft_dirty(swp_pte);
> +                     set_pte_at(mm, addr, ptep, swp_pte);
> +
> +                     page_remove_rmap(page, false);
> +                     put_page(page);
> +                     pages++;
> +             }

Can you explain this? What does a failure to lock the page mean here?
Also, why convert the pte to migration entries here? We already do that
in try_to_unmap(), right?
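
For comparison, this is how I remember the existing migrate_pages()
path handling it, a rough sketch from memory of mm/migrate.c
(simplified, not the exact code):

static int unmap_and_move_sketch(struct page *page)
{
	if (!trylock_page(page))
		return -EAGAIN;	/* caller skips or retries this page */

	/*
	 * try_to_unmap() is what normally replaces the ptes with
	 * migration entries, and it runs under the page lock.
	 */
	try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);

	/* ... copy the page, then remove_migration_ptes() ... */

	unlock_page(page);
	return 0;
}

There the page lock is taken first and only then are the migration
entries installed, which is why the trylock-failure case above is not
obvious to me.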


> +     }
> +
> +     arch_leave_lazy_mmu_mode();
> +     pte_unmap_unlock(ptep - 1, ptl);
> +
> +     /* Only flush the TLB if we actually modified any entries */
> +     if (pages)
> +             flush_tlb_range(walk->vma, start, end);
> +
> +     return 0;
> +}
> 
