This patch uses the newly added FOLL_SPLIT_PMD in uprobe. This enables easy
regrouping of the huge pmd after the uprobe is disabled (in the next patch).

Signed-off-by: Song Liu <songliubrav...@fb.com>
---
 kernel/events/uprobes.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f7c61a1ef720..a20d7b43a056 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -153,7 +153,7 @@ static int __replace_page(struct vm_area_struct *vma, 
unsigned long addr,
 {
        struct mm_struct *mm = vma->vm_mm;
        struct page_vma_mapped_walk pvmw = {
-               .page = old_page,
+               .page = compound_head(old_page),
                .vma = vma,
                .address = addr,
        };
@@ -165,8 +165,6 @@ static int __replace_page(struct vm_area_struct *vma, 
unsigned long addr,
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
                                addr + PAGE_SIZE);
 
-       VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
        if (!orig) {
                err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
                                            &memcg, false);
@@ -483,7 +481,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct 
mm_struct *mm,
 retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-                       FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+                       FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
        if (ret <= 0)
                return ret;
 
-- 
2.17.1

Reply via email to