On 3/29/21 12:38 PM, Yang Shi wrote:
Since commit 5a52c9df62b4 ("uprobe: use FOLL_SPLIT_PMD instead of FOLL_SPLIT")
and commit ba925fa35057 ("s390/gmap: improve THP splitting"), FOLL_SPLIT is no
longer used.  Remove the dead code.

Signed-off-by: Yang Shi <shy828...@gmail.com>
---
  include/linux/mm.h |  1 -
  mm/gup.c           | 28 ++--------------------------
  2 files changed, 2 insertions(+), 27 deletions(-)


Looks nice.

As long as I'm running git grep here, there is one more search hit that
should also be fixed up, as part of a "remove FOLL_SPLIT" patch:

git grep -nw FOLL_SPLIT
Documentation/vm/transhuge.rst:57:follow_page, the FOLL_SPLIT bit can be specified as a parameter to
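
For anyone following along, the replacement flag is FOLL_SPLIT_PMD, which
splits only the PMD mapping and leaves the underlying compound page intact.
A rough sketch of a caller, modeled loosely on what kernel/events/uprobes.c
has done since 5a52c9df62b4 (mm, vaddr, vma and page are just illustrative
local context here, not code from this patch):

	struct page *page;
	struct vm_area_struct *vma;
	long ret;

	/* Split only the PMD mapping; the compound page itself stays intact. */
	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_FORCE | FOLL_SPLIT_PMD,
				    &page, &vma, NULL);
	if (ret <= 0)
		return ret;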

Reviewed-by: John Hubbard <jhubb...@nvidia.com>

thanks,
--
John Hubbard
NVIDIA

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8ba434287387..3568836841f9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2780,7 +2780,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
  #define FOLL_NOWAIT   0x20    /* if a disk transfer is needed, start the IO
                                 * and return without waiting upon it */
  #define FOLL_POPULATE 0x40    /* fault in page */
-#define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
  #define FOLL_HWPOISON 0x100   /* check page is hwpoisoned */
  #define FOLL_NUMA     0x200   /* force NUMA hinting page fault */
  #define FOLL_MIGRATION        0x400   /* wait for page to replace migration entry */
diff --git a/mm/gup.c b/mm/gup.c
index e40579624f10..f3d45a8f18ae 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -435,18 +435,6 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                }
        }
-       if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-               get_page(page);
-               pte_unmap_unlock(ptep, ptl);
-               lock_page(page);
-               ret = split_huge_page(page);
-               unlock_page(page);
-               put_page(page);
-               if (ret)
-                       return ERR_PTR(ret);
-               goto retry;
-       }
-
        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
@@ -591,7 +579,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
-       if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+       if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
@@ -600,19 +588,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
-               } else if (flags & FOLL_SPLIT) {
-                       if (unlikely(!try_get_page(page))) {
-                               spin_unlock(ptl);
-                               return ERR_PTR(-ENOMEM);
-                       }
-                       spin_unlock(ptl);
-                       lock_page(page);
-                       ret = split_huge_page(page);
-                       unlock_page(page);
-                       put_page(page);
-                       if (pmd_none(*pmd))
-                               return no_page_table(vma, flags);
-               } else {  /* flags & FOLL_SPLIT_PMD */
+               } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;

