FOLL_LONGTERM is checked in the slow path, __gup_longterm_unlocked(),
but not in the fast path. Through this gap a CMA page can end up
long-term pinned, blocking the contiguous allocations the CMA area is
reserved for.
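
For illustration, a minimal sketch of a call path that could pin a CMA
page through the fast path before this change (the caller and its use
of the pages are hypothetical):

	/* Hypothetical driver path: pin a user buffer for the lifetime
	 * of a device mapping. Written against this era's GUP API,
	 * where pins taken with get_user_pages_fast() are released
	 * with put_user_pages().
	 */
	static int example_pin_user_buf(unsigned long uaddr, int nr_pages,
					struct page **pages)
	{
		int nr;

		/*
		 * Without a fast-path check, nothing inspects the
		 * result with is_migrate_cma_page(), so any page
		 * returned here may sit in a CMA area and stay pinned
		 * indefinitely.
		 */
		nr = get_user_pages_fast(uaddr, nr_pages, FOLL_LONGTERM,
					 pages);
		if (nr <= 0)
			return nr;

		/* ... hand the pages to long-lived DMA ... */

		put_user_pages(pages, nr);
		return 0;
	}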

Place the check in the fast path as well: drop the pins starting at
the first CMA page encountered and return the count of pages kept, so
the existing fall-back picks up the rejected range in the slow path.

Signed-off-by: Pingfan Liu <kernelf...@gmail.com>
Cc: Ira Weiny <ira.we...@intel.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Mike Rapoport <r...@linux.ibm.com>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: John Hubbard <jhubb...@nvidia.com>
Cc: "Aneesh Kumar K.V" <aneesh.ku...@linux.ibm.com>
Cc: Keith Busch <keith.bu...@intel.com>
Cc: Christoph Hellwig <h...@infradead.org>
Cc: linux-kernel@vger.kernel.org
---
 mm/gup.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index f173fcb..0e59af9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2196,6 +2196,26 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
        return ret;
 }
 
+#ifdef CONFIG_CMA
+static inline int reject_cma_pages(int nr_pinned, struct page **pages)
+{
+       int i;
+
+       for (i = 0; i < nr_pinned; i++)
+               if (is_migrate_cma_page(pages[i])) {
+                       put_user_pages(pages + i, nr_pinned - i);
+                       return i;
+               }
+
+       return nr_pinned;
+}
+#else
+static inline int reject_cma_pages(int nr_pinned, struct page **pages)
+{
+       return nr_pinned;
+}
+#endif
+
 /**
  * get_user_pages_fast() - pin user pages in memory
  * @start:     starting user address
@@ -2236,6 +2256,9 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
                ret = nr;
        }
 
+       if (unlikely(gup_flags & FOLL_LONGTERM) && nr)
+               nr = reject_cma_pages(nr, pages);
+
        if (nr < nr_pages) {
                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
-- 
2.7.5
