As for FOLL_LONGTERM, it is checked in the slow path __gup_longterm_unlocked(), but it is not checked in the fast path. This means a CMA page can leak through to a long-term pinned user via this gap.
Place a check in the fast path. Signed-off-by: Pingfan Liu <[email protected]> Cc: Ira Weiny <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Dan Williams <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: John Hubbard <[email protected]> Cc: "Aneesh Kumar K.V" <[email protected]> Cc: Keith Busch <[email protected]> Cc: [email protected] --- mm/gup.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/mm/gup.c b/mm/gup.c index f173fcb..6fe2feb 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2196,6 +2196,29 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, return ret; } +#if defined(CONFIG_CMA) +static inline int reject_cma_pages(int nr_pinned, unsigned int gup_flags, + struct page **pages) +{ + if (unlikely(gup_flags & FOLL_LONGTERM)) { + int i = 0; + + for (i = 0; i < nr_pinned; i++) + if (is_migrate_cma_page(pages[i])) { + put_user_pages(pages + i, nr_pinned - i); + return i; + } + } + return nr_pinned; +} +#else +static inline int reject_cma_pages(int nr_pinned, unsigned int gup_flags, + struct page **pages) +{ + return nr_pinned; +} +#endif + /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address @@ -2236,6 +2259,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, ret = nr; } + nr = reject_cma_pages(nr, gup_flags, pages); if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; -- 2.7.5

