Mark __split_huge_pmd(), split_huge_pmd() and split_huge_pmd_address() with __must_check so the compiler warns if any caller ignores the return value. Not checking the return value and operating on the assumption that the pmd has been split could result in a kernel bug. The possibility of an order-0 allocation failing for page table allocation is very low, but it should be handled correctly.
Signed-off-by: Usama Arif <[email protected]>
---
 include/linux/huge_mm.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 207bf7cd95c78..b4c2fd4252097 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -419,7 +419,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped);
 void reparent_deferred_split_queue(struct mem_cgroup *memcg);
 #endif
 
-int __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+int __must_check __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long address, bool freeze);
 
 /**
@@ -448,7 +448,7 @@ static inline bool pmd_is_huge(pmd_t pmd)
 	return false;
 }
 
-static inline int split_huge_pmd(struct vm_area_struct *vma,
+static inline int __must_check split_huge_pmd(struct vm_area_struct *vma,
 		pmd_t *pmd, unsigned long address)
 {
 	if (pmd_is_huge(*pmd))
@@ -456,7 +456,7 @@ static inline int split_huge_pmd(struct vm_area_struct *vma,
 	return 0;
 }
 
-int split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+int __must_check split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 		bool freeze);
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
-- 
2.47.3
