* Lorenzo Stoakes <[email protected]> [260122 16:06]:
> This patch updates secretmem to use the new vma_flags_t type which will
> soon supersede vm_flags_t altogether.
>
> In order to make this change we also have to update mlock_future_ok(), we
> replace the vm_flags_t parameter with a simple boolean is_vma_locked one,
> which also simplifies the invocation here.
>
> This is laying the groundwork for eliminating the vm_flags_t in
> vm_area_desc and more broadly throughout the kernel.
>
> No functional changes intended.
>
> Signed-off-by: Lorenzo Stoakes <[email protected]>
With the type fix for brk - I assume sparse would have detected this as
well.

Reviewed-by: Liam R. Howlett <[email protected]>

> ---
>  mm/internal.h  | 2 +-
>  mm/mmap.c      | 8 ++++----
>  mm/mremap.c    | 2 +-
>  mm/secretmem.c | 7 +++----
>  mm/vma.c       | 2 +-
>  5 files changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index ef71a1d9991f..d67e8bb75734 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1046,7 +1046,7 @@ extern long populate_vma_page_range(struct vm_area_struct *vma,
>                  unsigned long start, unsigned long end, int *locked);
>  extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
>                                 unsigned long end, bool write, int *locked);
> -bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
> +bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
>                       unsigned long bytes);
>
>  /*
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 038ff5f09df0..354479c95896 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -225,12 +225,12 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
>          return hint;
>  }
>
> -bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
> -                     unsigned long bytes)
> +bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
> +                     unsigned long bytes)
>  {
>          unsigned long locked_pages, limit_pages;
>
> -        if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
> +        if (!is_vma_locked || capable(CAP_IPC_LOCK))
>                  return true;
>
>          locked_pages = bytes >> PAGE_SHIFT;
> @@ -416,7 +416,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>                  if (!can_do_mlock())
>                          return -EPERM;
>
> -        if (!mlock_future_ok(mm, vm_flags, len))
> +        if (!mlock_future_ok(mm, vm_flags & VM_LOCKED, len))
>                  return -EAGAIN;
>
>          if (file) {
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 8391ae17de64..2be876a70cc0 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -1740,7 +1740,7 @@ static int check_prep_vma(struct vma_remap_struct *vrm)
>          if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
>                  return -EFAULT;
>
> -        if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
> +        if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
>                  return -EAGAIN;
>
>          if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
> diff --git a/mm/secretmem.c b/mm/secretmem.c
> index edf111e0a1bb..11a779c812a7 100644
> --- a/mm/secretmem.c
> +++ b/mm/secretmem.c
> @@ -122,13 +122,12 @@ static int secretmem_mmap_prepare(struct vm_area_desc *desc)
>  {
>          const unsigned long len = vma_desc_size(desc);
>
> -        if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
> +        if (!vma_desc_test_flags(desc, VMA_SHARED_BIT, VMA_MAYSHARE_BIT))
>                  return -EINVAL;
>
> -        if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))
> +        vma_desc_set_flags(desc, VMA_LOCKED_BIT, VMA_DONTDUMP_BIT);
> +        if (!mlock_future_ok(desc->mm, /*is_vma_locked=*/ true, len))
>                  return -EAGAIN;
> -
> -        desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;
>          desc->vm_ops = &secretmem_vm_ops;
>
>          return 0;
> diff --git a/mm/vma.c b/mm/vma.c
> index f352d5c72212..39dcd9ddd4ba 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -3053,7 +3053,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
>                  return -ENOMEM;
>
>          /* mlock limit tests */
> -        if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
> +        if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
>                  return -ENOMEM;
>
>          /* Check to ensure the stack will not grow into a hugetlb-only region */
> --
> 2.52.0
>
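
For anyone reading the series out of order, here is a minimal userspace
sketch of the call-site pattern the commit message describes. It is not
kernel code: the VM_LOCKED value, FAKE_LOCK_LIMIT and the two helper names
are stand-ins, and the capable()/rlimit handling is omitted. It only shows
how reducing the flags word to a boolean at the caller decouples the helper
from vm_flags_t.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel definitions; values are illustrative only. */
typedef unsigned long vm_flags_t;
#define VM_LOCKED       0x00002000UL
#define FAKE_LOCK_LIMIT (1024UL * 1024UL)

/* Old shape: the helper takes the whole flags word and tests VM_LOCKED itself. */
static bool mlock_future_ok_old(vm_flags_t vm_flags, unsigned long bytes)
{
        if (!(vm_flags & VM_LOCKED))
                return true;
        return bytes <= FAKE_LOCK_LIMIT;
}

/*
 * New shape: the caller collapses the flag test to a bool, so the helper no
 * longer needs to know about the flags type at all.
 */
static bool mlock_future_ok_new(bool is_vma_locked, unsigned long bytes)
{
        if (!is_vma_locked)
                return true;
        return bytes <= FAKE_LOCK_LIMIT;
}

int main(void)
{
        vm_flags_t vm_flags = VM_LOCKED;
        unsigned long len = 4096;

        printf("old: %d\n", mlock_future_ok_old(vm_flags, len));
        /* New call site: a nonzero (vm_flags & VM_LOCKED) converts to true. */
        printf("new: %d\n", mlock_future_ok_new(vm_flags & VM_LOCKED, len));
        return 0;
}

The secretmem hunk above is the degenerate case of the same pattern: the
prepare hook sets the locked bit unconditionally first, so it can simply
pass /*is_vma_locked=*/ true.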
