Re: [PATCH v4 5/7] mm: replace vma->vm_flags indirect modification in ksm_madvise

2023-02-07 Thread Hyeonggon Yoo
On Thu, Jan 26, 2023 at 11:37:50AM -0800, Suren Baghdasaryan wrote:
> Replace indirect modifications to vma->vm_flags with calls to modifier
> functions to be able to track flag changes and to keep vma locking
> correctness.
> 
> Signed-off-by: Suren Baghdasaryan 
> Acked-by: Michal Hocko 
> Acked-by: Mel Gorman 
> Acked-by: Mike Rapoport (IBM) 
> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c | 6 +-
>  arch/s390/mm/gmap.c                | 6 +-
>  2 files changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index 1d67baa5557a..709ebd578394 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
>  {
>   unsigned long gfn = memslot->base_gfn;
>   unsigned long end, start = gfn_to_hva(kvm, gfn);
> + unsigned long vm_flags;
>   int ret = 0;
>   struct vm_area_struct *vma;
>   int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
> @@ -409,12 +410,15 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
>   ret = H_STATE;
>   break;
>   }
> + /* Copy vm_flags to avoid partial modifications in ksm_madvise */
> + vm_flags = vma->vm_flags;
>   ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> -   merge_flag, &vma->vm_flags);
> +   merge_flag, &vm_flags);
>   if (ret) {
>   ret = H_STATE;
>   break;
>   }
> + vm_flags_reset(vma, vm_flags);
>   start = vma->vm_end;
>   } while (end > vma->vm_end);
>  
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index ab836597419d..5a716bdcba05 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -2587,14 +2587,18 @@ int gmap_mark_unmergeable(void)
>  {
>   struct mm_struct *mm = current->mm;
>   struct vm_area_struct *vma;
> + unsigned long vm_flags;
>   int ret;
>   VMA_ITERATOR(vmi, mm, 0);
>  
>   for_each_vma(vmi, vma) {
> + /* Copy vm_flags to avoid partial modifications in ksm_madvise */
> + vm_flags = vma->vm_flags;
>   ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> -   MADV_UNMERGEABLE, &vma->vm_flags);
> +   MADV_UNMERGEABLE, &vm_flags);
>   if (ret)
>   return ret;
> + vm_flags_reset(vma, vm_flags);
>   }
>   mm->def_flags &= ~VM_MERGEABLE;
>   return 0;
> -- 
> 2.39.1

Reviewed-by: Hyeonggon Yoo <42.hye...@gmail.com>


Re: [PATCH v4 5/7] mm: replace vma->vm_flags indirect modification in ksm_madvise

2023-01-26 Thread Michael Ellerman
Suren Baghdasaryan  writes:
> Replace indirect modifications to vma->vm_flags with calls to modifier
> functions to be able to track flag changes and to keep vma locking
> correctness.
>
> Signed-off-by: Suren Baghdasaryan 
> Acked-by: Michal Hocko 
> Acked-by: Mel Gorman 
> Acked-by: Mike Rapoport (IBM) 
> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c | 6 +-

Acked-by: Michael Ellerman  (powerpc)

cheers


[PATCH v4 5/7] mm: replace vma->vm_flags indirect modification in ksm_madvise

2023-01-26 Thread Suren Baghdasaryan
Replace indirect modifications to vma->vm_flags with calls to modifier
functions to be able to track flag changes and to keep vma locking
correctness.
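
For reference, a rough sketch of the helper this change relies on and the caller
pattern it moves to. Names follow the vm_flags wrappers introduced earlier in this
series; the exact definitions live in include/linux/mm.h and may differ in detail,
and unmerge_vma() is a hypothetical helper used purely for illustration, not code
from this patch:

/*
 * Sketch only, not the authoritative definition: flag rewrites are
 * asserted to happen under the mmap write lock and go through the
 * writable alias instead of the const vma->vm_flags member.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags)
{
        mmap_assert_write_locked(vma->vm_mm);
        ACCESS_PRIVATE(vma, __vm_flags) = flags;
}

/*
 * Caller pattern (hypothetical helper for illustration): operate on a
 * local copy so a failing ksm_madvise() cannot leave the vma with
 * partially updated flags, then publish the result in one tracked write.
 */
static int unmerge_vma(struct vm_area_struct *vma)
{
        unsigned long vm_flags = vma->vm_flags;
        int ret;

        ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                          MADV_UNMERGEABLE, &vm_flags);
        if (ret)
                return ret;
        vm_flags_reset(vma, vm_flags);
        return 0;
}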

Signed-off-by: Suren Baghdasaryan 
Acked-by: Michal Hocko 
Acked-by: Mel Gorman 
Acked-by: Mike Rapoport (IBM) 
---
 arch/powerpc/kvm/book3s_hv_uvmem.c | 6 +-
 arch/s390/mm/gmap.c                | 6 +-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 1d67baa5557a..709ebd578394 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
 {
unsigned long gfn = memslot->base_gfn;
unsigned long end, start = gfn_to_hva(kvm, gfn);
+   unsigned long vm_flags;
int ret = 0;
struct vm_area_struct *vma;
int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
@@ -409,12 +410,15 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
ret = H_STATE;
break;
}
+   /* Copy vm_flags to avoid partial modifications in ksm_madvise */
+   vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
- merge_flag, &vma->vm_flags);
+ merge_flag, &vm_flags);
if (ret) {
ret = H_STATE;
break;
}
+   vm_flags_reset(vma, vm_flags);
start = vma->vm_end;
} while (end > vma->vm_end);
 
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index ab836597419d..5a716bdcba05 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2587,14 +2587,18 @@ int gmap_mark_unmergeable(void)
 {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+   unsigned long vm_flags;
int ret;
VMA_ITERATOR(vmi, mm, 0);
 
for_each_vma(vmi, vma) {
+   /* Copy vm_flags to avoid partial modifications in ksm_madvise */
+   vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
- MADV_UNMERGEABLE, &vma->vm_flags);
+ MADV_UNMERGEABLE, &vm_flags);
if (ret)
return ret;
+   vm_flags_reset(vma, vm_flags);
}
mm->def_flags &= ~VM_MERGEABLE;
return 0;
-- 
2.39.1