The commit is pushed to "branch-rh7-3.10.0-1160.6.1.vz7.171.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1160.6.1.vz7.171.1
------>
commit 78e3059a4c0cc2637f36cb56d2d9a3bcec3a8945
Author: Davidlohr Bueso <d...@stgolabs.net>
Date:   Thu Dec 3 11:40:40 2020 +0300

    ms/mm: use new helper functions around the i_mmap_mutex

    Convert all open coded mutex_lock/unlock calls to the
    i_mmap_[lock/unlock]_write() helpers.

    Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
    Acked-by: Rik van Riel <r...@redhat.com>
    Acked-by: "Kirill A. Shutemov" <kir...@shutemov.name>
    Acked-by: Hugh Dickins <hu...@google.com>
    Cc: Oleg Nesterov <o...@redhat.com>
    Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
    Cc: Srikar Dronamraju <sri...@linux.vnet.ibm.com>
    Acked-by: Mel Gorman <mgor...@suse.de>
    Signed-off-by: Andrew Morton <a...@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>

    https://jira.sw.ru/browse/PSBM-122663
    (cherry picked from commit 83cde9e8ba95d180eaefefe834958fbf7008cf39)
    Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 fs/dax.c                |  4 ++--
 fs/hugetlbfs/inode.c    | 12 ++++++------
 kernel/events/uprobes.c |  4 ++--
 kernel/fork.c           |  4 ++--
 mm/hugetlb.c            | 12 ++++++------
 mm/memory-failure.c     |  4 ++--
 mm/memory.c             | 28 ++++++++++++++--------------
 mm/mmap.c               | 14 +++++++-------
 mm/mremap.c             |  4 ++--
 mm/nommu.c              | 14 +++++++-------
 mm/rmap.c               |  6 +++---
 11 files changed, 53 insertions(+), 53 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index f77aa0c..3ccab89 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -918,7 +918,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
         spinlock_t *ptl;
         bool changed;
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
                 unsigned long address;
 
@@ -969,7 +969,7 @@ unlock_pte:
                 if (changed)
                         mmu_notifier_invalidate_page(vma->vm_mm, address);
         }
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 }
 
 static int dax_writeback_one(struct dax_device *dax_dev,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index da213b1..79c34c8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -493,11 +493,11 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                         if (unlikely(page_mapped(page))) {
                                 BUG_ON(truncate_op);
 
-                                mutex_lock(&mapping->i_mmap_mutex);
+                                i_mmap_lock_write(mapping);
                                 hugetlb_vmdelete_list(&mapping->i_mmap,
                                         next * pages_per_huge_page(h),
                                         (next + 1) * pages_per_huge_page(h));
-                                mutex_unlock(&mapping->i_mmap_mutex);
+                                i_mmap_unlock_write(mapping);
                         }
 
                         lock_page(page);
@@ -553,10 +553,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
         pgoff = offset >> PAGE_SHIFT;
 
         i_size_write(inode, offset);
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
         remove_inode_hugepages(inode, offset, LLONG_MAX);
         return 0;
 }
@@ -578,12 +578,12 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                 struct address_space *mapping = inode->i_mapping;
 
                 mutex_lock(&inode->i_mutex);
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
                 if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                         hugetlb_vmdelete_list(&mapping->i_mmap,
                                         hole_start >> PAGE_SHIFT,
                                         hole_end >> PAGE_SHIFT);
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
                 remove_inode_hugepages(inode, hole_start, hole_end);
                 mutex_unlock(&inode->i_mutex);
         }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d291a16..6c714b0 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -690,7 +690,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
         int more = 0;
 
  again:
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                 if (!valid_vma(vma, is_register))
                         continue;
@@ -721,7 +721,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
                 info->mm = vma->vm_mm;
                 info->vaddr = offset_to_vaddr(vma, offset);
         }
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 
         if (!more)
                 goto out;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9467e21..b6a5279 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -504,7 +504,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                         get_file(file);
                         if (tmp->vm_flags & VM_DENYWRITE)
                                 atomic_dec(&inode->i_writecount);
-                        mutex_lock(&mapping->i_mmap_mutex);
+                        i_mmap_lock_write(mapping);
                         if (tmp->vm_flags & VM_SHARED)
                                 atomic_inc(&mapping->i_mmap_writable);
                         flush_dcache_mmap_lock(mapping);
@@ -512,7 +512,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                         vma_interval_tree_insert_after(tmp, mpnt,
                                         &mapping->i_mmap);
                         flush_dcache_mmap_unlock(mapping);
-                        mutex_unlock(&mapping->i_mmap_mutex);
+                        i_mmap_unlock_write(mapping);
                 }
 
                 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f26e97f..dd751e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3563,7 +3563,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
          * this mapping should be shared between all the VMAs,
          * __unmap_hugepage_range() is called as the lock is already held
          */
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
                 /* Do not unmap the current VMA */
                 if (iter_vma == vma)
@@ -3588,7 +3588,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                         unmap_hugepage_range(iter_vma, address,
                                         address + huge_page_size(h), page);
         }
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 
         return 1;
 }
@@ -4415,7 +4415,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         BUG_ON(address >= end);
         flush_cache_range(vma, address, end);
 
-        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+        i_mmap_lock_write(vma->vm_file->f_mapping);
         for (; address < end; address += huge_page_size(h)) {
                 spinlock_t *ptl;
                 ptep = huge_pte_offset(mm, address);
@@ -4463,7 +4463,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
          */
         flush_tlb_range(vma, start, end);
         mmu_notifier_invalidate_range(mm, start, end);
-        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+        i_mmap_unlock_write(vma->vm_file->f_mapping);
 
         return pages << h->order;
 }
@@ -4677,7 +4677,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         if (!vma_shareable(vma, addr))
                 return (pte_t *)pmd_alloc(mm, pud, addr);
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                 if (svma == vma)
                         continue;
@@ -4704,7 +4704,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         spin_unlock(ptl);
 out:
         pte = (pte_t *)pmd_alloc(mm, pud, addr);
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
         return pte;
 }
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index db46da6..a0ea96c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -496,7 +496,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
         struct task_struct *tsk;
         struct address_space *mapping = page->mapping;
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         qread_lock(&tasklist_lock);
         for_each_process(tsk) {
                 pgoff_t pgoff = page_to_pgoff(page);
@@ -518,7 +518,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                 }
         }
         qread_unlock(&tasklist_lock);
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 5a712f7..8f7736d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1434,9 +1434,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                          * safe to do nothing in this case.
                          */
                         if (vma->vm_file) {
-                                mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                                i_mmap_lock_write(vma->vm_file->f_mapping);
                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
-                                mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                                i_mmap_unlock_write(vma->vm_file->f_mapping);
                         }
                 } else
                         unmap_page_range(tlb, vma, start, end, details);
@@ -2702,10 +2702,10 @@ void unmap_mapping_range(struct address_space *mapping,
         if (details.last_index < details.first_index)
                 details.last_index = ULONG_MAX;
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 }
 
 EXPORT_SYMBOL(unmap_mapping_range);
@@ -4362,14 +4362,14 @@ restart:
 
         mutex_lock_nested(&peer->i_mmap_mutex, SINGLE_DEPTH_NESTING);
         if (!peer->i_peer_file) {
-                mutex_unlock(&peer->i_mmap_mutex);
+                i_mmap_unlock_write(peer);
                 goto restart;
         }
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
         rcu_assign_pointer(mapping->i_peer_file, peer->i_peer_file);
         list_add(&mapping->i_peer_list, &peer->i_peer_list);
-        mutex_unlock(&mapping->i_mmap_mutex);
-        mutex_unlock(&peer->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
+        i_mmap_unlock_write(peer);
 
         invalidate_mapping_pages(mapping, 0, -1);
 
@@ -4392,11 +4392,11 @@ static bool synchronize_mapping_faults_vma(struct address_space *mapping,
         vma->vm_private_data2 = vma;
         atomic_inc(&mm->mm_count);
 
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
         down_write(&mm->mmap_sem);
         up_write(&mm->mmap_sem);
         mmdrop(mm);
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
 
         return true;
 }
@@ -4421,7 +4421,7 @@ void close_mapping_peer(struct address_space *mapping)
         if (!file)
                 return;
 
-        mutex_lock(&mapping->i_mmap_mutex);
+        i_mmap_lock_write(mapping);
 
         rcu_assign_pointer(mapping->i_peer_file, NULL);
 
@@ -4436,17 +4436,17 @@ void close_mapping_peer(struct address_space *mapping)
                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
         }
 
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
 
         peer = file->f_mapping;
-        mutex_lock(&peer->i_mmap_mutex);
+        i_mmap_lock_write(peer);
 
         list_del_init(&mapping->i_peer_list);
         if (list_empty(&peer->i_peer_list))
                 rcu_assign_pointer(peer->i_peer_file, NULL);
         else
                 file = NULL;
-        mutex_unlock(&peer->i_mmap_mutex);
+        i_mmap_unlock_write(peer);
 
         if (file) {
                 atomic_inc(&file->f_inode->i_writecount);
diff --git a/mm/mmap.c b/mm/mmap.c
index 750294f..837a1ac 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -273,9 +273,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
         if (file) {
                 struct address_space *mapping = file->f_mapping;
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
                 __remove_shared_vm_struct(vma, file, mapping);
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
         }
 }
 
@@ -721,13 +721,13 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                 mapping = vma->vm_file->f_mapping;
 
         if (mapping)
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
 
         __vma_link(mm, vma, prev, rb_link, rb_parent);
         __vma_link_file(vma);
 
         if (mapping)
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
 
         mm->map_count++;
         validate_mm(mm);
@@ -895,7 +895,7 @@ again:
                 if (adjust_next)
                         uprobe_munmap(next, next->vm_start, next->vm_end);
 
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
                 if (insert) {
                         /*
                          * Put into interval tree now, so instantiated pages
@@ -992,7 +992,7 @@ again:
                 anon_vma_unlock_write(anon_vma);
         }
         if (mapping)
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
 
         if (root) {
                 uprobe_mmap(vma);
@@ -3586,7 +3586,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                  * AS_MM_ALL_LOCKS can't change to 0 from under us
                  * because we hold the mm_all_locks_mutex.
                  */
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                         &mapping->flags))
                         BUG();
diff --git a/mm/mremap.c b/mm/mremap.c
index 21cd904..3e50a89 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -77,7 +77,7 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 static void take_rmap_locks(struct vm_area_struct *vma)
 {
         if (vma->vm_file)
-                mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                i_mmap_lock_write(vma->vm_file->f_mapping);
         if (vma->anon_vma)
                 anon_vma_lock_write(vma->anon_vma);
 }
@@ -87,7 +87,7 @@ static void drop_rmap_locks(struct vm_area_struct *vma)
         if (vma->anon_vma)
                 anon_vma_unlock_write(vma->anon_vma);
         if (vma->vm_file)
-                mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 static pte_t move_soft_dirty_pte(pte_t pte)
diff --git a/mm/nommu.c b/mm/nommu.c
index 5b100ba..f994621 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -753,11 +753,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
         if (vma->vm_file) {
                 mapping = vma->vm_file->f_mapping;
 
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
                 flush_dcache_mmap_lock(mapping);
                 vma_interval_tree_insert(vma, &mapping->i_mmap);
                 flush_dcache_mmap_unlock(mapping);
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
         }
 
         /* add the VMA to the tree */
@@ -826,11 +826,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
         if (vma->vm_file) {
                 mapping = vma->vm_file->f_mapping;
 
-                mutex_lock(&mapping->i_mmap_mutex);
+                i_mmap_lock_write(mapping);
                 flush_dcache_mmap_lock(mapping);
                 vma_interval_tree_remove(vma, &mapping->i_mmap);
                 flush_dcache_mmap_unlock(mapping);
-                mutex_unlock(&mapping->i_mmap_mutex);
+                i_mmap_unlock_write(mapping);
         }
 
         /* remove from the MM's tree and list */
@@ -2134,14 +2134,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
         down_write(&nommu_region_sem);
-        mutex_lock(&inode->i_mapping->i_mmap_mutex);
+        i_mmap_lock_write(inode->i_mapping);
 
         /* search for VMAs that fall within the dead zone */
         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
                 /* found one - only interested if it's shared out of the page
                  * cache */
                 if (vma->vm_flags & VM_SHARED) {
-                        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+                        i_mmap_unlock_write(inode->i_mapping);
                         up_write(&nommu_region_sem);
                         return -ETXTBSY; /* not quite true, but near enough */
                 }
@@ -2169,7 +2169,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                 }
         }
 
-        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+        i_mmap_unlock_write(inode->i_mapping);
         up_write(&nommu_region_sem);
         return 0;
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 66dd9c1..478f384 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1748,7 +1748,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                 if (!mapping_mapped(peer))
                         continue;
 
-                mutex_lock(&peer->i_mmap_mutex);
+                i_mmap_lock_write(peer);
                 vma_interval_tree_foreach(vma, &peer->i_mmap, pgoff, pgoff) {
                         unsigned long address = vma_address(page, vma);
 
@@ -1764,7 +1764,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 
                         cond_resched();
                 }
-                mutex_unlock(&peer->i_mmap_mutex);
+                i_mmap_unlock_write(peer);
 
                 if (ret != SWAP_AGAIN)
                         goto done;
@@ -1772,7 +1772,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
                         goto done;
         }
 done:
-        mutex_unlock(&mapping->i_mmap_mutex);
+        i_mmap_unlock_write(mapping);
         return ret;
 }
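
The conversion above is purely mechanical: every open-coded mutex_lock()/mutex_unlock() on mapping->i_mmap_mutex becomes a call to the wrapper helpers. For reference, a minimal sketch of what the helpers look like, assuming this tree carries the upstream definitions from include/linux/fs.h (at this point in the series they are still thin wrappers around i_mmap_mutex; the switch of the lock itself to a different primitive happens in a later upstream patch):

static inline void i_mmap_lock_write(struct address_space *mapping)
{
        /* Assumed definition: still just the old mutex underneath. */
        mutex_lock(&mapping->i_mmap_mutex);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
        mutex_unlock(&mapping->i_mmap_mutex);
}

Keeping all call sites behind these wrappers is what lets the locking primitive be changed later without touching every user again.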