From: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>

We have had remap_file_pages(2) emulation in the -mm tree for a few
release cycles and we plan to have it in mainline in v3.20. This patchset
removes the rest of the VM_NONLINEAR infrastructure.

Patches 1-8 take care of the generic code. They are pretty
straightforward and can be applied independently of the other patches.

The remaining patches remove pte_file()-related stuff from
architecture-specific code. This usually frees up one bit in the
non-present pte. I've tried to reuse that bit for the swap offset where I
was able to figure out how to do so.

For obvious reasons I cannot test all of that arch-specific code and
would like to see acks from the maintainers.

In total, remap_file_pages(2) required about 1.4K lines of not-so-trivial
kernel code. That's too much for functionality nobody uses.
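
For reference, here is a minimal userspace sketch (not part of this
series; "data.bin" is a made-up file, error handling omitted) of what
remap_file_pages(2) was used for: rearranging file pages inside a single
MAP_SHARED window. The old code backed this with one non-linear VMA and
pte_file() entries; the emulation backs it with several ordinary linear
VMAs instead.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		int fd = open("data.bin", O_RDWR);
		char *p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);

		/* Swap the first two file pages within the window:
		 * file page 1 at window offset 0, file page 0 after it. */
		remap_file_pages(p,       psz, 0, 1, 0);
		remap_file_pages(p + psz, psz, 0, 0, 0);

		munmap(p, 2 * psz);
		close(fd);
		return 0;
	}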

Tested-by: Felipe Balbi <ba...@ti.com>

This patch (of 38):

We don't create non-linear mappings anymore. Let's drop the code which
handles them on unmap/zap.
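
For context (not from this patch): since the emulation, the same layout
is expressed with ordinary fixed mappings. Assuming fd, p and psz from
the sketch in the cover letter, it boils down to roughly:

	/* file page 1 at window offset 0, file page 0 at the next page */
	mmap(p,       psz, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, psz);
	mmap(p + psz, psz, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

Each resulting VMA is linear, so zap/unmap never sees pte_file()
entries, and the nonlinear handling below can go away.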

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Linus Torvalds <torva...@linux-foundation.org>

https://jira.sw.ru/browse/PSBM-52992

(cherry picked from commit 8a5f14a23177061ec11daeaa3d09d0765d785c47)
Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 include/linux/mm.h |  1 -
 mm/madvise.c       |  9 +-----
 mm/memory.c        | 83 ++++++++++++++----------------------------------------
 3 files changed, 22 insertions(+), 71 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a282ed2..2acaa83 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1032,7 +1032,6 @@ extern void user_shm_unlock(size_t, struct user_struct *);
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
 struct zap_details {
-       struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
        struct address_space *check_mapping;    /* Check page->mapping if set */
        pgoff_t first_index;                    /* Lowest page->index to unmap */
        pgoff_t last_index;                     /* Highest page->index to unmap */
diff --git a/mm/madvise.c b/mm/madvise.c
index f11f53f..7dbec3a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -279,14 +279,7 @@ static long madvise_dontneed(struct vm_area_struct * vma,
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;
 
-       if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
-               struct zap_details details = {
-                       .nonlinear_vma = vma,
-                       .last_index = ULONG_MAX,
-               };
-               zap_page_range(vma, start, end - start, &details);
-       } else
-               zap_page_range(vma, start, end - start, NULL);
+       zap_page_range(vma, start, end - start, NULL);
        return 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index c3a3655..59c8701 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1113,6 +1113,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
+       swp_entry_t entry;
 
 again:
        init_rss_vec(rss);
@@ -1138,28 +1139,12 @@ again:
                                if (details->check_mapping &&
                                    details->check_mapping != page->mapping)
                                        continue;
-                               /*
-                                * Each page->index must be checked when
-                                * invalidating or truncating nonlinear.
-                                */
-                               if (details->nonlinear_vma &&
-                                   (page->index < details->first_index ||
-                                    page->index > details->last_index))
-                                       continue;
                        }
                        ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                        tlb->fullmm);
                        tlb_remove_tlb_entry(tlb, pte, addr);
                        if (unlikely(!page))
                                continue;
-                       if (unlikely(details) && details->nonlinear_vma
-                           && linear_page_index(details->nonlinear_vma,
-                                               addr) != page->index) {
-                               pte_t ptfile = pgoff_to_pte(page->index);
-                               if (pte_soft_dirty(ptent))
-                                       ptfile = pte_file_mksoft_dirty(ptfile);
-                               set_pte_at(mm, addr, pte, ptfile);
-                       }
                        if (PageAnon(page))
                                rss[MM_ANONPAGES]--;
                        else {
@@ -1178,33 +1163,25 @@ again:
                                break;
                        continue;
                }
-               /*
-                * If details->check_mapping, we leave swap entries;
-                * if details->nonlinear_vma, we leave file entries.
-                */
+               /* If details->check_mapping, we leave swap entries. */
                if (unlikely(details))
                        continue;
-               if (pte_file(ptent)) {
-                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-                               print_bad_pte(vma, addr, ptent, NULL);
-               } else {
-                       swp_entry_t entry = pte_to_swp_entry(ptent);
 
-                       if (!non_swap_entry(entry))
-                               rss[MM_SWAPENTS]--;
-                       else if (is_migration_entry(entry)) {
-                               struct page *page;
+               entry = pte_to_swp_entry(ptent);
+               if (!non_swap_entry(entry))
+                       rss[MM_SWAPENTS]--;
+               else if (is_migration_entry(entry)) {
+                       struct page *page;
 
-                               page = migration_entry_to_page(entry);
+                       page = migration_entry_to_page(entry);
 
-                               if (PageAnon(page))
-                                       rss[MM_ANONPAGES]--;
-                               else
-                                       rss[MM_FILEPAGES]--;
-                       }
-                       if (unlikely(!free_swap_and_cache(entry)))
-                               print_bad_pte(vma, addr, ptent, NULL);
+                       if (PageAnon(page))
+                               rss[MM_ANONPAGES]--;
+                       else
+                               rss[MM_FILEPAGES]--;
                }
+               if (unlikely(!free_swap_and_cache(entry)))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -1313,7 +1290,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
        pgd_t *pgd;
        unsigned long next;
 
-       if (details && !details->check_mapping && !details->nonlinear_vma)
+       if (details && !details->check_mapping)
                details = NULL;
 
        BUG_ON(addr >= end);
@@ -1409,7 +1386,7 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
@@ -1435,7 +1412,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of nonlinear truncation or shared cache invalidation
+ * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
@@ -3147,25 +3124,11 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
        }
 }
 
-static inline void unmap_mapping_range_list(struct list_head *head,
-                                           struct zap_details *details)
-{
-       struct vm_area_struct *vma;
-
-       /*
-        * In nonlinear VMAs there is no correspondence between virtual address
-        * offset and file offset.  So we must perform an exhaustive search
-        * across *all* the pages in each nonlinear VMA, not just the pages
-        * whose virtual address lies outside the file truncation point.
-        */
-       list_for_each_entry(vma, head, shared.nonlinear) {
-               details->nonlinear_vma = vma;
-               unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
-       }
-}
-
 /**
- * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified
+ * address_space corresponding to the specified page range in the underlying
+ * file.
+ *
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -3194,7 +3157,6 @@ void unmap_mapping_range(struct address_space *mapping,
        }
 
        details.check_mapping = even_cows? NULL: mapping;
-       details.nonlinear_vma = NULL;
        details.first_index = hba;
        details.last_index = hba + hlen - 1;
        if (details.last_index < details.first_index)
@@ -3204,8 +3166,6 @@ void unmap_mapping_range(struct address_space *mapping,
        mutex_lock(&mapping->i_mmap_mutex);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
-       if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
-               unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        mutex_unlock(&mapping->i_mmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
@@ -4654,7 +4614,6 @@ void close_mapping_peer(struct address_space *mapping)
 
                synchronize_mapping_faults(mapping);
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
-               unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
        }
 
        mutex_unlock(&mapping->i_mmap_mutex);
-- 
2.7.3
