The commit has been pushed to "branch-rh8-4.18.0-240.1.1.vz8.5.x-ovz" and will appear
at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-240.1.1.vz8.5.47
------>
commit 83f773fe77fea56d52618ec0f3c67369310289b6
Author: Will Deacon <will.dea...@arm.com>
Date:   Mon Jun 21 20:01:19 2021 +0300

    ms/asm-generic/tlb: Track which levels of the page tables have been cleared
    
    It is common for architectures with hugepage support to require only a
    single TLB invalidation operation per hugepage during unmap(), rather than
    iterating through the mapping at a PAGE_SIZE increment. Currently,
    however, the level in the page table where the unmap() operation occurs
    is not stored in the mmu_gather structure, therefore forcing
    architectures to issue additional TLB invalidation operations or to give
    up and over-invalidate by e.g. invalidating the entire TLB.
    
    Ideally, we could add an interval rbtree to the mmu_gather structure,
    which would allow us to associate the correct mapping granule with the
    various sub-mappings within the range being invalidated. However, this
    is costly in terms of book-keeping and memory management, so instead we
    approximate by keeping track of the page table levels that are cleared
    and provide a means to query the smallest granule required for invalidation.
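
    The intended consumer is an architecture's flush hook. As a minimal
    sketch (not part of this patch), a hypothetical tlb_flush()
    implementation could use tlb_get_unmap_size() to invalidate at the
    granule that was actually unmapped; flush_mm_example() and
    flush_one_entry_example() are assumed stand-ins for real
    architecture-specific invalidation primitives, not existing kernel APIs.

        /* Assumed, architecture-specific primitives (illustration only): */
        void flush_mm_example(struct mm_struct *mm);
        void flush_one_entry_example(unsigned long addr);

        static inline void example_tlb_flush(struct mmu_gather *tlb)
        {
                /* Size of the smallest granule that was cleared:
                 * PAGE_SIZE, PMD_SIZE, PUD_SIZE or P4D_SIZE. */
                unsigned long stride = tlb_get_unmap_size(tlb);
                unsigned long addr;

                if (tlb->fullmm || tlb->freed_tables) {
                        /* Conservative: full teardown, or page-table pages
                         * were freed and walk caches may be stale. */
                        flush_mm_example(tlb->mm);      /* assumed helper */
                        return;
                }

                /* One invalidation per mapping granule, e.g. one per hugepage
                 * instead of one per PAGE_SIZE step. */
                for (addr = tlb->start; addr < tlb->end; addr += stride)
                        flush_one_entry_example(addr);  /* assumed helper */
        }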
    
    Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
    Acked-by: Nicholas Piggin <npig...@gmail.com>
    Signed-off-by: Will Deacon <will.dea...@arm.com>
    
    https://jira.sw.ru/browse/PSBM-101300
    (cherry picked from ms commit a6d60245d6d9b1caf66b0d94419988c4836980af)
    Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
    
    (cherry picked from vz7 commit a67d2f03fd7fa7e727d060857a22c56ec858cdb9)
    https://jira.sw.ru/browse/PSBM-127854
    
    Signed-off-by: Valeriy Vdovin <valeriy.vdo...@virtuozzo.com>
---
 include/asm-generic/tlb.h | 50 +++++++++++++++++++++++++++++++++++++++++++----
 mm/memory.c               |  4 +++-
 2 files changed, 49 insertions(+), 5 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ef44ee9c8b42..43c4aca1a6b8 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -124,6 +124,14 @@ struct mmu_gather {
         * we have removed page directories
         */
        unsigned int            freed_tables : 1;
+       /*
+        * at which levels have we cleared entries?
+        */
+       unsigned int            cleared_ptes : 1;
+       unsigned int            cleared_pmds : 1;
+       unsigned int            cleared_puds : 1;
+       unsigned int            cleared_p4ds : 1;
+
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];
@@ -158,6 +166,29 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
                tlb->end = 0;
        }
        tlb->freed_tables = 0;
+       tlb->cleared_ptes = 0;
+       tlb->cleared_pmds = 0;
+       tlb->cleared_puds = 0;
+       tlb->cleared_p4ds = 0;
+}
+
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+       if (tlb->cleared_ptes)
+               return PAGE_SHIFT;
+       if (tlb->cleared_pmds)
+               return PMD_SHIFT;
+       if (tlb->cleared_puds)
+               return PUD_SHIFT;
+       if (tlb->cleared_p4ds)
+               return P4D_SHIFT;
+
+       return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+       return 1UL << tlb_get_unmap_shift(tlb);
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -240,13 +271,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_tlb_entry(tlb, ptep, address)               \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->cleared_ptes = 1;                          \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)            \
-       do {                                                         \
-               __tlb_adjust_range(tlb, address, huge_page_size(h)); \
-               __tlb_remove_tlb_entry(tlb, ptep, address);          \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
+       do {                                                    \
+               unsigned long _sz = huge_page_size(h);          \
+               __tlb_adjust_range(tlb, address, _sz);          \
+               if (_sz == PMD_SIZE)                            \
+                       tlb->cleared_pmds = 1;                  \
+               else if (_sz == PUD_SIZE)                       \
+                       tlb->cleared_puds = 1;                  \
+               __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
 /**
@@ -260,6 +297,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                   \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);       \
+               tlb->cleared_pmds = 1;                          \
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
        } while (0)
 
@@ -274,6 +312,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)                   \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);       \
+               tlb->cleared_puds = 1;                          \
                __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
        } while (0)
 
@@ -300,6 +339,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
+               tlb->cleared_pmds = 1;                          \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
 #endif
@@ -309,6 +349,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                tlb->freed_tables = 1;                          \
+               tlb->cleared_puds = 1;                          \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
 #endif
@@ -319,6 +360,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
+               tlb->cleared_p4ds = 1;                          \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 5105a0e727fd..3919486a1ada 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -269,8 +269,10 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 {
        struct mmu_gather_batch *batch, *next;
 
-       if (force)
+       if (force) {
+               __tlb_reset_range(tlb);
                __tlb_adjust_range(tlb, start, end - start);
+       }
 
        tlb_flush_mmu(tlb);
 