3.16.51-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Martin Schwidefsky <[email protected]>

commit 60f07c8ec5fae06c23e9fd7bab67dabce92b3414 upstream.

The order in __tlb_flush_mm_lazy is to flush the TLB first and then clear
the mm->context.flush_mm bit. This can lead to missed flushes, as the
bit can be set at any time; the order needs to be the other way around.
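
For context, the old sequence (a simplified rendering of the current
function; the interleaving comments are illustrative, not part of the
patch):

	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		if (mm->context.flush_mm) {
			__tlb_flush_mm(mm);
			/* Another CPU may set flush_mm right here, asking
			 * for a flush on behalf of a page table change made
			 * after the flush above was issued ... */
			mm->context.flush_mm = 0;
			/* ... and this clear silently drops that request. */
		}
	}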

But this leads to a different race: __tlb_flush_mm_lazy may be called
on two CPUs concurrently. If mm->context.flush_mm is cleared first, then
another CPU can bypass __tlb_flush_mm_lazy although the first CPU has
not done the flush yet. In a virtualized environment the time until the
flush is finally completed can be arbitrarily long.
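
One possible interleaving with the order reversed but no serialization
(an illustrative timeline, not taken from the patch):

	/*
	 *   CPU 0                          CPU 1
	 *   -----                          -----
	 *   mm->context.flush_mm = 0;
	 *                                  __tlb_flush_mm_lazy() sees
	 *                                  flush_mm == 0 and returns without
	 *                                  flushing, though stale TLB
	 *                                  entries are still present
	 *   __tlb_flush_mm(mm);            (the flush arrives too late)
	 */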

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function
in finish_arch_post_lock_switch as well.
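
For reference, the resulting sequence (matching the tlbflush.h hunk
below; the comments are added here to spell out the invariant and are
not part of the patch):

	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		spin_lock(&mm->context.lock);
		if (mm->context.flush_mm) {
			/* Clear first: a request set after this point
			 * belongs to a later update and will trigger its
			 * own flush. */
			mm->context.flush_mm = 0;
			__tlb_flush_mm(mm);
		}
		/* A concurrent caller spins on the lock until the flush
		 * above has been issued, so it can no longer return early. */
		spin_unlock(&mm->context.lock);
	}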

Reviewed-by: Heiko Carstens <[email protected]>
Signed-off-by: Martin Schwidefsky <[email protected]>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <[email protected]>
---
 arch/s390/include/asm/mmu.h         | 2 ++
 arch/s390/include/asm/mmu_context.h | 4 ++--
 arch/s390/include/asm/tlbflush.h    | 4 +++-
 3 files changed, 7 insertions(+), 3 deletions(-)

--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 
 typedef struct {
+       spinlock_t lock;
        cpumask_t cpu_attach_mask;
        atomic_t attach_count;
        unsigned int flush_mm;
@@ -21,6 +22,7 @@ typedef struct {
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                                \
+       .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),              \
        .context.list_lock    = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
        .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),    \
        .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       spin_lock_init(&mm->context.lock);
        spin_lock_init(&mm->context.list_lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        INIT_LIST_HEAD(&mm->context.gmap_list);
@@ -95,8 +96,7 @@ static inline void finish_arch_post_lock
                        cpu_relax();
 
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-               if (mm->context.flush_mm)
-                       __tlb_flush_mm(mm);
+               __tlb_flush_mm_lazy(mm);
                preempt_enable();
        }
        set_fs(current->thread.mm_segment);
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -164,10 +164,12 @@ static inline void __tlb_flush_mm(struct
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
+       spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
-               __tlb_flush_mm(mm);
                mm->context.flush_mm = 0;
+               __tlb_flush_mm(mm);
        }
+       spin_unlock(&mm->context.lock);
 }
 
 /*
