4.2.8-ckt10 -stable review patch.  If anyone has any objections, please let me 
know.

---8<------------------------------------------------------------

From: Jiang Liu <[email protected]>

commit 111abeba67e0dbdc26537429de9155e4f1d807d8 upstream.

There's a race condition between

x86_vector_free_irqs()
{
        free_apic_chip_data(irq_data->chip_data);
        xxxxx   //irq_data->chip_data has been freed, but the pointer
                //hasn't been reset yet
        irq_domain_reset_irq_data(irq_data);
}

and

smp_irq_move_cleanup_interrupt()
{
        raw_spin_lock(&vector_lock);
        data = apic_chip_data(irq_desc_get_irq_data(desc));
        access data->xxxx       // may access freed memory
        raw_spin_unlock(&desc->lock);
}

which may cause smp_irq_move_cleanup_interrupt() to access freed memory.

Fix this by calling irq_domain_reset_irq_data(), which clears the
irq_data->chip_data pointer, with vector_lock held, so that
smp_irq_move_cleanup_interrupt() can never observe a pointer to freed
memory.

[ tglx: Free memory outside of lock held region. ]
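
To make the required ordering explicit, here is a minimal userspace
sketch of the pattern this patch establishes (a pthread mutex stands in
for vector_lock and malloc()/free() for the apic_chip_data allocation;
all names below are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdlib.h>

struct chip_data { int vector; };

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chip_data *chip_data_ptr;  /* plays irq_data->chip_data */

/* Pre-patch ordering: the memory is freed before the pointer is reset. */
static void free_path_buggy(void)
{
        free(chip_data_ptr);    /* memory gone ...                      */
                                /* ... but a reader that takes the lock */
                                /* here still sees the stale pointer    */
        chip_data_ptr = NULL;
}

/* Patched ordering: clear the pointer under the lock, free outside it. */
static void free_path_fixed(void)
{
        struct chip_data *tmp;

        pthread_mutex_lock(&vector_lock);
        tmp = chip_data_ptr;
        chip_data_ptr = NULL;   /* readers now see NULL, never freed memory */
        pthread_mutex_unlock(&vector_lock);
        free(tmp);              /* tglx: free outside of lock held region */
}

/* Cleanup side (smp_irq_move_cleanup_interrupt() analogue). */
static void cleanup_path(void)
{
        pthread_mutex_lock(&vector_lock);
        if (chip_data_ptr)
                chip_data_ptr->vector = 0;  /* safe with the fixed ordering */
        pthread_mutex_unlock(&vector_lock);
}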

Signed-off-by: Jiang Liu <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Tested-by: Joe Lawrence <[email protected]>
Cc: Jeremiah Mahler <[email protected]>
Cc: [email protected]
Cc: Guenter Roeck <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
[ kamal: backport to 4.2-stable; prereq for
  1bdb897 x86/apic: Handle zero vector gracefully in clear_vector_irq() ]
Signed-off-by: Kamal Mostafa <[email protected]>
---
 arch/x86/kernel/apic/vector.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ea4ba83..7889bec 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -225,9 +225,7 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        int cpu, vector;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!data->cfg.vector);
 
        vector = data->cfg.vector;
@@ -237,10 +235,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
        data->cfg.vector = 0;
        cpumask_clear(data->domain);
 
-       if (likely(!data->move_in_progress)) {
-               raw_spin_unlock_irqrestore(&vector_lock, flags);
+       if (likely(!data->move_in_progress))
                return;
-       }
 
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
@@ -252,7 +248,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                }
        }
        data->move_in_progress = 0;
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -273,19 +268,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
 {
+       struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
+       unsigned long flags;
        int i;
 
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
+                       raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
-                       free_apic_chip_data(irq_data->chip_data);
+                       apic_data = irq_data->chip_data;
+                       irq_domain_reset_irq_data(irq_data);
+                       raw_spin_unlock_irqrestore(&vector_lock, flags);
+                       free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
 #endif
-                       irq_domain_reset_irq_data(irq_data);
                }
        }
 }
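
For reference, the post-patch x86_vector_free_irqs() reconstructed from
the hunks above reads as follows (a reading aid assembled from the diff,
not the verbatim 4.2-stable source file):

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
                        apic_data = irq_data->chip_data;
                        irq_domain_reset_irq_data(irq_data);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
#endif
                }
        }
}
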
-- 
2.7.4
