3.4.106-rt132-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Mike Galbraith <umgwanakikb...@gmail.com>

Shrug.  Lots of hobbyists have a beast in their basement, right?

Convert the UV locks that are taken in NMI and other non-preemptible
paths (the BAU uvhub/queue locks, the per-blade nmi_lock, the BAU
disable_lock and the RTC timer head lock) from spinlock_t to
raw_spinlock_t, since on PREEMPT_RT a spinlock_t becomes a sleeping
lock and must not be acquired in those contexts.  Also disable
preemption across the MMR access in uv_read_rtc() so the per-cpu
offset calculation and the local MMR read happen on the same CPU.
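For context, a minimal sketch (not part of this patch; the lock,
counter and function names below are made up) of the pattern the
conversion enforces: a lock touched from NMI context has to stay a
true spinning lock on RT, hence raw_spinlock_t.

#include <linux/spinlock.h>

/* Hypothetical lock/counter, mirroring the per-blade nmi_lock below. */
static DEFINE_RAW_SPINLOCK(example_nmi_lock);
static unsigned long example_nmi_count;

/* Runs in NMI context: a sleeping spinlock_t would be a bug here on RT. */
static void example_nmi_path(void)
{
	raw_spin_lock(&example_nmi_lock);
	example_nmi_count++;
	raw_spin_unlock(&example_nmi_lock);
}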

Cc: stable...@vger.kernel.org
Signed-off-by: Mike Galbraith <mgalbra...@suse.de>
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Signed-off-by: Steven Rostedt <rost...@goodmis.org>
---
 arch/x86/include/asm/uv/uv_bau.h   | 12 ++++++------
 arch/x86/include/asm/uv/uv_hub.h   |  2 +-
 arch/x86/kernel/apic/x2apic_uv_x.c | 12 ++++++------
 arch/x86/platform/uv/tlb_uv.c      | 30 +++++++++++++++---------------
 arch/x86/platform/uv/uv_time.c     | 21 +++++++++++++--------
 5 files changed, 41 insertions(+), 36 deletions(-)

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 6149b476d9df..ca5cfc1a3c6e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -602,8 +602,8 @@ struct bau_control {
        unsigned short          uvhub_quiesce;
        short                   socket_acknowledge_count[DEST_Q_SIZE];
        cycles_t                send_message;
-       spinlock_t              uvhub_lock;
-       spinlock_t              queue_lock;
+       raw_spinlock_t          uvhub_lock;
+       raw_spinlock_t          queue_lock;
        /* tunables */
        int                     max_concurr;
        int                     max_concurr_const;
@@ -760,15 +760,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
  * to be lowered below the current 'v'.  atomic_add_unless can only stop
  * on equal.
  */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
 {
-       spin_lock(lock);
+       raw_spin_lock(lock);
        if (atomic_read(v) >= u) {
-               spin_unlock(lock);
+               raw_spin_unlock(lock);
                return 0;
        }
        atomic_inc(v);
-       spin_unlock(lock);
+       raw_spin_unlock(lock);
        return 1;
 }
 
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb8..953f5c666ee0 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -475,7 +475,7 @@ struct uv_blade_info {
        unsigned short  nr_online_cpus;
        unsigned short  pnode;
        short           memory_nid;
-       spinlock_t      nmi_lock;
+       raw_spinlock_t  nmi_lock;
        unsigned long   nmi_count;
 };
 extern struct uv_blade_info *uv_blade_info;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 87bfa69e216e..8e56e4f72694 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -56,7 +56,7 @@ int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
-static DEFINE_SPINLOCK(uv_nmi_lock);
+static DEFINE_RAW_SPINLOCK(uv_nmi_lock);
 
 static struct apic apic_x2apic_uv_x;
 
@@ -695,13 +695,13 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
        real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
 
        if (unlikely(real_uv_nmi)) {
-               spin_lock(&uv_blade_info[bid].nmi_lock);
+               raw_spin_lock(&uv_blade_info[bid].nmi_lock);
                real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
                if (real_uv_nmi) {
                        uv_blade_info[bid].nmi_count++;
                        uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
                }
-               spin_unlock(&uv_blade_info[bid].nmi_lock);
+               raw_spin_unlock(&uv_blade_info[bid].nmi_lock);
        }
 
        if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
@@ -713,10 +713,10 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
         * Use a lock so only one cpu prints at a time.
         * This prevents intermixed output.
         */
-       spin_lock(&uv_nmi_lock);
+       raw_spin_lock(&uv_nmi_lock);
        pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
        dump_stack();
-       spin_unlock(&uv_nmi_lock);
+       raw_spin_unlock(&uv_nmi_lock);
 
        return NMI_HANDLED;
 }
@@ -811,7 +811,7 @@ void __init uv_system_init(void)
                        uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
-                       spin_lock_init(&uv_blade_info[blade].nmi_lock);
+                       raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 59880afa851f..0664c9920e58 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -39,7 +39,7 @@ static int timeout_base_ns[] = {
 static int timeout_us;
 static int nobau;
 static int baudisabled;
-static spinlock_t disable_lock;
+static raw_spinlock_t disable_lock;
 static cycles_t congested_cycles;
 
 /* tunables: */
@@ -545,7 +545,7 @@ int handle_uv2_busy(struct bau_control *bcp)
        cycles_t ttm;
 
        stat->s_uv2_wars++;
-       spin_lock(&hmaster->uvhub_lock);
+       raw_spin_lock(&hmaster->uvhub_lock);
        /* try for the original first */
        if (busy_one != normal) {
                if (!normal_busy(bcp))
@@ -595,12 +595,12 @@ int handle_uv2_busy(struct bau_control *bcp)
                 * free up.
                 */
                stat->s_uv2_war_waits++;
-               spin_unlock(&hmaster->uvhub_lock);
+               raw_spin_unlock(&hmaster->uvhub_lock);
                ttm = get_cycles();
                do {
                        cpu_relax();
                } while (normal_busy(bcp));
-               spin_lock(&hmaster->uvhub_lock);
+               raw_spin_lock(&hmaster->uvhub_lock);
                /* switch to the original descriptor */
                bcp->using_desc = normal;
                bau_desc_old = bcp->descriptor_base;
@@ -610,7 +610,7 @@ int handle_uv2_busy(struct bau_control *bcp)
                bau_desc_new += (ITEMS_PER_DESC * normal);
                *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
        }
-       spin_unlock(&hmaster->uvhub_lock);
+       raw_spin_unlock(&hmaster->uvhub_lock);
        return FLUSH_RETRY_BUSYBUG;
 }
 
@@ -724,9 +724,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
 
                quiesce_local_uvhub(hmaster);
 
-               spin_lock(&hmaster->queue_lock);
+               raw_spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
-               spin_unlock(&hmaster->queue_lock);
+               raw_spin_unlock(&hmaster->queue_lock);
 
                end_uvhub_quiesce(hmaster);
 
@@ -746,9 +746,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
 
                quiesce_local_uvhub(hmaster);
 
-               spin_lock(&hmaster->queue_lock);
+               raw_spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
-               spin_unlock(&hmaster->queue_lock);
+               raw_spin_unlock(&hmaster->queue_lock);
 
                end_uvhub_quiesce(hmaster);
 
@@ -765,7 +765,7 @@ static void disable_for_congestion(struct bau_control *bcp,
                                        struct ptc_stats *stat)
 {
        /* let only one cpu do this disabling */
-       spin_lock(&disable_lock);
+       raw_spin_lock(&disable_lock);
 
        if (!baudisabled && bcp->period_requests &&
            ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
@@ -784,7 +784,7 @@ static void disable_for_congestion(struct bau_control *bcp,
                }
        }
 
-       spin_unlock(&disable_lock);
+       raw_spin_unlock(&disable_lock);
 }
 
 static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -833,7 +833,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
  */
 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
 {
-       spinlock_t *lock = &hmaster->uvhub_lock;
+       raw_spinlock_t *lock = &hmaster->uvhub_lock;
        atomic_t *v;
 
        v = &hmaster->active_descriptor_count;
@@ -1850,8 +1850,8 @@ static void __init init_per_cpu_tunables(void)
                bcp->cong_reps                  = congested_reps;
                bcp->cong_period                = congested_period;
                bcp->clocks_per_100_usec =      usec_2_cycles(100);
-               spin_lock_init(&bcp->queue_lock);
-               spin_lock_init(&bcp->uvhub_lock);
+               raw_spin_lock_init(&bcp->queue_lock);
+               raw_spin_lock_init(&bcp->uvhub_lock);
        }
 }
 
@@ -2078,7 +2078,7 @@ static int __init uv_bau_init(void)
        }
 
        nuvhubs = uv_num_possible_blades();
-       spin_lock_init(&disable_lock);
+       raw_spin_lock_init(&disable_lock);
        congested_cycles = usec_2_cycles(congested_respns_us);
 
        uv_base_pnode = 0x7fffffff;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5032e0d19b86..eb55dd021c64 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
 
 /* There is one of these allocated per node */
 struct uv_rtc_timer_head {
-       spinlock_t      lock;
+       raw_spinlock_t  lock;
        /* next cpu waiting for timer, local node relative: */
        int             next_cpu;
        /* number of cpus on this node: */
@@ -178,7 +178,7 @@ static __init int uv_rtc_allocate_timers(void)
                                uv_rtc_deallocate_timers();
                                return -ENOMEM;
                        }
-                       spin_lock_init(&head->lock);
+                       raw_spin_lock_init(&head->lock);
                        head->ncpus = uv_blade_nr_possible_cpus(bid);
                        head->next_cpu = -1;
                        blade_info[bid] = head;
@@ -232,7 +232,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
        unsigned long flags;
        int next_cpu;
 
-       spin_lock_irqsave(&head->lock, flags);
+       raw_spin_lock_irqsave(&head->lock, flags);
 
        next_cpu = head->next_cpu;
        *t = expires;
@@ -244,12 +244,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
                if (uv_setup_intr(cpu, expires)) {
                        *t = ULLONG_MAX;
                        uv_rtc_find_next_timer(head, pnode);
-                       spin_unlock_irqrestore(&head->lock, flags);
+                       raw_spin_unlock_irqrestore(&head->lock, flags);
                        return -ETIME;
                }
        }
 
-       spin_unlock_irqrestore(&head->lock, flags);
+       raw_spin_unlock_irqrestore(&head->lock, flags);
        return 0;
 }
 
@@ -268,7 +268,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
        unsigned long flags;
        int rc = 0;
 
-       spin_lock_irqsave(&head->lock, flags);
+       raw_spin_lock_irqsave(&head->lock, flags);
 
        if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
                rc = 1;
@@ -280,7 +280,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
                        uv_rtc_find_next_timer(head, pnode);
        }
 
-       spin_unlock_irqrestore(&head->lock, flags);
+       raw_spin_unlock_irqrestore(&head->lock, flags);
 
        return rc;
 }
@@ -300,13 +300,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
 static cycle_t uv_read_rtc(struct clocksource *cs)
 {
        unsigned long offset;
+       cycle_t cycles;
 
+       preempt_disable();
        if (uv_get_min_hub_revision_id() == 1)
                offset = 0;
        else
                offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-       return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+       cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+       preempt_enable();
+
+       return cycles;
 }
 
 /*
-- 
2.1.4

