The __per_cpu_offset[] array has "nr_cpu_ids" elements so change the > to
>= to prevent a read one element beyond the end of the array.

Fixes: 0504bc41a62c ("kernel/smp: Provide CSD lock timeout diagnostics")
Signed-off-by: Dan Carpenter <[email protected]>
---
 kernel/smp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 78b602cae6c2..f49966713ac3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -171,7 +171,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
                *bug_id = atomic_inc_return(&csd_bug_count);
        cpu = csd_lock_wait_getcpu(csd);
        smp_mb(); // No stale cur_csd values!
-	if (WARN_ONCE(cpu < 0 || cpu > nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
+	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
                cpu_cur_csd = READ_ONCE(per_cpu(cur_csd, 0));
        else
                cpu_cur_csd = READ_ONCE(per_cpu(cur_csd, cpu));
-- 
2.27.0

Reply via email to