list_for_each_entry_rcu() should be guarded by rcu_read_lock(). This patch adds
rcu_read_lock()/rcu_read_unlock() around the list_for_each_entry_rcu() traversal
in perf_adjust_freq_unthr_context().

Signed-off-by: Chen Jun <jun.d.c...@intel.com>
---
 kernel/events/core.c |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)
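
[Note for reviewers, not part of the patch: a minimal sketch of the locking
pattern the change above follows, using a hypothetical list purely for
illustration; the struct, list head, and field names below are made up and do
not come from the perf code.]

	#include <linux/rcupdate.h>
	#include <linux/rculist.h>

	struct item {
		int value;
		struct list_head entry;		/* linked into example_list */
	};

	static LIST_HEAD(example_list);

	static int sum_items(void)
	{
		struct item *it;
		int sum = 0;

		/*
		 * Readers must be inside an RCU read-side critical section
		 * while walking an RCU-protected list, so the traversal is
		 * bracketed by rcu_read_lock()/rcu_read_unlock().
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(it, &example_list, entry)
			sum += it->value;
		rcu_read_unlock();

		return sum;
	}

The patch applies the same bracketing to the event_list walk below, keeping the
unlock before perf_pmu_enable() so the critical section covers only the list
traversal.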

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d..e2f2fa5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2442,6 +2442,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 
        raw_spin_lock(&ctx->lock);
        perf_pmu_disable(ctx->pmu);
+       rcu_read_lock();
 
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2483,6 +2484,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
        }
 
+       rcu_read_unlock();
        perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
 }
-- 
1.7.4.1


