Now that call_rcu()'s callback is not invoked until after all
preempt-disable regions of code have completed (in addition to explicitly
marked RCU read-side critical sections), call_rcu() can be used in place
of call_rcu_sched().  This commit therefore makes that change.
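
For readers unfamiliar with the RCU flavor consolidation this relies on, here
is an illustrative sketch (not part of the patch) of the guarantee being used:
a reader that merely disables preemption is now waited for by call_rcu(), so
call_rcu_sched() is no longer needed.  The struct and function names below
(my_data, do_something, etc.) are hypothetical and only for illustration.

	struct my_data {
		struct rcu_head rcu;
		int value;
	};

	static void my_data_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct my_data, rcu));
	}

	static void my_data_reader(struct my_data *p)
	{
		preempt_disable();	/* acts as an RCU read-side critical section */
		do_something(p->value);
		preempt_enable();
	}

	static void my_data_retire(struct my_data *p)
	{
		/* Callback runs only after all such readers have completed. */
		call_rcu(&p->rcu, my_data_free_rcu);
	}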

Signed-off-by: Paul E. McKenney <paul...@linux.ibm.com>
Cc: Tejun Heo <t...@kernel.org>
Cc: Ming Lei <ming....@redhat.com>
Cc: Bart Van Assche <bvanass...@acm.org>
Cc: Jens Axboe <ax...@kernel.dk>
---
 lib/percpu-refcount.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index de10b8c0bff6..9877682e49c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
        ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
 
        percpu_ref_get(ref);    /* put after confirmation */
-       call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+       call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
-- 
2.17.1
