Currently, have_rcu_nocb_mask is used to avoid double allocation of
rcu_nocb_mask during boot. The flag was needed because the
representation of cpumask_var_t differs depending on
CONFIG_CPUMASK_OFFSTACK=y (or n), so the mask could not be tested
directly. Now that we have the helper cpumask_available(), it can be
used to check whether rcu_nocb_mask has been allocated, without a
separate flag variable.
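
For reference, cpumask_available() reduces to a NULL check only when
the mask is dynamically allocated; the following is a rough sketch of
its behavior (the exact definitions live in include/linux/cpumask.h):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Mask is a pointer, NULL until allocated. */
	typedef struct cpumask *cpumask_var_t;

	static inline bool cpumask_available(cpumask_var_t mask)
	{
		return mask != NULL;
	}
	#else
	/* Mask is on-stack storage, so it is always "available". */
	typedef struct cpumask cpumask_var_t[1];

	static inline bool cpumask_available(cpumask_var_t mask)
	{
		return true;
	}
	#endif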

Removing the variable also reduces vmlinux size.

Unpatched version:
    text     data       bss       dec      hex  filename
13050393  7852470  14543408  35446271  21cddff  vmlinux

Patched version:
    text     data       bss       dec      hex  filename
13050390  7852438  14543408  35446236  21cdddc  vmlinux
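
(Sizes as reported by running the binutils "size" utility on the built
image, e.g. "size vmlinux".)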

Signed-off-by: Rakib Mullick <rakib.mull...@gmail.com>
Cc: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
Cc: Josh Triplett <j...@joshtriplett.org>
Cc: Steven Rostedt <rost...@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
Cc: Lai Jiangshan <jiangshan...@gmail.com>
---
Patch applies on top of Linus's tree (commit cf9b0772f2e41).

 kernel/rcu/tree_plugin.h | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index db85ca3..13a8e08 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -61,7 +61,6 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool have_rcu_nocb_mask;            /* Was rcu_nocb_mask allocated? */
 static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -1752,7 +1751,6 @@ static void increment_cpu_stall_ticks(void)
 static int __init rcu_nocb_setup(char *str)
 {
        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       have_rcu_nocb_mask = true;
        cpulist_parse(str, rcu_nocb_mask);
        return 1;
 }
@@ -1801,7 +1799,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 /* Is the specified CPU a no-CBs CPU? */
 bool rcu_is_nocb_cpu(int cpu)
 {
-       if (have_rcu_nocb_mask)
+       if (cpumask_available(rcu_nocb_mask))
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
        return false;
 }
@@ -2295,14 +2293,13 @@ void __init rcu_init_nohz(void)
                need_rcu_nocb_mask = true;
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
-       if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
                if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
                        pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
                        return;
                }
-               have_rcu_nocb_mask = true;
        }
-       if (!have_rcu_nocb_mask)
+       if (!cpumask_available(rcu_nocb_mask))
                return;
 
 #if defined(CONFIG_NO_HZ_FULL)
@@ -2428,7 +2425,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
        struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
        struct rcu_data *rdp_prev = NULL;
 
-       if (!have_rcu_nocb_mask)
+       if (!cpumask_available(rcu_nocb_mask))
                return;
        if (ls == -1) {
                ls = int_sqrt(nr_cpu_ids);
-- 
2.9.3
