I noticed this vmlinux layout on i686 (where CONFIG_X86_L1_CACHE_SHIFT = 7, i.e. 128-byte cache lines):

c06cdab4 d pid_caches_lh
c06cdb00 d qlowmark
c06cdb04 d qhimark
c06cdb08 d blimit
c06cdb80 d rcu_ctrlblk
c06cdc80 d rcu_bh_ctrlblk
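
For the record, a layout like this can be dumped with an address-sorted
symbol listing; the grep filter here is only illustrative:

# nm -n vmlinux | grep -E 'qlowmark|qhimark|blimit|rcu_ctrlblk'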

This means that qlowmark, qhimark and blimit take a whole 128-byte cache line for themselves: the three ints end at c06cdb0c, and the remaining 116 bytes up to the next (cacheline-aligned) symbol, rcu_ctrlblk at c06cdb80, are padding. The linker is not smart enough to pack them for us.

Moving these three variables to the __read_mostly section saves 116 (128 - 12) bytes.
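
For context, __read_mostly on x86 boils down to a section attribute; a
minimal sketch of the mechanism (the exact section name differs between
kernel versions, so treat this as illustrative rather than the literal
kernel header):

/* Sketch, not the real header; example_tunable is a made-up name. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/*
 * Variables tagged this way land in .data.read_mostly, which the
 * linker script places apart from frequently written data, so they
 * share cache lines with other read-mostly variables instead of
 * padding out a whole line of their own.
 */
static int example_tunable __read_mostly = 42;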

# size vmlinux vmlinux.before_patch
   text    data     bss     dec     hex filename
6343966  490818  630784 7465568  71ea60 vmlinux
6343966  490930  630784 7465680  71ead0 vmlinux.before_patch

Signed-off-by: Eric Dumazet <[EMAIL PROTECTED]>
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a66d4d1..11c815c 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -75,9 +75,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int blimit = 10;
-static int qhimark = 10000;
-static int qlowmark = 100;
+static int blimit __read_mostly = 10;
+static int qhimark __read_mostly = 10000;
+static int qlowmark __read_mostly = 100;
 
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
