On Wed, 3 Feb 2016 12:40:09 -0800 Andrew Morton <a...@linux-foundation.org> 
wrote:

> On Wed, 3 Feb 2016 08:51:11 -0800 Andrew Morton <a...@linux-foundation.org> 
> wrote:
> 
> > > Lockdep initialization must happen early on,
> > 
> > It should happen at compile time.
> 
> Mike asked "but why not just use hlist".  And indeed I think that fixes
> the problem because hlist_heads *are* initialized at compile time.  For
> them, NULL is the empty state.
> 
> This compiles.  Probably we're now doing some unnecessary initialization
> with this approach - lockdep_init() and lockdep_initialized can simply
> be zapped, I think?
> 
> Plus of course we save a buncha memory.

Builds, runs, works.


From: Andrew Morton <a...@linux-foundation.org>
Subject: kernel/locking/lockdep.c: convert hash tables to hlists

Mike said:

: CONFIG_UBSAN_ALIGNMENT breaks the x86-64 kernel with lockdep enabled,
: i.e. a kernel with CONFIG_UBSAN_ALIGNMENT fails to boot without any
: error message.
: 
: The problem is that the UBSAN callbacks use spinlocks and might be called
: before lockdep is initialized.  In particular, this line in the
: reserve_ebda_region() function causes the problem:
: 
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
: 
: If I put lockdep_init() before the reserve_ebda_region() call in
: x86_64_start_reservations(), the kernel boots fine.
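
A reconstruction of the failure chain (my reading of the report, not
something Mike spelled out):

    lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
      -> __ubsan_handle_type_mismatch()   /* compiler-inserted check */
           -> spinlock in the UBSAN report path
                -> lockdep walks a zeroed list_head bucket, whose ->next
                   is NULL instead of pointing back at the head, and the
                   kernel dies before the console is up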

Fix this ordering issue permanently: change lockdep so that it uses hlists
for the hash tables.  Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for operation
immediately upon boot - lockdep_init() need not have run.
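
To see why all-zeroes works for one and not the other, here is a
standalone sketch (userspace C; the struct shapes mirror the kernel's,
but this is an illustration, not kernel code):

    #include <stdio.h>

    /* Same shapes as the kernel's hlist_head/list_head. */
    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };
    struct list_head  { struct list_head *next, *prev; };

    /* Static storage is zero-filled, i.e. "initialized" for free. */
    static struct list_head  ltable[4];
    static struct hlist_head htable[4];

    int main(void)
    {
            /* list_empty() means head->next == head, which is false
             * while next is still NULL: a zeroed list_head is NOT a
             * valid empty list and needs runtime INIT_LIST_HEAD(). */
            printf("list empty?  %d\n", ltable[0].next == &ltable[0]);

            /* hlist_empty() means !head->first, which is true for
             * all-zeroes: a zeroed hlist_head is already empty. */
            printf("hlist empty? %d\n", !htable[0].first);
            return 0;
    }

This prints 0 for the list_head and 1 for the hlist_head.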

The patch also saves some memory: an hlist_head is a single pointer,
half the size of a list_head.
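
Back-of-the-envelope, assuming the usual CLASSHASH_SIZE of 4096 and
CHAINHASH_SIZE of 32768 buckets (derived from MAX_LOCKDEP_KEYS_BITS and
MAX_LOCKDEP_CHAINS_BITS; check your tree), each bucket shrinks from two
pointers to one:

    (4096 + 32768) buckets * 8 bytes/bucket = 288 KiB on 64-bit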

Probably lockdep_init() and lockdep_initialized can be done away with now.

Reported-by: Mike Krinkin <krinkin....@gmail.com>
Suggested-by: Mike Krinkin <krinkin....@gmail.com>
Cc: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: Ingo Molnar <mi...@elte.hu>
Cc: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
---

 include/linux/lockdep.h  |    4 +--
 kernel/locking/lockdep.c |   42 ++++++++++++++++++-------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff -puN kernel/locking/lockdep.c~kernel-locking-lockdepc-convert-hash-tables-to-hlists kernel/locking/lockdep.c
--- a/kernel/locking/lockdep.c~kernel-locking-lockdepc-convert-hash-tables-to-hlists
+++ a/kernel/locking/lockdep.c
@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
 #define __classhashfn(key)     hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)    (classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[
 #define __chainhashfn(chain)   hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)  (chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
        struct lockdep_subclass_key *key;
-       struct list_head *hash_head;
+       struct hlist_head *hash_head;
        struct lock_class *class;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *l
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return NULL;
 
-       list_for_each_entry_rcu(class, hash_head, hash_entry) {
+       hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
        struct lockdep_subclass_key *key;
-       struct list_head *hash_head;
+       struct hlist_head *hash_head;
        struct lock_class *class;
 
        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
-       list_for_each_entry_rcu(class, hash_head, hash_entry) {
+       hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key)
                        goto out_unlock_set;
        }
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
-       list_add_tail_rcu(&class->hash_entry, hash_head);
+       hlist_add_head_rcu(&class->hash_entry, hash_head);
        /*
         * Add it to the global list of classes:
         */
@@ -2017,7 +2017,7 @@ static inline int lookup_chain_cache(str
                                     u64 chain_key)
 {
        struct lock_class *class = hlock_class(hlock);
-       struct list_head *hash_head = chainhashentry(chain_key);
+       struct hlist_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;
        struct held_lock *hlock_curr;
        int i, j;
@@ -2033,7 +2033,7 @@ static inline int lookup_chain_cache(str
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
-       list_for_each_entry_rcu(chain, hash_head, entry) {
+       hlist_for_each_entry_rcu(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
 cache_hit:
                        debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2057,7 @@ cache_hit:
        /*
         * We have to walk the chain again locked - to avoid duplicates:
         */
-       list_for_each_entry(chain, hash_head, entry) {
+       hlist_for_each_entry(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
                        graph_unlock();
                        goto cache_hit;
@@ -2091,7 +2091,7 @@ cache_hit:
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
        }
-       list_add_tail_rcu(&chain->entry, hash_head);
+       hlist_add_head_rcu(&chain->entry, hash_head);
        debug_atomic_inc(chain_lookup_misses);
        inc_chains();
 
@@ -3875,7 +3875,7 @@ void lockdep_reset(void)
        nr_process_chains = 0;
        debug_locks = 1;
        for (i = 0; i < CHAINHASH_SIZE; i++)
-               INIT_LIST_HEAD(chainhash_table + i);
+               INIT_HLIST_HEAD(chainhash_table + i);
        raw_local_irq_restore(flags);
 }
 
@@ -3894,7 +3894,7 @@ static void zap_class(struct lock_class
        /*
         * Unhash the class and remove it from the all_lock_classes list:
         */
-       list_del_rcu(&class->hash_entry);
+       hlist_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
        RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3917,7 @@ static inline int within(const void *add
 void lockdep_free_key_range(void *start, unsigned long size)
 {
        struct lock_class *class;
-       struct list_head *head;
+       struct hlist_head *head;
        unsigned long flags;
        int i;
        int locked;
@@ -3930,9 +3930,9 @@ void lockdep_free_key_range(void *start,
         */
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
-               if (list_empty(head))
+               if (hlist_empty(head))
                        continue;
-               list_for_each_entry_rcu(class, head, hash_entry) {
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
                        if (within(class->key, start, size))
                                zap_class(class);
                        else if (within(class->name, start, size))
@@ -3962,7 +3962,7 @@ void lockdep_free_key_range(void *start,
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
        struct lock_class *class;
-       struct list_head *head;
+       struct hlist_head *head;
        unsigned long flags;
        int i, j;
        int locked;
@@ -3987,9 +3987,9 @@ void lockdep_reset_lock(struct lockdep_m
        locked = graph_lock();
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
-               if (list_empty(head))
+               if (hlist_empty(head))
                        continue;
-               list_for_each_entry_rcu(class, head, hash_entry) {
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
                        int match = 0;
 
                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4027,10 @@ void lockdep_init(void)
                return;
 
        for (i = 0; i < CLASSHASH_SIZE; i++)
-               INIT_LIST_HEAD(classhash_table + i);
+               INIT_HLIST_HEAD(classhash_table + i);
 
        for (i = 0; i < CHAINHASH_SIZE; i++)
-               INIT_LIST_HEAD(chainhash_table + i);
+               INIT_HLIST_HEAD(chainhash_table + i);
 
        lockdep_initialized = 1;
 }
diff -puN include/linux/lockdep.h~kernel-locking-lockdepc-convert-hash-tables-to-hlists include/linux/lockdep.h
--- a/include/linux/lockdep.h~kernel-locking-lockdepc-convert-hash-tables-to-hlists
+++ a/include/linux/lockdep.h
@@ -66,7 +66,7 @@ struct lock_class {
        /*
         * class-hash:
         */
-       struct list_head                hash_entry;
+       struct hlist_node               hash_entry;
 
        /*
         * global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
        u8                              irq_context;
        u8                              depth;
        u16                             base;
-       struct list_head                entry;
+       struct hlist_node               entry;
        u64                             chain_key;
 };
 
_
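
For readers unfamiliar with hlists, the conversion pattern in miniature:
a self-contained userspace analogue without the RCU variants (the helper
and struct names below are illustrative, not lockdep's):

    #include <stddef.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
            n->next = h->first;
            if (h->first)
                    h->first->pprev = &n->next;
            h->first = n;
            n->pprev = &h->first;
    }

    struct lock_class {                     /* stand-in for the real one */
            unsigned long key;
            struct hlist_node hash_entry;
    };

    #define HASH_SIZE 8
    /* All-zeroes is a valid empty table: no INIT_*_HEAD() loop needed. */
    static struct hlist_head classhash[HASH_SIZE];

    static struct lock_class *look_up(unsigned long key)
    {
            struct hlist_node *n;

            for (n = classhash[key % HASH_SIZE].first; n; n = n->next) {
                    struct lock_class *c =
                            container_of(n, struct lock_class, hash_entry);
                    if (c->key == key)
                            return c;
            }
            return NULL;
    }

    int main(void)
    {
            static struct lock_class c = { .key = 42 };

            hlist_add_head(&c.hash_entry, &classhash[42 % HASH_SIZE]);
            printf("found: %d\n", look_up(42) != NULL);
            return 0;
    }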
