The comments regarding the initial chain key and the BFS parent field are outdated, so update them to match the current implementation.
Signed-off-by: Yuyang Du <duyuy...@gmail.com> --- include/linux/lockdep.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index d7bec61..0246a70 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -195,8 +195,7 @@ struct lock_list { int distance; /* - * The parent field is used to implement breadth-first search, and the - * bit 0 is reused to indicate if the lock has been accessed in BFS. + * The parent field is used to implement breadth-first search. */ struct lock_list *parent; }; @@ -239,7 +238,7 @@ struct held_lock { * as likely as possible - hence the 64-bit width. * * The task struct holds the current hash value (initialized - * with zero), here we store the previous hash value: + * with INITIAL_CHAIN_KEY), here we store the previous hash value: */ u64 prev_chain_key; unsigned long acquire_ip; @@ -258,12 +257,12 @@ struct held_lock { /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' - * the hashes by starting with 0 if we cross into an interrupt - * context, and we also keep do not add cross-context lock - * dependencies - the lock usage graph walking covers that area - * anyway, and we'd just unnecessarily increase the number of - * dependencies otherwise. [Note: hardirq and softirq contexts - * are separated from each other too.] + * the hashes by starting with a new chain if we cross into an + * interrupt context, and we also keep not adding cross-context + * lock dependencies - the lock usage graph walking covers that + * area anyway, and we'd just unnecessarily increase the number + * of dependencies otherwise. [Note: hardirq and softirq + * contexts are separated from each other too.] * * The following field is used to detect when we cross into an * interrupt context: -- 1.8.3.1