The following commit has been merged into the locking/core branch of tip:

Commit-ID:     2f0df49c89acaa58571d509830bc481250699885
Gitweb:        https://git.kernel.org/tip/2f0df49c89acaa58571d509830bc481250699885
Author:        Steven Rostedt (VMware) <rost...@goodmis.org>
AuthorDate:    Fri, 11 Dec 2020 16:37:54 -05:00
Committer:     Peter Zijlstra <pet...@infradead.org>
CommitterDate: Fri, 22 Jan 2021 11:08:56 +01:00

jump_label: Do not profile branch annotations

While running my branch profiler, which checks for incorrect "likely" and
"unlikely" annotations around the kernel, a large number of them are
reported as incorrect simply because they are static_branches.

Static branches are special: they are marked likely or unlikely for
reasons other than the ones normal annotations are used for, so there is
no point in profiling them.

Expose "unlikely_notrace" and "likely_notrace" so that static_branch can
use them and be ignored by the branch profilers.

Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Link: https://lkml.kernel.org/r/20201211163754.58517...@gandalf.local.home
---
 include/linux/compiler.h   |  2 ++
 include/linux/jump_label.h | 12 ++++++------
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b8fe0c2..df5b405 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -76,6 +76,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #else
 # define likely(x)     __builtin_expect(!!(x), 1)
 # define unlikely(x)   __builtin_expect(!!(x), 0)
+# define likely_notrace(x)     likely(x)
+# define unlikely_notrace(x)   unlikely(x)
 #endif
 
 /* Optimization barrier */
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3280962..d926912 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -261,14 +261,14 @@ static __always_inline void jump_label_init(void)
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-       if (unlikely(static_key_count(key) > 0))
+       if (unlikely_notrace(static_key_count(key) > 0))
                return true;
        return false;
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-       if (likely(static_key_count(key) > 0))
+       if (likely_notrace(static_key_count(key) > 0))
                return true;
        return false;
 }
@@ -460,7 +460,7 @@ extern bool ____wrong_branch_error(void);
                branch = !arch_static_branch_jump(&(x)->key, true);            \
        else                                                                    \
                branch = ____wrong_branch_error();                              \
-       likely(branch);                                                         \
+       likely_notrace(branch);                                                 \
 })
 
#define static_branch_unlikely(x)                                               \
@@ -472,13 +472,13 @@ extern bool ____wrong_branch_error(void);
                branch = arch_static_branch(&(x)->key, false);                  \
        else                                                                    \
                branch = ____wrong_branch_error();                              \
-       unlikely(branch);                                                       \
+       unlikely_notrace(branch);                                               \
 })
 
 #else /* !CONFIG_JUMP_LABEL */
 
-#define static_branch_likely(x)                likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x)      unlikely(static_key_enabled(&(x)->key))
+#define static_branch_likely(x)                likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x)      unlikely_notrace(static_key_enabled(&(x)->key))
 
 #endif /* CONFIG_JUMP_LABEL */
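
As a rough illustration of what this change affects, here is a minimal
sketch of a typical static_branch user; it is not part of the patch, and
the key and function names are made up for the example. After this change
the hint inside static_branch_unlikely() expands to unlikely_notrace(),
so the annotated-branch profiler (CONFIG_PROFILE_ANNOTATED_BRANCHES) no
longer records it.

/* Hypothetical example, not from this patch: a typical static_branch user. */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static void example_slow_feature(void)
{
	/* rarely executed work, stubbed out for the example */
}

void example_hot_path(void)
{
	/*
	 * The "unlikely" hint here reflects how the static branch is laid
	 * out and patched by the jump label machinery, not a prediction
	 * the profiler should be checking.  With this patch it expands to
	 * unlikely_notrace() and is skipped by the branch profiler.
	 */
	if (static_branch_unlikely(&example_feature_key))
		example_slow_feature();
}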
 
