From: Nadav Amit <na...@vmware.com>

The compiler is smart enough to make good inlining decisions on its own,
so remove the unnecessary 'inline' hints from these functions.

Cc: Andy Lutomirski <l...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Suggested-by: Dave Hansen <dave.han...@linux.intel.com>
Reviewed-by: Dave Hansen <dave.han...@linux.intel.com>
Signed-off-by: Nadav Amit <na...@vmware.com>
---
 arch/x86/mm/tlb.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 17ec4bfeee67..f4b162f273f5 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -316,7 +316,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
-static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
@@ -880,7 +880,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
 #endif
 
-static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int stride_shift, bool freed_tables,
                        u64 new_tlb_gen)
@@ -907,7 +907,7 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
        return info;
 }
 
-static inline void put_flush_tlb_info(void)
+static void put_flush_tlb_info(void)
 {
 #ifdef CONFIG_DEBUG_VM
        /* Complete reentrancy prevention checks */
-- 
2.25.1