Because DRn access is 'difficult' under virt, while on native a DR7
read is cheaper than a cacheline miss, add a virt-specific fast path
to local_db_save() such that, when breakpoints are not in use, we
avoid touching DRn entirely.
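
For reference, the resulting local_db_save() reads roughly as follows.
This is a sketch stitched together from the debugreg.h hunks below; the
tail past the if (dr7) test is reconstructed and may differ in detail
from the actual file:

static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	/*
	 * Under virt, a DR7 read traps to the hypervisor; skip DRn
	 * entirely when cpu_dr7 says no breakpoints are installed.
	 * On native, the DR7 read is cheaper than a potential
	 * cpu_dr7 cacheline miss, so read the register directly.
	 */
	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit */
	if (dr7)
		set_debugreg(0UL, 7);	/* reconstructed: disarm breakpoints */

	return dr7;			/* reconstructed */
}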

Suggested-by: Andy Lutomirski <l...@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/include/asm/debugreg.h |    7 ++++++-
 arch/x86/kernel/hw_breakpoint.c |   26 ++++++++++++++++++++++----
 arch/x86/kvm/vmx/nested.c       |    2 +-
 3 files changed, 29 insertions(+), 6 deletions(-)

--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -85,8 +85,8 @@ static inline void hw_breakpoint_disable
        set_debugreg(0UL, 3);
 }
 
-static inline int hw_breakpoint_active(void)
+static inline bool hw_breakpoint_active(void)
 {
        return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
@@ -117,6 +119,9 @@ static __always_inline unsigned long loc
 {
        unsigned long dr7;
 
+       if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
+               return 0;
+
        get_debugreg(dr7, 7);
        dr7 &= ~0x400; /* architecturally set bit */
        if (dr7)
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -99,6 +99,8 @@ int arch_install_hw_breakpoint(struct pe
        unsigned long *dr7;
        int i;
 
+       lockdep_assert_irqs_disabled();
+
        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
@@ -117,6 +119,12 @@ int arch_install_hw_breakpoint(struct pe
        dr7 = this_cpu_ptr(&cpu_dr7);
        *dr7 |= encode_dr7(i, info->len, info->type);
 
+       /*
+        * Ensure we first write cpu_dr7 before we set the DR7 register.
+        * This ensures an NMI never sees cpu_dr7 == 0 while DR7 is not.
+        */
+       barrier();
+
        set_debugreg(*dr7, 7);
        if (info->mask)
                set_dr_addr_mask(info->mask, i);
@@ -136,9 +144,11 @@ int arch_install_hw_breakpoint(struct pe
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-       unsigned long *dr7;
+       unsigned long dr7;
        int i;
 
+       lockdep_assert_irqs_disabled();
+
        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
@@ -151,12 +161,20 @@ void arch_uninstall_hw_breakpoint(struct
        if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
                return;
 
-       dr7 = this_cpu_ptr(&cpu_dr7);
-       *dr7 &= ~__encode_dr7(i, info->len, info->type);
+       dr7 = this_cpu_read(cpu_dr7);
+       dr7 &= ~__encode_dr7(i, info->len, info->type);
 
-       set_debugreg(*dr7, 7);
+       set_debugreg(dr7, 7);
        if (info->mask)
                set_dr_addr_mask(0, i);
+
+       /*
+        * Ensure the write to cpu_dr7 happens after we've set the DR7 register.
+        * This ensures an NMI never sees cpu_dr7 == 0 while DR7 is not.
+        */
+       barrier();
+
+       this_cpu_write(cpu_dr7, dr7);
 }
 
 static int arch_bp_generic_len(int x86_len)
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3028,9 +3028,9 @@ static int nested_vmx_check_vmentry_hw(s
        /*
         * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
         */
-       local_irq_enable();
        if (hw_breakpoint_active())
                set_debugreg(__this_cpu_read(cpu_dr7), 7);
+       local_irq_enable();
        preempt_enable();
 
        /*
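
Note the invariant the two barrier()s establish: install publishes
cpu_dr7 before writing DR7, and uninstall clears DR7 before updating
cpu_dr7, so at any instant cpu_dr7 == 0 implies DR7 has no enabled
breakpoints. That is exactly what the NMI-time fast path above relies
on. The nested VMX hunk follows from the same invariant in the other
direction: VMExit clears DR7 behind our back, so restoring it before
local_irq_enable() closes the window where an interrupt could run with
breakpoints disarmed while cpu_dr7 still advertises them as active.

A minimal illustration of the invariant; check_dr7_invariant() is a
hypothetical debug helper, not part of the patch. It uses cpu_dr7 and
DR_GLOBAL_ENABLE_MASK from <asm/debugreg.h> and must run with IRQs
disabled so the per-CPU read is stable:

static void check_dr7_invariant(void)
{
	unsigned long hw_dr7;

	get_debugreg(hw_dr7, 7);

	/* cpu_dr7 == 0 must imply no globally enabled breakpoints in DR7. */
	WARN_ON_ONCE(!(__this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK) &&
		     (hw_dr7 & DR_GLOBAL_ENABLE_MASK));
}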

