Because DRn access is 'difficult' with virt, but the DR7 read is
cheaper than a cacheline miss on native, add a virt-specific
fast path to local_db_save(), such that when breakpoints are not in
use we avoid touching DRn entirely.

Suggested-by: Andy Lutomirski <l...@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/include/asm/debugreg.h |    6 ++++++
 arch/x86/kernel/hw_breakpoint.c |   26 ++++++++++++++++++++++----
 arch/x86/kvm/vmx/nested.c       |    2 ++
 3 files changed, 30 insertions(+), 4 deletions(-)

--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -115,6 +115,12 @@ static inline void debug_stack_usage_dec
 
 static __always_inline void local_db_save(unsigned long *dr7)
 {
+       if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active()) {
+               *dr7 = 0;
+               barrier();
+               return;
+       }
+
        get_debugreg(*dr7, 7);
        if (*dr7)
                set_debugreg(0, 7);
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -97,6 +97,8 @@ int arch_install_hw_breakpoint(struct pe
        unsigned long *dr7;
        int i;
 
+       lockdep_assert_irqs_disabled();
+
        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
@@ -115,6 +117,12 @@ int arch_install_hw_breakpoint(struct pe
        dr7 = this_cpu_ptr(&cpu_dr7);
        *dr7 |= encode_dr7(i, info->len, info->type);
 
+       /*
+        * Ensure we first write cpu_dr7 before we set the DR7 register.
+        * This ensures an NMI never sees cpu_dr7 == 0 while DR7 is not.
+        */
+       barrier();
+
        set_debugreg(*dr7, 7);
        if (info->mask)
                set_dr_addr_mask(info->mask, i);
@@ -134,9 +142,11 @@ int arch_install_hw_breakpoint(struct pe
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-       unsigned long *dr7;
+       unsigned long dr7;
        int i;
 
+       lockdep_assert_irqs_disabled();
+
        for (i = 0; i < HBP_NUM; i++) {
                struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
@@ -149,12 +159,20 @@ void arch_uninstall_hw_breakpoint(struct
        if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
                return;
 
-       dr7 = this_cpu_ptr(&cpu_dr7);
-       *dr7 &= ~__encode_dr7(i, info->len, info->type);
+       dr7 = this_cpu_read(cpu_dr7);
+       dr7 &= ~__encode_dr7(i, info->len, info->type);
 
-       set_debugreg(*dr7, 7);
+       set_debugreg(dr7, 7);
        if (info->mask)
                set_dr_addr_mask(0, i);
+
+       /*
+        * Ensure the write to cpu_dr7 is after we've set the DR7 register.
+        * This ensures an NMI never sees cpu_dr7 == 0 while DR7 is not.
+        */
+       barrier();
+
+       this_cpu_write(cpu_dr7, dr7);
 }
 
 static int arch_bp_generic_len(int x86_len)
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3027,6 +3027,8 @@ static int nested_vmx_check_vmentry_hw(s
 
        /*
         * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
+        * XXX how is this not broken? access to cpu_dr7 ought to be with
+        * IRQs disabled.
         */
        local_irq_enable();
        if (hw_breakpoint_active())


Reply via email to