On Tue, Nov 19, 2024 at 04:35:00PM +0100, Valentin Schneider wrote:

> @@ -418,9 +419,20 @@ static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
>  #endif
>  #endif /* !MODULE */
>  
> +#define __NATIVE_TLB_FLUSH_GLOBAL(suffix, cr4)               \
> +     native_write_cr4##suffix(cr4 ^ X86_CR4_PGE);    \
> +     native_write_cr4##suffix(cr4)
> +#define NATIVE_TLB_FLUSH_GLOBAL(cr4)         __NATIVE_TLB_FLUSH_GLOBAL(, cr4)
> +#define NATIVE_TLB_FLUSH_GLOBAL_NOINSTR(cr4) __NATIVE_TLB_FLUSH_GLOBAL(_noinstr, cr4)
> +
>  static inline void __native_tlb_flush_global(unsigned long cr4)
>  {
> -     native_write_cr4(cr4 ^ X86_CR4_PGE);
> -     native_write_cr4(cr4);
> +     NATIVE_TLB_FLUSH_GLOBAL(cr4);
>  }
> +
> +static inline void __native_tlb_flush_global_noinstr(unsigned long cr4)
> +{
> +     NATIVE_TLB_FLUSH_GLOBAL_NOINSTR(cr4);
> +}

How about something like this instead? I've only compile-tested the
tlb.c bit, but I think it should get __flush_tlb_global() to be
noinstr, including the Xen bit (unless I missed something, but then
objtool should complain).
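
The rule applied throughout: anything called from a noinstr function
has to be __always_inline so it gets folded into .noinstr.text along
with its caller; a plain inline can be emitted out-of-line into
.text, which is instrumentable, and objtool then complains about the
cross-section call. Rough sketch of the pattern (made-up names, not
part of the patch below):

static __always_inline void helper_ok(void)
{
	/* Always folded into the caller, so it inherits .noinstr.text. */
	barrier();
}

static inline void helper_bad(void)
{
	/*
	 * May be emitted out-of-line in .text, which is instrumentable;
	 * calling it from noinstr code would trip objtool's validation.
	 */
}

noinstr void some_noinstr_func(void)
{
	helper_ok();
	/* helper_bad(); <- objtool would flag this call */
}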

---
diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
index 734482afbf81..ff26136fcd9c 100644
--- a/arch/x86/include/asm/invpcid.h
+++ b/arch/x86/include/asm/invpcid.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_INVPCID
 #define _ASM_X86_INVPCID
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
+static __always_inline void __invpcid(unsigned long pcid, unsigned long addr,
                             unsigned long type)
 {
        struct { u64 d[2]; } desc = { { pcid, addr } };
@@ -13,7 +13,7 @@ static inline void __invpcid(unsigned long pcid, unsigned long addr,
         * mappings, we don't want the compiler to reorder any subsequent
         * memory accesses before the TLB flush.
         */
-       asm volatile("invpcid %[desc], %[type]"
+       asm_inline volatile("invpcid %[desc], %[type]"
                     :: [desc] "m" (desc), [type] "r" (type) : "memory");
 }
 
@@ -23,26 +23,25 @@ static inline void __invpcid(unsigned long pcid, unsigned long addr,
 #define INVPCID_TYPE_ALL_NON_GLOBAL    3
 
 /* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-                                    unsigned long addr)
+static __always_inline void invpcid_flush_one(unsigned long pcid, unsigned long addr)
 {
        __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
 }
 
 /* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+static __always_inline void invpcid_flush_single_context(unsigned long pcid)
 {
        __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
 }
 
 /* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+static __always_inline void invpcid_flush_all(void)
 {
        __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
 }
 
 /* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+static __always_inline void invpcid_flush_all_nonglobals(void)
 {
        __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
 }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d4eb9e1d61b8..b3daee3d4667 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -75,7 +75,7 @@ static inline void __flush_tlb_local(void)
        PVOP_VCALL0(mmu.flush_tlb_user);
 }
 
-static inline void __flush_tlb_global(void)
+static __always_inline void __flush_tlb_global(void)
 {
        PVOP_VCALL0(mmu.flush_tlb_kernel);
 }
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index a2dd24947eb8..b4c635b20538 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -357,8 +357,8 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
        trace_xen_mc_entry(mcl, 4);
 }
 
-static inline void
-MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+static __always_inline void
+__MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
                int *success_count, domid_t domid)
 {
        mcl->op = __HYPERVISOR_mmuext_op;
@@ -366,6 +366,13 @@ MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
        mcl->args[1] = count;
        mcl->args[2] = (unsigned long)success_count;
        mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+               int *success_count, domid_t domid)
+{
+       __MULTI_mmuext_op(mcl, op, count, success_count, domid);
 
        trace_xen_mc_entry(mcl, 4);
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index b0d5a644fc84..0cfc00a34b7e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1168,9 +1168,10 @@ void flush_tlb_one_user(unsigned long addr)
 /*
  * Flush everything
  */
-STATIC_NOPV void native_flush_tlb_global(void)
+STATIC_NOPV noinstr void native_flush_tlb_global(void)
 {
        unsigned long flags;
+       unsigned long cr4;
 
        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
@@ -1189,9 +1190,15 @@ STATIC_NOPV void native_flush_tlb_global(void)
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);
-
-       __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));
-
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
+       asm volatile("mov %0,%%cr4": : "r" (cr4 ^ X86_CR4_PGE) : "memory");
+       asm volatile("mov %0,%%cr4": : "r" (cr4) : "memory");
+       /*
+        * In lieu of the CR4 pinning checks (which the raw writes above
+        * bypass), hard fail if CR4 doesn't match the expected value. This
+        * ensures that anybody doing something dodgy still gets caught.
+        */
+       BUG_ON(cr4 != this_cpu_read(cpu_tlbstate.cr4));
        raw_local_irq_restore(flags);
 }
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 55a4996d0c04..4eb265eb867a 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1231,22 +1231,22 @@ static noinstr void xen_write_cr2(unsigned long cr2)
        this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
 }
 
-static noinline void xen_flush_tlb(void)
+static noinline noinstr void xen_flush_tlb(void)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       preempt_disable();
+       preempt_disable_notrace();
 
        mcs = xen_mc_entry(sizeof(*op));
 
        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+       __MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-       xen_mc_issue(XEN_LAZY_MMU);
+       __xen_mc_issue(XEN_LAZY_MMU);
 
-       preempt_enable();
+       preempt_enable_notrace();
 }
 
 static void xen_flush_tlb_one_user(unsigned long addr)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index e1b782e823e6..31eddca45c27 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -235,15 +235,19 @@ static inline struct multicall_space xen_mc_entry(size_t args)
 void xen_mc_flush(void);
 
 /* Issue a multicall if we're not in a lazy mode */
-static inline void xen_mc_issue(unsigned mode)
+static __always_inline void __xen_mc_issue(unsigned mode)
 {
-       trace_xen_mc_issue(mode);
-
        if ((xen_get_lazy_mode() & mode) == 0)
                xen_mc_flush();
 
        /* restore flags saved in xen_mc_batch */
-       local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+       raw_local_irq_restore(this_cpu_read(xen_mc_irq_flags));
+}
+
+static inline void xen_mc_issue(unsigned mode)
+{
+       trace_xen_mc_issue(mode);
+       __xen_mc_issue(mode);
 }
 
 /* Set up a callback to be called when the current batch is flushed */
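
The Xen side uses the same split in two places (MULTI_mmuext_op and
xen_mc_issue): the actual work moves into an __always_inline
double-underscore variant that the noinstr path calls directly, while
the old name stays an instrumentable wrapper that keeps the
tracepoint. Schematically (made-up names, not from the patch):

static __always_inline void __do_op(int arg)
{
	/* The real work; safe to call from noinstr code. */
}

static inline void do_op(int arg)
{
	trace_do_op(arg);	/* hypothetical tracepoint, instrumented callers only */
	__do_op(arg);
}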
