The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=cc1cb9ea0c5607931fa9b7ecf786468d38fb8491

commit cc1cb9ea0c5607931fa9b7ecf786468d38fb8491
Author:     John Baldwin <j...@freebsd.org>
AuthorDate: 2023-10-11 21:32:06 +0000
Commit:     John Baldwin <j...@freebsd.org>
CommitDate: 2023-10-11 21:32:06 +0000

    x86: Rename {stop,start}_emulating to fpu_{enable,disable}
    
    While here, centralize the macros in <x86/fpu.h>.
    
    Reviewed by:    markj
    Differential Revision:  https://reviews.freebsd.org/D42135
---
 sys/amd64/amd64/fpu.c | 31 ++++++++++++++-----------------
 sys/amd64/vmm/vmm.c   | 15 ++++++---------
 sys/i386/i386/npx.c   | 35 ++++++++++++++++-------------------
 sys/x86/include/fpu.h |  9 +++++++++
 4 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
index c5b84d035d91..3432e62bf7f8 100644
--- a/sys/amd64/amd64/fpu.c
+++ b/sys/amd64/amd64/fpu.c
@@ -142,9 +142,6 @@ xsaveopt64(char *addr, uint64_t mask)
            "memory");
 }
 
-#define        start_emulating()       load_cr0(rcr0() | CR0_TS)
-#define        stop_emulating()        clts()
-
 CTASSERT(sizeof(struct savefpu) == 512);
 CTASSERT(sizeof(struct xstate_hdr) == 64);
 CTASSERT(sizeof(struct savefpu_ymm) == 832);
@@ -263,7 +260,7 @@ fpususpend(void *addr)
        u_long cr0;
 
        cr0 = rcr0();
-       stop_emulating();
+       fpu_enable();
        fpusave(addr);
        load_cr0(cr0);
 }
@@ -274,7 +271,7 @@ fpuresume(void *addr)
        u_long cr0;
 
        cr0 = rcr0();
-       stop_emulating();
+       fpu_enable();
        fninit();
        if (use_xsave)
                load_xcr(XCR0, xsave_mask);
@@ -393,13 +390,13 @@ fpuinit(void)
         * It is too early for critical_enter() to work on AP.
         */
        saveintr = intr_disable();
-       stop_emulating();
+       fpu_enable();
        fninit();
        control = __INITIAL_FPUCW__;
        fldcw(control);
        mxcsr = __INITIAL_MXCSR__;
        ldmxcsr(mxcsr);
-       start_emulating();
+       fpu_disable();
        intr_restore(saveintr);
        TSEXIT();
 }
@@ -430,7 +427,7 @@ fpuinitstate(void *arg __unused)
        cpu_thread_alloc(&thread0);
 
        saveintr = intr_disable();
-       stop_emulating();
+       fpu_enable();
 
        fpusave_fxsave(fpu_initialstate);
        if (fpu_initialstate->sv_env.en_mxcsr_mask)
@@ -473,7 +470,7 @@ fpuinitstate(void *arg __unused)
                }
        }
 
-       start_emulating();
+       fpu_disable();
        intr_restore(saveintr);
 }
/* EFIRT needs this to be initialized before we can enter our EFI environment */
@@ -488,9 +485,9 @@ fpuexit(struct thread *td)
 
        critical_enter();
        if (curthread == PCPU_GET(fpcurthread)) {
-               stop_emulating();
+               fpu_enable();
                fpusave(curpcb->pcb_save);
-               start_emulating();
+               fpu_disable();
                PCPU_SET(fpcurthread, NULL);
        }
        critical_exit();
@@ -741,7 +738,7 @@ restore_fpu_curthread(struct thread *td)
         */
        PCPU_SET(fpcurthread, td);
 
-       stop_emulating();
+       fpu_enable();
        fpu_clean_state();
        pcb = td->td_pcb;
 
@@ -803,7 +800,7 @@ fpudna(void)
                 * regardless of the eager/lazy FPU context switch
                 * mode.
                 */
-               stop_emulating();
+               fpu_enable();
        } else {
                if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
                        panic(
@@ -823,7 +820,7 @@ fpu_activate_sw(struct thread *td)
 
        if ((td->td_pflags & TDP_KTHREAD) != 0 || !PCB_USER_FPU(td->td_pcb)) {
                PCPU_SET(fpcurthread, NULL);
-               start_emulating();
+               fpu_disable();
        } else if (PCPU_GET(fpcurthread) != td) {
                restore_fpu_curthread(td);
        }
@@ -839,7 +836,7 @@ fpudrop(void)
        CRITICAL_ASSERT(td);
        PCPU_SET(fpcurthread, NULL);
        clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
-       start_emulating();
+       fpu_disable();
 }
 
 /*
@@ -1139,7 +1136,7 @@ fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
 
        if ((flags & FPU_KERN_NOCTX) != 0) {
                critical_enter();
-               stop_emulating();
+               fpu_enable();
                if (curthread == PCPU_GET(fpcurthread)) {
                        fpusave(curpcb->pcb_save);
                        PCPU_SET(fpcurthread, NULL);
@@ -1190,7 +1187,7 @@ fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
                CRITICAL_ASSERT(td);
 
                clear_pcb_flags(pcb,  PCB_FPUNOSAVE | PCB_FPUINITDONE);
-               start_emulating();
+               fpu_disable();
        } else {
                KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
                    ("leaving not inuse ctx"));
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index bf5173d9d592..64ba16cc8969 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -256,9 +256,6 @@ DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
 DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
 #endif
 
-#define        fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
-#define        fpu_stop_emulating()    clts()
-
 SDT_PROVIDER_DEFINE(vmm);
 
 static MALLOC_DEFINE(M_VM, "vm", "vm");
@@ -1291,7 +1288,7 @@ restore_guest_fpustate(struct vcpu *vcpu)
        fpuexit(curthread);
 
        /* restore guest FPU state */
-       fpu_stop_emulating();
+       fpu_enable();
        fpurestore(vcpu->guestfpu);
 
        /* restore guest XCR0 if XSAVE is enabled in the host */
@@ -1299,10 +1296,10 @@ restore_guest_fpustate(struct vcpu *vcpu)
                load_xcr(0, vcpu->guest_xcr0);
 
        /*
-        * The FPU is now "dirty" with the guest's state so turn on emulation
-        * to trap any access to the FPU by the host.
+        * The FPU is now "dirty" with the guest's state so disable
+        * the FPU to trap any access by the host.
         */
-       fpu_start_emulating();
+       fpu_disable();
 }
 
 static void
@@ -1319,9 +1316,9 @@ save_guest_fpustate(struct vcpu *vcpu)
        }
 
        /* save guest FPU state */
-       fpu_stop_emulating();
+       fpu_enable();
        fpusave(vcpu->guestfpu);
-       fpu_start_emulating();
+       fpu_disable();
 }
 
 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
diff --git a/sys/i386/i386/npx.c b/sys/i386/i386/npx.c
index 689796821d80..26def3f433f1 100644
--- a/sys/i386/i386/npx.c
+++ b/sys/i386/i386/npx.c
@@ -123,9 +123,6 @@ xsaveopt(char *addr, uint64_t mask)
            "memory");
 }
 
-#define        start_emulating()       load_cr0(rcr0() | CR0_TS)
-#define        stop_emulating()        clts()
-
 #define GET_FPU_CW(thread) \
        (cpu_fxsr ? \
                (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
@@ -224,7 +221,7 @@ npx_probe(void)
        /*
         * Don't trap while we're probing.
         */
-       stop_emulating();
+       fpu_enable();
 
        /*
         * Finish resetting the coprocessor, if any.  If there is an error
@@ -413,7 +410,7 @@ npxinit(bool bsp)
         * It is too early for critical_enter() to work on AP.
         */
        saveintr = intr_disable();
-       stop_emulating();
+       fpu_enable();
        if (cpu_fxsr)
                fninit();
        else
@@ -424,7 +421,7 @@ npxinit(bool bsp)
                mxcsr = __INITIAL_MXCSR__;
                ldmxcsr(mxcsr);
        }
-       start_emulating();
+       fpu_disable();
        intr_restore(saveintr);
 }
 
@@ -458,7 +455,7 @@ npxinitstate(void *arg __unused)
        }
 
        saveintr = intr_disable();
-       stop_emulating();
+       fpu_enable();
 
        if (cpu_fxsr)
                fpusave_fxsave(npx_initialstate);
@@ -515,7 +512,7 @@ npxinitstate(void *arg __unused)
                }
        }
 
-       start_emulating();
+       fpu_disable();
        intr_restore(saveintr);
 }
 SYSINIT(npxinitstate, SI_SUB_CPU, SI_ORDER_ANY, npxinitstate, NULL);
@@ -529,9 +526,9 @@ npxexit(struct thread *td)
 
        critical_enter();
        if (curthread == PCPU_GET(fpcurthread)) {
-               stop_emulating();
+               fpu_enable();
                fpusave(curpcb->pcb_save);
-               start_emulating();
+               fpu_disable();
                PCPU_SET(fpcurthread, NULL);
        }
        critical_exit();
@@ -810,7 +807,7 @@ restore_npx_curthread(struct thread *td, struct pcb *pcb)
         */
        PCPU_SET(fpcurthread, td);
 
-       stop_emulating();
+       fpu_enable();
        if (cpu_fxsr)
                fpu_clean_state();
 
@@ -863,7 +860,7 @@ npxdna(void)
                 * regardless of the eager/lazy FPU context switch
                 * mode.
                 */
-               stop_emulating();
+               fpu_enable();
        } else {
                if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
                        printf(
@@ -891,7 +888,7 @@ void
 npxsave(union savefpu *addr)
 {
 
-       stop_emulating();
+       fpu_enable();
        fpusave(addr);
 }
 
@@ -902,7 +899,7 @@ npxswitch(struct thread *td, struct pcb *pcb)
 
        if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
            !PCB_USER_FPU(pcb)) {
-               start_emulating();
+               fpu_disable();
                PCPU_SET(fpcurthread, NULL);
        } else if (PCPU_GET(fpcurthread) != td) {
                restore_npx_curthread(td, pcb);
@@ -925,7 +922,7 @@ npxsuspend(union savefpu *addr)
                return;
        }
        cr0 = rcr0();
-       stop_emulating();
+       fpu_enable();
        fpusave(addr);
        load_cr0(cr0);
 }
@@ -940,7 +937,7 @@ npxresume(union savefpu *addr)
 
        cr0 = rcr0();
        npxinit(false);
-       stop_emulating();
+       fpu_enable();
        fpurstor(addr);
        load_cr0(cr0);
 }
@@ -962,7 +959,7 @@ npxdrop(void)
        CRITICAL_ASSERT(td);
        PCPU_SET(fpcurthread, NULL);
        td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
-       start_emulating();
+       fpu_disable();
 }
 
 /*
@@ -1397,7 +1394,7 @@ fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
 
        if ((flags & FPU_KERN_NOCTX) != 0) {
                critical_enter();
-               stop_emulating();
+               fpu_enable();
                if (curthread == PCPU_GET(fpcurthread)) {
                        fpusave(curpcb->pcb_save);
                        PCPU_SET(fpcurthread, NULL);
@@ -1448,7 +1445,7 @@ fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
                CRITICAL_ASSERT(td);
 
                pcb->pcb_flags &= ~(PCB_NPXNOSAVE | PCB_NPXINITDONE);
-               start_emulating();
+               fpu_disable();
        } else {
                KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
                    ("leaving not inuse ctx"));
diff --git a/sys/x86/include/fpu.h b/sys/x86/include/fpu.h
index e1ec6a592d21..0debf65d6fb9 100644
--- a/sys/x86/include/fpu.h
+++ b/sys/x86/include/fpu.h
@@ -213,4 +213,13 @@ struct savefpu_ymm {
  */
 #define        X86_XSTATE_XCR0_OFFSET  464
 
+#ifdef _KERNEL
+/*
+ * CR0_MP and CR0_EM are always set.  Use CR0_TS to force traps when
+ * FPU access is disabled.
+ */
+#define        fpu_enable()    clts()
+#define        fpu_disable()   load_cr0(rcr0() | CR0_TS)
+#endif
+
 #endif /* !_X86_FPU_H_ */

Reply via email to