The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=5577bb2f67ff442a92a3c0edb133e03bc28bb9d6

commit 5577bb2f67ff442a92a3c0edb133e03bc28bb9d6
Author:     Andrew Turner <and...@freebsd.org>
AuthorDate: 2024-08-19 12:44:11 +0000
Commit:     Andrew Turner <and...@freebsd.org>
CommitDate: 2024-08-20 08:49:16 +0000

    arm64/vmm: Support tlbi from VHE
    
    When invalidating the stage 2 TLB we need to ensure page table
    updates have completed, and for tlbi vmalle1is the HCR_EL2 TGE flag
    needs to be clear.
    
    To fix the former, add a data barrier before the tlbi instructions
    (see the ordering sketch below). On non-VHE the barrier is implicit
    in the exception entry to EL2, so it is only needed for VHE.
    
    The tlbi vmalle1is instruction operates on the EL2 & 0 regime when
    the HCR_EL2 E2H and TGE flags are both set. Clearing the TGE flag
    makes it operate on the EL1 & 0 regime we expect (a condensed
    sequence is sketched after the diff).
    
    Sponsored by:   Arm Ltd
    Differential Revision:  https://reviews.freebsd.org/D46080
---
 sys/arm64/vmm/vmm_hyp.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
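
As background for the diff, the sketch below illustrates the ordering the
new dsb(ishst) provides. It is not part of the commit: update_stage2_pte()
is a hypothetical helper, written against the dsb()/isb() macros and the
inline tlbi form already used in vmm_hyp.c.

	/*
	 * Sketch only, not from the commit: the ordering problem the
	 * new dsb(ishst) addresses. Hypothetical helper.
	 */
	static void
	update_stage2_pte(uint64_t *ptep, uint64_t newpte, uint64_t ipa)
	{
		*ptep = newpte;		/* stage 2 page table store */

		/*
		 * Make the store visible before issuing the invalidate.
		 * On non-VHE the exception entry into EL2 provides this
		 * ordering; under VHE there is no exception, so the
		 * barrier must be explicit.
		 */
		dsb(ishst);

		/* Invalidate the stage 2 TLB entry for this IPA. */
		__asm __volatile("tlbi ipas2e1is, %0" : : "r" (ipa >> 12));
		dsb(ish);		/* wait for the invalidate to complete */
		isb();
	}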

diff --git a/sys/arm64/vmm/vmm_hyp.c b/sys/arm64/vmm/vmm_hyp.c
index 9c84bb4b294e..bd119c80139b 100644
--- a/sys/arm64/vmm/vmm_hyp.c
+++ b/sys/arm64/vmm/vmm_hyp.c
@@ -619,6 +619,13 @@ VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
 {
        uint64_t end, r, start;
        uint64_t host_vttbr;
+#ifdef VMM_VHE
+       uint64_t host_hcr;
+#endif
+
+#ifdef VMM_VHE
+       dsb(ishst);
+#endif
 
 #define        TLBI_VA_SHIFT                   12
 #define        TLBI_VA_MASK                    ((1ul << 44) - 1)
@@ -631,6 +638,12 @@ VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
        WRITE_SPECIALREG(vttbr_el2, vttbr);
        isb();
 
+#ifdef VMM_VHE
+       host_hcr = READ_SPECIALREG(hcr_el2);
+       WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
+       isb();
+#endif
+
        /*
         * The CPU can cache the stage 1 + 2 combination so we need to ensure
         * the stage 2 is invalidated first, then when this has completed we
@@ -655,7 +668,12 @@ VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
        dsb(ish);
        isb();
 
-       /* Switch back t othe host vttbr */
+#ifdef VMM_VHE
+       WRITE_SPECIALREG(hcr_el2, host_hcr);
+       isb();
+#endif
+
+       /* Switch back to the host vttbr */
        WRITE_SPECIALREG(vttbr_el2, host_vttbr);
        isb();
 }
@@ -665,6 +683,10 @@ VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
 {
        uint64_t host_vttbr;
 
+#ifdef VMM_VHE
+       dsb(ishst);
+#endif
+
        /* Switch to the guest vttbr */
        /* TODO: Handle Cortex-A57/A72 erratum 131936 */
        host_vttbr = READ_SPECIALREG(vttbr_el2);
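
For reference, after this commit the VHE path of VMM_HYP_FUNC(s2_tlbi_range)
has roughly the shape below. This is a condensed sketch, not verbatim source:
guest_vttbr stands in for the function's vttbr argument, and the per-page
tlbi loop over [sva, eva) is elided.

	dsb(ishst);		/* page table updates complete before any tlbi */

	/* Switch to the guest vttbr */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, guest_vttbr);
	isb();

	/* Clear TGE so tlbi vmalle1is targets the EL1 & 0 regime */
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();

	/* ... tlbi ipas2e1is over the range, then tlbi vmalle1is ... */
	dsb(ish);
	isb();

	/* Restore the host HCR_EL2, then the host vttbr */
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();

Note the two barrier flavours: dsb(ishst) before the invalidate only needs to
order the earlier page table stores, while dsb(ish) afterwards waits for the
TLB invalidation itself to complete across the Inner Shareable domain.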
