We have been a bit too eager in Xen and used system-wide DSBs when they
can be limited to the inner-shareable domain.
Furthermore, the first DSB can be restricted further to stores in the
inner-shareable domain, because it is only there to ensure that updates
to the translation tables are visible to the page-table walker.
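
As an illustration, here is a minimal sketch of the barrier sequence this
change settles on, written in arm64 syntax (the helper name is made up
for the example; the real helpers are generated by the macro introduced
below):

    /* Sketch: flush guest TLB entries, inner-shareable domain only. */
    static inline void example_guest_tlb_flush(void)
    {
        asm volatile(
            "dsb ishst;"          /* Make page-table stores visible to
                                     the walkers (stores only, IS domain) */
            "tlbi vmalls12e1is;"  /* Invalidate stage 1 & 2 entries for
                                     the current VMID, inner-shareable */
            "dsb ish;"            /* Wait for the invalidation to finish */
            "isb;"                /* Synchronize the instruction stream  */
            : : : "memory");
    }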
Lastly, most of the TLB helpers lack documentation. Rather than updating
the helpers one by one, this patch introduces a per-arch macro to
generate them. This will make it easier to update both the helpers and
their documentation in the future.
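
For example, on arm32 TLB_HELPER(flush_guest_tlb_local, TLBIALL) will
expand to:

    static inline void flush_guest_tlb_local(void)
    {
        dsb(ishst);
        WRITE_CP32(0, TLBIALL);
        dsb(ish);
        isb();
    }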
Signed-off-by: Julien Grall <julien.gr...@arm.com>
---
xen/include/asm-arm/arm32/flushtlb.h | 73 ++++++++++++++--------------------
xen/include/asm-arm/arm64/flushtlb.h | 76 +++++++++++++++---------------------
2 files changed, 60 insertions(+), 89 deletions(-)
diff --git a/xen/include/asm-arm/arm32/flushtlb.h b/xen/include/asm-arm/arm32/flushtlb.h
index b629db61cb..9085e65011 100644
--- a/xen/include/asm-arm/arm32/flushtlb.h
+++ b/xen/include/asm-arm/arm32/flushtlb.h
@@ -1,59 +1,44 @@
#ifndef __ASM_ARM_ARM32_FLUSHTLB_H__
#define __ASM_ARM_ARM32_FLUSHTLB_H__
-/* Flush local TLBs, current VMID only */
-static inline void flush_guest_tlb_local(void)
-{
- dsb(sy);
-
- WRITE_CP32((uint32_t) 0, TLBIALL);
-
- dsb(sy);
- isb();
+/*
+ * Every invalidation operation uses the following pattern:
+ *
+ * DSB ISHST // Ensure prior page-table updates have completed
+ * TLBI... // Invalidate the TLB
+ * DSB ISH // Ensure the TLB invalidation has completed
+ * ISB // See explanation below
+ *
+ * For Xen page-tables the ISB will discard any instructions fetched
+ * from the old mappings.
+ *
+ * For the Stage-2 page-tables the ISB ensures the completion of the DSB
+ * (and therefore the TLB invalidation) before continuing. So we know
+ * the TLBs cannot contain an entry for a mapping we may have removed.
+ */
+#define TLB_HELPER(name, tlbop) \
+static inline void name(void) \
+{ \
+ dsb(ishst); \
+ WRITE_CP32(0, tlbop); \
+ dsb(ish); \
+ isb(); \
}
-/* Flush inner shareable TLBs, current VMID only */
-static inline void flush_guest_tlb(void)
-{
- dsb(sy);
-
- WRITE_CP32((uint32_t) 0, TLBIALLIS);
+/* Flush local TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb_local, TLBIALL);
- dsb(sy);
- isb();
-}
+/* Flush inner shareable TLBs, current VMID only */
+TLB_HELPER(flush_guest_tlb, TLBIALLIS);
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb_local(void)
-{
- dsb(sy);
-
- WRITE_CP32((uint32_t) 0, TLBIALLNSNH);
-
- dsb(sy);
- isb();
-}
+TLB_HELPER(flush_all_guests_tlb_local, TLBIALLNSNH);
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb(void)
-{
- dsb(sy);
-
- WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS);
-
- dsb(sy);
- isb();
-}
+TLB_HELPER(flush_all_guests_tlb, TLBIALLNSNHIS);
/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
- asm volatile("dsb;" /* Ensure preceding are visible */
- CMD_CP32(TLBIALLH)
- "dsb;" /* Ensure completion of the TLB flush */
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_xen_tlb_local, TLBIALLH);
/* Flush TLB of local processor for address va. */
static inline void __flush_xen_tlb_one_local(vaddr_t va)
diff --git a/xen/include/asm-arm/arm64/flushtlb.h b/xen/include/asm-arm/arm64/flushtlb.h
index 2fed34b2ec..ceec59542e 100644
--- a/xen/include/asm-arm/arm64/flushtlb.h
+++ b/xen/include/asm-arm/arm64/flushtlb.h
@@ -1,60 +1,46 @@
#ifndef __ASM_ARM_ARM64_FLUSHTLB_H__
#define __ASM_ARM_ARM64_FLUSHTLB_H__
-/* Flush local TLBs, current VMID only */
-static inline void flush_guest_tlb_local(void)
-{
- asm volatile(
- "dsb sy;"
- "tlbi vmalls12e1;"
- "dsb sy;"
- "isb;"
- : : : "memory");
+/*
+ * Every invalidation operation uses the following pattern:
+ *
+ * DSB ISHST // Ensure prior page-table updates have completed
+ * TLBI... // Invalidate the TLB
+ * DSB ISH // Ensure the TLB invalidation has completed
+ * ISB // See explanation below
+ *
+ * For Xen page-tables the ISB will discard any instructions fetched
+ * from the old mappings.
+ *
+ * For the Stage-2 page-tables the ISB ensures the completion of the DSB
+ * (and therefore the TLB invalidation) before continuing. So we know
+ * the TLBs cannot contain an entry for a mapping we may have removed.
+ */
+#define TLB_HELPER(name, tlbop) \
+static inline void name(void) \
+{ \
+ asm volatile( \
+ "dsb ishst;" \
+ "tlbi " # tlbop ";" \
+ "dsb ish;" \
+ "isb;" \
+ : : : "memory"); \
}
+/* Flush local TLBs, current VMID only. */
+TLB_HELPER(flush_guest_tlb_local, vmalls12e1);
+
/* Flush innershareable TLBs, current VMID only */
-static inline void flush_guest_tlb(void)
-{
- asm volatile(
- "dsb sy;"
- "tlbi vmalls12e1is;"
- "dsb sy;"
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_guest_tlb, vmalls12e1is);
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb_local(void)
-{
- asm volatile(
- "dsb sy;"
- "tlbi alle1;"
- "dsb sy;"
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_all_guests_tlb_local, alle1);
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_all_guests_tlb(void)
-{
- asm volatile(
- "dsb sy;"
- "tlbi alle1is;"
- "dsb sy;"
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_all_guests_tlb, alle1is);
/* Flush all hypervisor mappings from the TLB of the local processor. */
-static inline void flush_xen_tlb_local(void)
-{
- asm volatile (
- "dsb sy;" /* Ensure visibility of PTE writes */
- "tlbi alle2;" /* Flush hypervisor TLB */
- "dsb sy;" /* Ensure completion of TLB flush */
- "isb;"
- : : : "memory");
-}
+TLB_HELPER(flush_xen_tlb_local, alle2);
/* Flush TLB of local processor for address va. */
static inline void __flush_xen_tlb_one_local(vaddr_t va)