From: Mark Rutland <mark.rutl...@arm.com>

As with dsb() and isb(), add a __tlbi() helper so that we can avoid
distracting asm boilerplate every time we want a TLBI. As some TLBI
operations take an argument while others do not, some pre-processor is
used to handle these two cases with different assembly blocks.

The existing tlbflush.h code is moved over to use the helper.

Signed-off-by: Mark Rutland <mark.rutl...@arm.com>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Marc Zyngier <marc.zyng...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
[ rename helper to __tlbi, update commit log ]
Signed-off-by: Punit Agrawal <punit.agra...@arm.com>
---
 arch/arm64/include/asm/tlbflush.h | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b460ae2..e9e42a7 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -25,6 +25,21 @@
 #include <asm/cputype.h>
 
 /*
+ * Raw TLBI operations. Drivers and most kernel code should use the TLB
+ * management routines below in preference to these. Where necessary, these can
+ * be used to avoid asm() boilerplate.
+ *
+ * Can be used as __tlbi(op) or __tlbi(op, arg), depending on whether a
+ * particular TLBI op takes an argument or not. The macros below handle invoking
+ * the asm with or without the register argument as appropriate.
+ */
+#define __TLBI_0(op, arg)              asm ("tlbi " #op)
+#define __TLBI_1(op, arg)              asm ("tlbi " #op ", %0" : : "r" (arg))
+#define __TLBI_N(op, arg, n, ...)      __TLBI_##n(op, arg)
+
+#define __tlbi(op, ...)                __TLBI_N(op, ##__VA_ARGS__, 1, 0)
+
+/*
  *     TLB Management
  *     ==============
  *
@@ -66,7 +81,7 @@
 static inline void local_flush_tlb_all(void)
 {
        dsb(nshst);
-       asm("tlbi       vmalle1");
+       __tlbi(vmalle1);
        dsb(nsh);
        isb();
 }
@@ -74,7 +89,7 @@ static inline void local_flush_tlb_all(void)
 static inline void flush_tlb_all(void)
 {
        dsb(ishst);
-       asm("tlbi       vmalle1is");
+       __tlbi(vmalle1is);
        dsb(ish);
        isb();
 }
@@ -84,7 +99,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
        unsigned long asid = ASID(mm) << 48;
 
        dsb(ishst);
-       asm("tlbi       aside1is, %0" : : "r" (asid));
+       __tlbi(aside1is, asid);
        dsb(ish);
 }
 
@@ -94,7 +109,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
        dsb(ishst);
-       asm("tlbi       vale1is, %0" : : "r" (addr));
+       __tlbi(vale1is, addr);
        dsb(ish);
 }
 
@@ -122,9 +137,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
                if (last_level)
-                       asm("tlbi vale1is, %0" : : "r"(addr));
+                       __tlbi(vale1is, addr);
                else
-                       asm("tlbi vae1is, %0" : : "r"(addr));
+                       __tlbi(vae1is, addr);
        }
        dsb(ish);
 }
@@ -149,7 +164,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 
        dsb(ishst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-               asm("tlbi vaae1is, %0" : : "r"(addr));
+               __tlbi(vaae1is, addr);
        dsb(ish);
        isb();
 }
@@ -163,7 +178,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 {
        unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
-       asm("tlbi       vae1is, %0" : : "r" (addr));
+       __tlbi(vae1is, addr);
        dsb(ish);
 }
 
-- 
2.8.1

Reply via email to