On 10/9/24 08:08, Richard Henderson wrote:
Because translation is special, we don't need the speed
of the direct-mapped softmmu tlb.  We cache lookups in
DisasContextBase within the translator loop anyway.

Drop the addr_code comparator from CPUTLBEntry.
Go directly to the IntervalTree for MMU_INST_FETCH.
Derive exec flags from read flags.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
  include/exec/cpu-all.h    |   3 +
  include/exec/tlb-common.h |   5 +-
  accel/tcg/cputlb.c        | 138 +++++++++++++++++++++++++++++---------
  3 files changed, 110 insertions(+), 36 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 6f09b86e7f..7f5a10962a 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -326,6 +326,9 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
      (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
      | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+/* Filter read flags to exec flags. */
+#define TLB_EXEC_FLAGS_MASK  (TLB_MMIO)
+
  /*
   * Flags stored in CPUTLBEntryFull.slow_flags[x].
   * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index 300f9fae67..feaa471299 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -26,7 +26,6 @@ typedef union CPUTLBEntry {
      struct {
          uint64_t addr_read;
          uint64_t addr_write;
-        uint64_t addr_code;
          /*
           * Addend to virtual address to get host address.  IO accesses
           * use the corresponding iotlb value.
@@ -35,7 +34,7 @@ typedef union CPUTLBEntry {
      };
      /*
       * Padding to get a power of two size, as well as index
-     * access to addr_{read,write,code}.
+     * access to addr_{read,write}.
       */
      uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
  } CPUTLBEntry;
@@ -92,7 +91,7 @@ struct CPUTLBEntryFull {
       * Additional tlb flags for use by the slow path. If non-zero,
       * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
       */
-    uint8_t slow_flags[MMU_ACCESS_COUNT];
+    uint8_t slow_flags[2];
      /*
       * Allow target-specific additions to this structure.
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 61daa89e06..7c8308355d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -114,8 +114,9 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
                        MMU_DATA_LOAD * sizeof(uint64_t));
      QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
                        MMU_DATA_STORE * sizeof(uint64_t));
-    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
-                      MMU_INST_FETCH * sizeof(uint64_t));
+
+    tcg_debug_assert(access_type == MMU_DATA_LOAD ||
+                     access_type == MMU_DATA_STORE);
#if TARGET_LONG_BITS == 32
      /* Use qatomic_read, in case of addr_write; only care about low bits. */
@@ -490,8 +491,7 @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
      mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
      return (page == (tlb_entry->addr_read & mask) ||
-            page == (tlb_addr_write(tlb_entry) & mask) ||
-            page == (tlb_entry->addr_code & mask));
+            page == (tlb_addr_write(tlb_entry) & mask));
  }
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
@@ -1061,15 +1061,13 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                     vaddr address, int flags,
                                     MMUAccessType access_type, bool enable)
  {
-    if (enable) {
-        address |= flags & TLB_FLAGS_MASK;
-        flags &= TLB_SLOW_FLAGS_MASK;
-        if (flags) {
-            address |= TLB_FORCE_SLOW;
-        }
-    } else {
-        address = -1;
-        flags = 0;
+    if (!enable) {
+       address = TLB_INVALID_MASK;
+    }
+    address |= flags & TLB_FLAGS_MASK;
+    flags &= TLB_SLOW_FLAGS_MASK;
+    if (flags) {
+        address |= TLB_FORCE_SLOW;
      }

I'm not sure I follow this change correctly.
After the change, the final address and flags values depend only on the flags parameter, whereas before they depended on both the flags and enable parameters.
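
To make my concern concrete, here is a minimal standalone sketch of the two versions, using made-up mask values rather than QEMU's real TLB_* definitions. For enable == false with nonzero flags, the old code always stored address = -1 and flags = 0, while the new code stores a comparator and slow_flags derived from the incoming flags:

#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Placeholder bit assignments, not QEMU's real values. */
#define TLB_FLAGS_MASK       0x0f0
#define TLB_SLOW_FLAGS_MASK  0x00f
#define TLB_FORCE_SLOW       0x080
#define TLB_INVALID_MASK     0x100

/* Old behavior: a disabled comparator ignores flags entirely. */
static void set_compare_old(uint64_t address, int flags, bool enable,
                            uint64_t *out_addr, int *out_flags)
{
    if (enable) {
        address |= flags & TLB_FLAGS_MASK;
        flags &= TLB_SLOW_FLAGS_MASK;
        if (flags) {
            address |= TLB_FORCE_SLOW;
        }
    } else {
        address = -1;
        flags = 0;
    }
    *out_addr = address;
    *out_flags = flags;
}

/* New behavior: flags are folded in even when the entry is disabled. */
static void set_compare_new(uint64_t address, int flags, bool enable,
                            uint64_t *out_addr, int *out_flags)
{
    if (!enable) {
        address = TLB_INVALID_MASK;
    }
    address |= flags & TLB_FLAGS_MASK;
    flags &= TLB_SLOW_FLAGS_MASK;
    if (flags) {
        address |= TLB_FORCE_SLOW;
    }
    *out_addr = address;
    *out_flags = flags;
}

int main(void)
{
    uint64_t a;
    int f;

    /* Disabled entry with nonzero flags: the stored values now differ. */
    set_compare_old(0x1000, 0xff, false, &a, &f);
    printf("old: addr=%#" PRIx64 " slow_flags=%#x\n", a, f);
    set_compare_new(0x1000, 0xff, false, &a, &f);
    printf("new: addr=%#" PRIx64 " slow_flags=%#x\n", a, f);
    return 0;
}

If nothing ever consults slow_flags for an entry whose comparator has TLB_INVALID_MASK set, this is probably harmless, but it does look like a behavioral change worth confirming.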

      ent->addr_idx[access_type] = address;
      full->slow_flags[access_type] = flags;
@@ -1215,9 +1213,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      /* Now calculate the new entry */
      node->copy.addend = addend - addr_page;
-    tlb_set_compare(full, &node->copy, addr_page, read_flags,
-                    MMU_INST_FETCH, prot & PAGE_EXEC);
-
      if (wp_flags & BP_MEM_READ) {
          read_flags |= TLB_WATCHPOINT;
      }
@@ -1392,21 +1387,52 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
      }
  }
-static int probe_access_internal(CPUState *cpu, vaddr addr,
-                                 int fault_size, MMUAccessType access_type,
-                                 int mmu_idx, bool nonfault,
-                                 void **phost, CPUTLBEntryFull **pfull,
-                                 uintptr_t retaddr, bool check_mem_cbs)
+static int probe_access_internal_code(CPUState *cpu, vaddr addr,
+                                      int fault_size, int mmu_idx,
+                                      bool nonfault,
+                                      void **phost, CPUTLBEntryFull **pfull,
+                                      uintptr_t retaddr)
+{
+    CPUTLBEntryTree *t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
+    int flags;
+
+    if (!t || !(t->full.prot & PAGE_EXEC)) {
+        if (!tlb_fill_align(cpu, addr, MMU_INST_FETCH, mmu_idx,
+                            0, fault_size, nonfault, retaddr)) {
+            /* Non-faulting page table read failed.  */
+            *phost = NULL;
+            *pfull = NULL;
+            return TLB_INVALID_MASK;
+        }
+        t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
+    }
+    flags = t->copy.addr_read & TLB_EXEC_FLAGS_MASK;
+    *pfull = &t->full;
+
+    if (flags) {
+        *phost = NULL;
+        return TLB_MMIO;
+    }
+
+    /* Everything else is RAM. */
+    *phost = (void *)((uintptr_t)addr + t->copy.addend);
+    return flags;
+}
+
+static int probe_access_internal_data(CPUState *cpu, vaddr addr,
+                                      int fault_size, MMUAccessType access_type,
+                                      int mmu_idx, bool nonfault,
+                                      void **phost, CPUTLBEntryFull **pfull,
+                                      uintptr_t retaddr, bool check_mem_cbs)
  {
      uintptr_t index = tlb_index(cpu, mmu_idx, addr);
      CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
      uint64_t tlb_addr = tlb_read_idx(entry, access_type);
-    vaddr page_addr = addr & TARGET_PAGE_MASK;
      int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
      CPUTLBEntryFull *full;
-    if (!tlb_hit_page(tlb_addr, page_addr)) {
-        if (!tlbtree_hit(cpu, mmu_idx, access_type, page_addr)) {
+    if (!tlb_hit(tlb_addr, addr)) {
+        if (!tlbtree_hit(cpu, mmu_idx, access_type, addr)) {
              if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
                                  0, fault_size, nonfault, retaddr)) {
                  /* Non-faulting page table read failed.  */
@@ -1450,6 +1476,21 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
      return flags;
  }
+static int probe_access_internal(CPUState *cpu, vaddr addr,
+                                 int fault_size, MMUAccessType access_type,
+                                 int mmu_idx, bool nonfault,
+                                 void **phost, CPUTLBEntryFull **pfull,
+                                 uintptr_t retaddr, bool check_mem_cbs)
+{
+    if (access_type == MMU_INST_FETCH) {
+        return probe_access_internal_code(cpu, addr, fault_size, mmu_idx,
+                                          nonfault, phost, pfull, retaddr);
+    }
+    return probe_access_internal_data(cpu, addr, fault_size, access_type,
+                                      mmu_idx, nonfault, phost, pfull,
+                                      retaddr, check_mem_cbs);
+}
+
  int probe_access_full(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, CPUTLBEntryFull **pfull,
@@ -1582,9 +1623,9 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
      CPUTLBEntryFull *full;
      void *p;
-    (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
-                                cpu_mmu_index(env_cpu(env), true), false,
-                                &p, &full, 0, false);
+    (void)probe_access_internal_code(env_cpu(env), addr, 1,
+                                     cpu_mmu_index(env_cpu(env), true),
+                                     false, &p, &full, 0);
      if (p == NULL) {
          return -1;
      }
@@ -1678,8 +1719,31 @@ typedef struct MMULookupLocals {
   * tlb_fill_align will longjmp out.  Return true if the softmmu tlb for
   * @mmu_idx may have resized.
   */
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
-                        int mmu_idx, MMUAccessType access_type, uintptr_t ra)
+static bool mmu_lookup1_code(CPUState *cpu, MMULookupPageData *data,
+                             MemOp memop, int mmu_idx, uintptr_t ra)
+{
+    vaddr addr = data->addr;
+    CPUTLBEntryTree *t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
+    bool maybe_resized = true;
+
+    if (!t || !(t->full.prot & PAGE_EXEC)) {
+        tlb_fill_align(cpu, addr, MMU_INST_FETCH, mmu_idx,
+                       memop, data->size, false, ra);
+        maybe_resized = true;
+        t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
+    }
+
+    data->full = &t->full;
+    data->flags = t->copy.addr_read & TLB_EXEC_FLAGS_MASK;
+    /* Compute haddr speculatively; depending on flags it might be invalid. */
+    data->haddr = (void *)((uintptr_t)addr + t->copy.addend);
+
+    return maybe_resized;
+}
+
+static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
+                             MemOp memop, int mmu_idx,
+                             MMUAccessType access_type, uintptr_t ra)
  {
      vaddr addr = data->addr;
      uintptr_t index = tlb_index(cpu, mmu_idx, addr);
@@ -1738,6 +1802,15 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
      return maybe_resized;
  }
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
+                        int mmu_idx, MMUAccessType access_type, uintptr_t ra)
+{
+    if (access_type == MMU_INST_FETCH) {
+        return mmu_lookup1_code(cpu, data, memop, mmu_idx, ra);
+    }
+    return mmu_lookup1_data(cpu, data, memop, mmu_idx, access_type, ra);
+}
+
  /**
   * mmu_watch_or_dirty
   * @cpu: generic cpu state
@@ -1885,13 +1958,13 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
          }
      }
+    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+
      /*
       * Let the guest notice RMW on a write-only page.
       * We have just verified that the page is writable.
-     * Subpage lookups may have left TLB_INVALID_MASK set,
-     * but addr_read will only be -1 if PAGE_READ was unset.
       */
-    if (unlikely(tlbe->addr_read == -1)) {
+    if (unlikely(!(full->prot & PAGE_READ))) {
          tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
                         0, size, false, retaddr);
          /*
@@ -1929,7 +2002,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      }
      hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
      if (unlikely(flags & TLB_NOTDIRTY)) {
          notdirty_write(cpu, addr, size, full, retaddr);

Having a fast path for code fetch sounds good. Did you measure the benefit, or did you implement it on the assumption that it would be worthwhile?
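
To make the question concrete: a direct-mapped hit is one index computation plus one compare, while the tree lookup is a dependent pointer chase per level. A rough sketch of the two access patterns, with hypothetical types rather than QEMU's real ones, and a plain binary search tree standing in for the interval tree (equivalent here since the page ranges don't overlap):

#include <stddef.h>
#include <stdint.h>

#define TLB_BITS          8
#define TARGET_PAGE_BITS 12

typedef struct Entry {
    uint64_t addr;      /* page-aligned guest address comparator */
    uintptr_t addend;   /* host address minus guest address */
} Entry;

/* Direct-mapped path: one index computation, one load, one compare. */
Entry *direct_lookup(Entry *table, uint64_t addr)
{
    uint64_t page = addr & ~(((uint64_t)1 << TARGET_PAGE_BITS) - 1);
    size_t idx = (addr >> TARGET_PAGE_BITS) & ((1u << TLB_BITS) - 1);

    return table[idx].addr == page ? &table[idx] : NULL;
}

typedef struct Node {
    struct Node *left, *right;
    uint64_t lo, hi;    /* inclusive guest address range */
    Entry entry;
} Node;

/* Tree path: one dependent load per level, O(log n) in the worst case. */
Entry *tree_lookup(Node *n, uint64_t addr)
{
    while (n) {
        if (addr < n->lo) {
            n = n->left;
        } else if (addr > n->hi) {
            n = n->right;
        } else {
            return &n->entry;
        }
    }
    return NULL;
}

Since the translator already caches the previous lookup in DisasContextBase, the extra pointer chases may well disappear in the noise, but numbers would be reassuring.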
