Link from the fast tlb entry to the interval tree node.

Now that each CPUTLBEntry records its defining CPUTLBEntryTree node,
the CPUTLBEntryFull data can be found directly via entry->tree->full
instead of indexing the parallel fulltlb[] array.  This removes the
need to recompute tlb_index() after tlb_fill_align() may have resized
the tlb, and with it the maybe_resized return value of the mmu_lookup1
helpers and the refresh of page[0].full in mmu_lookup.  Note that
tlb_flush_entry_mask_locked must now clear the entry fields
individually, since the tree pointer must be reset to NULL rather
than filled with -1.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/exec/tlb-common.h |  2 ++
 accel/tcg/cputlb.c        | 59 ++++++++++++++-------------------------
 2 files changed, 23 insertions(+), 38 deletions(-)
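
A reviewer note, not part of the commit: the sketch below contrasts
the two lookup patterns.  It is illustrative only, assuming the
declarations in include/exec/tlb-common.h and accel/tcg/cputlb.c as
touched by this patch; the local variable names are hypothetical.

    /* Before: CPUTLBEntryFull lives in the parallel fulltlb[] array,
     * and the index must be recomputed whenever tlb_fill_align()
     * may have resized the tlb.  */
    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
    CPUTLBEntryFull *full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];

    /* After: the fast tlb entry carries a back-pointer to its
     * defining interval tree node, which owns the full data, so
     * no array index is needed.  */
    CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
    CPUTLBEntryFull *full = &entry->tree->full;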

diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index feaa471299..3b57d61112 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -31,6 +31,8 @@ typedef union CPUTLBEntry {
          * use the corresponding iotlb value.
          */
         uintptr_t addend;
+        /* The defining IntervalTree entry. */
+        struct CPUTLBEntryTree *tree;
     };
     /*
      * Padding to get a power of two size, as well as index
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 7c8308355d..2a8d1b4fb2 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -505,7 +505,10 @@ static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                         vaddr mask)
 {
     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
-        memset(tlb_entry, -1, sizeof(*tlb_entry));
+        tlb_entry->addr_read = -1;
+        tlb_entry->addr_write = -1;
+        tlb_entry->addend = 0;
+        tlb_entry->tree = NULL;
         return true;
     }
     return false;
@@ -1212,6 +1215,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
 
     /* Now calculate the new entry */
     node->copy.addend = addend - addr_page;
+    node->copy.tree = node;
 
     if (wp_flags & BP_MEM_READ) {
         read_flags |= TLB_WATCHPOINT;
@@ -1425,7 +1429,6 @@ static int probe_access_internal_data(CPUState *cpu, vaddr addr,
                                       void **phost, CPUTLBEntryFull **pfull,
                                       uintptr_t retaddr, bool check_mem_cbs)
 {
-    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
@@ -1442,7 +1445,6 @@ static int probe_access_internal_data(CPUState *cpu, vaddr addr,
             }
 
             /* TLB resize via tlb_fill_align may have moved the entry.  */
-            index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
 
             /*
@@ -1456,7 +1458,7 @@ static int probe_access_internal_data(CPUState *cpu, vaddr addr,
     }
     flags &= tlb_addr;
 
-    *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+    *pfull = full = &entry->tree->full;
     flags |= full->slow_flags[access_type];
 
     /*
@@ -1659,7 +1661,6 @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                        bool is_store, struct qemu_plugin_hwaddr *data)
 {
     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
-    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
     CPUTLBEntryFull *full;
@@ -1668,7 +1669,7 @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
         return false;
     }
 
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+    full = &tlbe->tree->full;
     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
 
     /* We must have an iotlb entry for MMIO */
@@ -1716,20 +1717,17 @@ typedef struct MMULookupLocals {
  *
  * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results.  If the translation fails,
- * tlb_fill_align will longjmp out.  Return true if the softmmu tlb for
- * @mmu_idx may have resized.
+ * tlb_fill_align will longjmp out.
  */
-static bool mmu_lookup1_code(CPUState *cpu, MMULookupPageData *data,
+static void mmu_lookup1_code(CPUState *cpu, MMULookupPageData *data,
                              MemOp memop, int mmu_idx, uintptr_t ra)
 {
     vaddr addr = data->addr;
     CPUTLBEntryTree *t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
-    bool maybe_resized = true;
 
     if (!t || !(t->full.prot & PAGE_EXEC)) {
         tlb_fill_align(cpu, addr, MMU_INST_FETCH, mmu_idx,
                        memop, data->size, false, ra);
-        maybe_resized = true;
         t = tlbtree_lookup_addr(&cpu->neg.tlb.d[mmu_idx], addr);
     }
 
@@ -1737,19 +1735,16 @@ static bool mmu_lookup1_code(CPUState *cpu, MMULookupPageData *data,
     data->flags = t->copy.addr_read & TLB_EXEC_FLAGS_MASK;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
     data->haddr = (void *)((uintptr_t)addr + t->copy.addend);
-
-    return maybe_resized;
 }
 
-static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
+static void mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
                              MemOp memop, int mmu_idx,
                              MMUAccessType access_type, uintptr_t ra)
 {
     vaddr addr = data->addr;
-    uintptr_t index = tlb_index(cpu, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
-    bool maybe_resized = false;
+    bool did_tlb_fill = false;
     CPUTLBEntryFull *full;
     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
 
@@ -1758,8 +1753,7 @@ static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
         if (!tlbtree_hit(cpu, mmu_idx, access_type, addr)) {
             tlb_fill_align(cpu, addr, access_type, mmu_idx,
                            memop, data->size, false, ra);
-            maybe_resized = true;
-            index = tlb_index(cpu, mmu_idx, addr);
+            did_tlb_fill = true;
             entry = tlb_entry(cpu, mmu_idx, addr);
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
@@ -1771,11 +1765,11 @@ static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
         tlb_addr = tlb_read_idx(entry, access_type);
     }
 
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
-    flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+    full = &entry->tree->full;
+    flags &= tlb_addr;
     flags |= full->slow_flags[access_type];
 
-    if (likely(!maybe_resized)) {
+    if (likely(!did_tlb_fill)) {
         /* Alignment has not been checked by tlb_fill_align. */
         int a_bits = memop_alignment_bits(memop);
 
@@ -1798,17 +1792,15 @@ static bool mmu_lookup1_data(CPUState *cpu, MMULookupPageData *data,
     data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
     data->haddr = (void *)((uintptr_t)addr + entry->addend);
-
-    return maybe_resized;
 }
 
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
+static void mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
 {
     if (access_type == MMU_INST_FETCH) {
-        return mmu_lookup1_code(cpu, data, memop, mmu_idx, ra);
+        mmu_lookup1_code(cpu, data, memop, mmu_idx, ra);
     }
-    return mmu_lookup1_data(cpu, data, memop, mmu_idx, access_type, ra);
+    mmu_lookup1_data(cpu, data, memop, mmu_idx, access_type, ra);
 }
 
 /**
@@ -1889,15 +1881,9 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         l->page[1].size = l->page[0].size - size0;
         l->page[0].size = size0;
 
-        /*
-         * Lookup both pages, recognizing exceptions from either.  If the
-         * second lookup potentially resized, refresh first CPUTLBEntryFull.
-         */
+        /* Lookup both pages, recognizing exceptions from either. */
         mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
-        if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
-            uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
-            l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
-        }
+        mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra);
 
         flags = l->page[0].flags | l->page[1].flags;
         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1925,7 +1911,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
-    uintptr_t index;
     CPUTLBEntry *tlbe;
     void *hostaddr;
     CPUTLBEntryFull *full;
@@ -1937,7 +1922,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
 
-    index = tlb_index(cpu, mmu_idx, addr);
     tlbe = tlb_entry(cpu, mmu_idx, addr);
 
     /* Check TLB entry and enforce page permissions.  */
@@ -1947,7 +1931,6 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
             tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
                            mop, size, false, retaddr);
             did_tlb_fill = true;
-            index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
@@ -1958,7 +1941,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         }
     }
 
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+    full = &tlbe->tree->full;
 
     /*
      * Let the guest notice RMW on a write-only page.
-- 
2.43.0

