On 10/9/24 08:08, Richard Henderson wrote:
Flush a page from the IntervalTree cache.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
  accel/tcg/cputlb.c | 16 ++++++++++++----
  1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d964e1b2e8..772656c7f8 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -573,6 +573,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
      CPUTLBDesc *desc = &cpu->neg.tlb.d[midx];
      vaddr lp_addr = desc->large_page_addr;
      vaddr lp_mask = desc->large_page_mask;
+    CPUTLBEntryTree *node;
 
     /* Check if we need to flush due to large pages. */
      if ((page & lp_mask) == lp_addr) {
@@ -580,10 +581,17 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
                    VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                    midx, lp_addr, lp_mask);
          tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
-    } else {
-        tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
-                                   page, TARGET_PAGE_SIZE, -1);
-        tlb_flush_vtlb_page_locked(cpu, midx, page);
+        return;
+    }
+
+    tlbfast_flush_range_locked(desc, &cpu->neg.tlb.f[midx],
+                               page, TARGET_PAGE_SIZE, -1);
+    tlb_flush_vtlb_page_locked(cpu, midx, page);
+
+    node = tlbtree_lookup_addr(desc, page);
+    if (node) {
+        interval_tree_remove(&node->itree, &desc->iroot);
+        g_free(node);
      }
  }

Reviewed-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>

Reply via email to