On 4/4/25 16:56, Philippe Mathieu-Daudé wrote:
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index d479f53ae02..ae12ad2d867 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -1057,10 +1057,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
-#ifndef TARGET_HAS_PRECISE_SMC
- pc = 0;
-#endif
- if (!pc) {
+ if (!target_has_precise_smc() || !pc) {
tb_invalidate_phys_page(addr);
return false;
}
For the record, in my v2 I reverse the order of these tests, since !pc is the simpler check.
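That is, something along these lines (a sketch only, not the exact v2 hunk):

    if (!pc || !target_has_precise_smc()) {
        tb_invalidate_phys_page(addr);
        return false;
    }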
@@ -1109,10 +1106,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
{
TranslationBlock *tb;
PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
- TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+ TranslationBlock *current_tb = (target_has_precise_smc() && retaddr)
+ ? tcg_tb_lookup(retaddr) : NULL;
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
@@ -1134,8 +1130,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb == tb &&
+ if (target_has_precise_smc() && current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
For the record, we can eliminate the target_has_precise_smc() test here: without precise smc we have already set current_tb to NULL, so the current_tb == tb test always fails ...
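So this hunk could reduce to something like (a sketch; the body of the branch is elided and unchanged):

    if (current_tb == tb &&
        (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
        /* current_tb is NULL without precise smc, so this never matches. */
        ...
    }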
@@ -1157,15 +1151,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tlb_unprotect_code(start);
}
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb_modified) {
+ if (target_has_precise_smc() && current_tb_modified) {
... which in turn means that current_tb_modified is never set.
Thus only the one runtime test at the top of the function suffices.
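Meaning the tail of the function could keep its original shape, e.g. (sketch only, body elided):

    if (current_tb_modified) {
        /* Never reached without precise smc: the flag is only set in the
           current_tb == tb branch above, and current_tb is NULL there. */
        ...
        return true;
    }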
r~