On 2/7/23 00:43, Anton Johansson wrote:
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index b3f6e78835..083939b302 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
static inline TranslationBlock *
-tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
{
-#if TARGET_TB_PCREL
- /* Use acquire to ensure current load of pc from jc. */
- return qatomic_load_acquire(&jc->array[hash].tb);
-#else
- /* Use rcu_read to ensure current load of pc from *tb. */
- return qatomic_rcu_read(&jc->array[hash].tb);
-#endif
+ if (cflags & CF_PCREL) {
+ /* Use acquire to ensure current load of pc from jc. */
+ return qatomic_load_acquire(&jc->array[hash].tb);
+ } else {
+ /* Use rcu_read to ensure current load of pc from *tb. */
+ return qatomic_rcu_read(&jc->array[hash].tb);
+ }
}
static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
-#if TARGET_TB_PCREL
- return jc->array[hash].pc;
-#else
- return tb_pc(tb);
-#endif
+ if (tb_cflags(tb) & CF_PCREL) {
+ return jc->array[hash].pc;
+ } else {
+ return tb_pc(tb);
+ }
}
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
TranslationBlock *tb, target_ulong pc)
{
-#if TARGET_TB_PCREL
- jc->array[hash].pc = pc;
- /* Use store_release on tb to ensure pc is written first. */
- qatomic_store_release(&jc->array[hash].tb, tb);
-#else
- /* Use the pc value already stored in tb->pc. */
- qatomic_set(&jc->array[hash].tb, tb);
-#endif
+ if (tb_cflags(tb) & CF_PCREL) {
+ jc->array[hash].pc = pc;
+ /* Use store_release on tb to ensure pc is written first. */
+ qatomic_store_release(&jc->array[hash].tb, tb);
+ } else {
+ /* Use the pc value already stored in tb->pc. */
+ qatomic_set(&jc->array[hash].tb, tb);
+ }
}
These little functions made sense when they were isolating ifdefs.
Now that all they guard is a runtime condition, a caller that uses several of them in a row reduces to
if (CF_PCREL) {
a
} else {
b
}
if (CF_PCREL) {
c
} else {
d
}
if (CF_PCREL) {
e
} else {
f
}
and we probably want to hoist a single check into the callers.
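E.g. something like the following in the caller (just a sketch; jmp_cache_lookup is a made-up name, I'm assuming the lookup keeps roughly the shape of the current tb_lookup(), and I've left out the cs_base/flags/cflags comparisons):

static inline TranslationBlock *
jmp_cache_lookup(CPUJumpCache *jc, uint32_t hash,
                 target_ulong pc, uint32_t cflags)
{
    TranslationBlock *tb;

    if (cflags & CF_PCREL) {
        /* Acquire pairs with the store_release in tb_jmp_cache_set,
           so the pc read below is the one stored with this tb. */
        tb = qatomic_load_acquire(&jc->array[hash].tb);
        if (tb && jc->array[hash].pc == pc) {
            return tb;
        }
    } else {
        /* The pc lives in the tb itself. */
        tb = qatomic_rcu_read(&jc->array[hash].tb);
        if (tb && tb_pc(tb) == pc) {
            return tb;
        }
    }
    return NULL;
}

That way each caller tests CF_PCREL once rather than once per helper.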
+ return ((tb_cflags(a) & CF_PCREL || tb_pc(a) == tb_pc(b)) &&
Similarly, things like this, where we have a PCREL test here and also within
tb_pc().
- h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
+ h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb_pc(tb)),
etc.
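The duplication being: if tb_pc() ends up shaped something like this after the conversion (my assumption, not something in this patch),

static inline target_ulong tb_pc(const TranslationBlock *tb)
{
    /* Only valid when the pc is stored absolutely in the tb. */
    assert(!(tb_cflags(tb) & CF_PCREL));
    return tb->pc;
}

then each of the sites above tests CF_PCREL once to decide whether tb_pc() may be called, and tb_pc() tests it again internally.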
This does too much at once, and also disables TARGET_TB_PCREL for one patch, changing
behaviour during bisection.
r~