From: Philippe Mathieu-Daudé <phi...@linaro.org>

In preparation for having tcg_req_mo() access CPUState in the next
commit, pass it to cpu_req_mo(), its single caller.
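For illustration only, a minimal, compilable sketch of the new calling
convention; CPUState, the TCG_MO_* flags and tcg_req_mo() below are
simplified stand-ins, not the real QEMU definitions:

    #include <stdio.h>

    typedef struct CPUState CPUState;      /* opaque stand-in */

    #define TCG_MO_LD_LD 1
    #define TCG_MO_ST_LD 2

    /* Stand-in: the real tcg_req_mo() consults the guest memory model. */
    #define tcg_req_mo(type) ((type) != 0)

    /* As of this patch the macro also takes the CPU; it is not used yet,
     * the next commit makes tcg_req_mo() consult it. */
    #define cpu_req_mo(cpu, type)       \
        do {                            \
            (void)(cpu);                \
            if (tcg_req_mo(type)) {     \
                puts("smp_mb()");       \
            }                           \
        } while (0)

    int main(void)
    {
        CPUState *cpu = NULL;           /* real callers pass their CPUState */
        cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
        return 0;
    }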
Signed-off-by: Philippe Mathieu-Daudé <phi...@linaro.org>
Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/internal-target.h |  3 ++-
 accel/tcg/cputlb.c          | 20 ++++++++++----------
 accel/tcg/user-exec.c       | 20 ++++++++++----------
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
index 1a46a7c87d..23aac39b57 100644
--- a/accel/tcg/internal-target.h
+++ b/accel/tcg/internal-target.h
@@ -59,12 +59,13 @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 
 /**
  * cpu_req_mo:
+ * @cpu: CPUState
  * @type: TCGBar
  *
  * If tcg_req_mo indicates a barrier for @type is required
  * for the guest memory model, issue a host memory barrier.
  */
-#define cpu_req_mo(type)          \
+#define cpu_req_mo(cpu, type)     \
     do {                          \
         if (tcg_req_mo(type)) {   \
             smp_mb();             \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2cafd38d2a..35b1ff03a5 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2324,7 +2324,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2339,7 +2339,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2363,7 +2363,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint32_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2384,7 +2384,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint64_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2407,7 +2407,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
     Int128 ret;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2735,7 +2735,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2749,7 +2749,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     bool crosspage;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2771,7 +2771,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2792,7 +2792,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2815,7 +2815,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     uint64_t a, b;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 1b878ead7a..3f4d682446 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1061,7 +1061,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     void *haddr;
     uint8_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
@@ -1075,7 +1075,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_2(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1093,7 +1093,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint32_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_4(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1111,7 +1111,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint64_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_8(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1130,7 +1130,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
     MemOp mop = get_memop(oi);
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_16(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1146,7 +1146,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
 {
     void *haddr;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
@@ -1158,7 +1158,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1174,7 +1174,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1190,7 +1190,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1206,7 +1206,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     void *haddr;
     MemOpIdx mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
-- 
2.43.0