This fixes coverity issues 75235919, etc.:

1524     /* Handle CPU specific unaligned behaviour */
         CID 75235919: (OVERFLOW_BEFORE_WIDEN)
1525.    overflow_before_widen: Potentially overflowing expression
         "1 << a_bits" with type "int" (32 bits, signed) is evaluated
         using 32-bit arithmetic, and then used in a context that
         expects an expression of type "target_ulong" (64 bits,
         unsigned).
1525     if (addr & ((1 << a_bits) - 1)) {
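For context, a minimal standalone sketch of the overflow-before-widen
pattern the warning describes follows. This is not QEMU code: uint64_t
stands in for target_ulong, the helper names and a_bits values are
hypothetical, and in the TCG helpers a_bits is derived from the memory-op
alignment and is small in practice, so this only illustrates the class of
defect being fixed.

/* Sketch only, names and values hypothetical (not QEMU code). */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Old pattern: the mask is built in 32-bit signed int arithmetic and only
 * widened afterwards; the shift overflows int (undefined behaviour) once
 * a_bits reaches 31, which is what Coverity flags. */
static uint64_t mask_narrow(unsigned a_bits)
{
    return (uint64_t)((1 << a_bits) - 1);
}

/* Fixed pattern from this patch: widen first, then shift, so the whole
 * expression is evaluated in 64-bit unsigned arithmetic. */
static uint64_t mask_wide(unsigned a_bits)
{
    return ((uint64_t)1 << a_bits) - 1;
}

int main(void)
{
    /* Both forms agree for the small alignments seen in practice... */
    printf("a_bits=4:  narrow=0x%" PRIx64 " wide=0x%" PRIx64 "\n",
           mask_narrow(4), mask_wide(4));
    /* ...but only the widened form stays well defined up to 63 bits. */
    printf("a_bits=40: wide=0x%" PRIx64 "\n", mask_wide(40));
    return 0;
}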
Signed-off-by: Yifei Jiang <jiangyi...@huawei.com>
Signed-off-by: Mingwang Li <limingw...@huawei.com>
Reported-by: Euler Robot <euler.ro...@huawei.com>
---
 accel/tcg/cputlb.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e3b5750c3b..73b5e680be 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1412,7 +1412,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     retaddr -= GETPC_ADJ;
 
     /* Enforce guest required alignment. */
-    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+    if (unlikely(a_bits > 0 && (addr & (((target_ulong)1 << a_bits) - 1)))) {
         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
@@ -1522,7 +1522,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     size_t size = memop_size(op);
 
     /* Handle CPU specific unaligned behaviour */
-    if (addr & ((1 << a_bits) - 1)) {
+    if (addr & (((target_ulong)1 << a_bits) - 1)) {
         cpu_unaligned_access(env_cpu(env), addr, access_type,
                              mmu_idx, retaddr);
     }
@@ -1911,7 +1911,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
     size_t size = memop_size(op);
 
     /* Handle CPU specific unaligned behaviour */
-    if (addr & ((1 << a_bits) - 1)) {
+    if (addr & (((target_ulong)1 << a_bits) - 1)) {
         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
-- 
2.19.1