Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/optimize.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
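[Editor's note: for readers unfamiliar with the pattern, the change below computes the known-bits masks in locals and hands them to a single finishing helper, fold_masks_zs(), instead of writing into the shared OptContext slots (ctx->z_mask / ctx->s_mask). The following is a minimal standalone sketch of that shape, not QEMU code: the ValueInfo struct, the helper body, fold_ld8u() and main() are illustrative stand-ins, and MAKE_64BIT_MASK is simplified (it assumes 0 < length < 64, unlike QEMU's version).]

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in; assumes 0 < length < 64. */
#define MAKE_64BIT_MASK(shift, length) \
    ((~0ULL >> (64 - (length))) << (shift))

/* Illustrative stand-in for the per-value info the optimizer tracks. */
typedef struct {
    uint64_t z_mask;   /* bits that may be nonzero */
    uint64_t s_mask;   /* high bits known to replicate the sign */
} ValueInfo;

/*
 * Hypothetical finisher: record both masks for the op's output in one
 * place and report the op as handled, so callers never touch shared slots.
 */
static bool fold_masks_zs(ValueInfo *out, uint64_t z_mask, uint64_t s_mask)
{
    out->z_mask = z_mask;
    out->s_mask = s_mask;
    return true;
}

/* Toy analogue of the ld8u case: masks live in locals until the end. */
static bool fold_ld8u(ValueInfo *out)
{
    uint64_t z_mask = MAKE_64BIT_MASK(0, 8);   /* only bits 0..7 may be set */
    uint64_t s_mask = MAKE_64BIT_MASK(9, 55);  /* bits 9..63, per the patch's value */
    return fold_masks_zs(out, z_mask, s_mask);
}

int main(void)
{
    ValueInfo v;
    fold_ld8u(&v);
    /* Prints z_mask=00000000000000ff s_mask=fffffffffffffe00 */
    printf("z_mask=%016" PRIx64 " s_mask=%016" PRIx64 "\n", v.z_mask, v.s_mask);
    return 0;
}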
diff --git a/tcg/optimize.c b/tcg/optimize.c
index bfce2dcf60..56bf3c1aaa 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2675,33 +2675,35 @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
 
 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask = -1, s_mask;
+
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
     CASE_OP_32_64(ld8s):
-        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        s_mask = MAKE_64BIT_MASK(8, 56);
         break;
     CASE_OP_32_64(ld8u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
+        z_mask = MAKE_64BIT_MASK(0, 8);
+        s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
-        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+        s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
+        z_mask = MAKE_64BIT_MASK(0, 16);
+        s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
-        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+        s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
-        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
+        z_mask = MAKE_64BIT_MASK(0, 32);
+        s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
-- 
2.43.0