On 12/10/24 07:23, Richard Henderson wrote:
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
  tcg/optimize.c | 22 ++++++++++++----------
  1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 6412d5df37..4cccb3b0cf 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2636,33 +2636,35 @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
  static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
  {
+    uint64_t z_mask = -1, s_mask;
+
      /* We can't do any folding with a load, but we can record bits. */
      switch (op->opc) {
      CASE_OP_32_64(ld8s):
-        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        s_mask = MAKE_64BIT_MASK(8, 56);
          break;
      CASE_OP_32_64(ld8u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
+        z_mask = MAKE_64BIT_MASK(0, 8);
+        s_mask = MAKE_64BIT_MASK(9, 55);
          break;
      CASE_OP_32_64(ld16s):
-        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+        s_mask = MAKE_64BIT_MASK(16, 48);
          break;
      CASE_OP_32_64(ld16u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
+        z_mask = MAKE_64BIT_MASK(0, 16);
+        s_mask = MAKE_64BIT_MASK(17, 47);
          break;
      case INDEX_op_ld32s_i64:
-        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+        s_mask = MAKE_64BIT_MASK(32, 32);
          break;
      case INDEX_op_ld32u_i64:
-        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
+        z_mask = MAKE_64BIT_MASK(0, 32);
+        s_mask = MAKE_64BIT_MASK(33, 31);
          break;
      default:
          g_assert_not_reached();
      }
-    return false;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
  }
  static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
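
For reference, a minimal standalone sketch (not part of the patch) of what
the ld8u mask pair above evaluates to, assuming MAKE_64BIT_MASK(shift, len)
expands to (((~0ULL) >> (64 - (len))) << (shift)) as in
include/qemu/bitops.h:

  #include <inttypes.h>
  #include <stdio.h>

  #define MAKE_64BIT_MASK(shift, length) \
      (((~0ULL) >> (64 - (length))) << (shift))

  int main(void)
  {
      /* ld8u: bits 8..63 are known zero, so z_mask keeps only bits 0..7. */
      uint64_t z_mask = MAKE_64BIT_MASK(0, 8);   /* 0x00000000000000ff */
      /* Bits 9..63 then repeat bit 8, so s_mask covers bits 9..63. */
      uint64_t s_mask = MAKE_64BIT_MASK(9, 55);  /* 0xfffffffffffffe00 */
      printf("z_mask %016" PRIx64 "  s_mask %016" PRIx64 "\n",
             z_mask, s_mask);
      return 0;
  }

The other cases follow the same pattern: z_mask confines the known-zero high
bits to the load width, and s_mask marks the bits above the (zero or real)
sign bit as repetitions of it.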

Reviewed-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>

