Hi all,
This simple patch tightens the conditions for matching the movw and
arm_movt rtx patterns.
Those two patterns will generate the following assembly:
movw r1, #:lower16:dummy + addend
movt r1, #:upper16:dummy + addend
The addend here is optional. However, when present it must be a 16-bit
signed value within the range -32768 <= A <= 32767.
Imposing this restriction explicitly prevents the LRA/reload code from
generating invalid high/lo_sum code for the arm target.
In process_address_1(), if the address is not legitimate, LRA will try to
generate a high/lo_sum pair to put the address into a register, and it
will check whether the target supports those newly generated reload
instructions. With the conditions added to these two patterns, the arm
backend will reject them when the conditions are not met.
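To illustrate, the reload RTL involved looks roughly like the following
high/lo_sum pair (the register, the symbol "dummy" and the 70000 addend are
made up for this sketch, not taken from a real test case):

(set (reg:SI 0 r0)
     (high:SI (const:SI (plus:SI (symbol_ref:SI "dummy")
                                 (const_int 70000)))))
(set (reg:SI 0 r0)
     (lo_sum:SI (reg:SI 0 r0)
                (const:SI (plus:SI (symbol_ref:SI "dummy")
                                   (const_int 70000)))))

With the patch, arm_valid_symbolic_address_p rejects the 70000 addend, so
the "j" constraint on the high part and the arm_movt pattern on the lo_sum
part both refuse this split, and LRA has to materialise the address in some
other way.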
Without this restriction, GCC might generate movw/movt instructions with an
addend outside the signed 16-bit range, which leads to a GAS error: GAS
produces an "offset out of range" error message when the addend for a
MOVW/MOVT REL relocation is too large.
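For example (the symbol and addend are made up here, and the exact
diagnostic may vary between GAS versions), something like

movw r1, #:lower16:dummy + 70000
movt r1, #:upper16:dummy + 70000

is rejected, since for a REL relocation the addend has to be encoded in the
16-bit immediate field of each instruction.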
arm-none-eabi regression tests are okay. Okay to commit to trunk and
backport to the 5.0 branch?
Regards,
Renlin
gcc/ChangeLog:
2015-08-19  Renlin Li  <renlin...@arm.com>
* config/arm/arm-protos.h (arm_valid_symbolic_address_p): Declare.
* config/arm/arm.c (arm_valid_symbolic_address_p): Define.
* config/arm/arm.md (arm_movt): Use arm_valid_symbolic_address_p.
* config/arm/constraints.md ("j"): Add check for high code.
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 16eb854..ebaf746 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -312,6 +312,7 @@ extern int vfp3_const_double_for_bits (rtx);
extern void arm_emit_coreregs_64bit_shift (enum rtx_code, rtx, rtx, rtx, rtx,
rtx);
+extern bool arm_valid_symbolic_address_p (rtx);
extern bool arm_validize_comparison (rtx *, rtx *, rtx *);
#endif /* RTX_CODE */
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index cf60313..860c784 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -28811,6 +28811,35 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
#undef BRANCH
}
+/* Returns true if the pattern is a valid symbolic address, which is either a
+   symbol_ref or a symbol_ref + offset.  */
+bool
+arm_valid_symbolic_address_p (rtx addr)
+{
+  rtx xop0, xop1 = NULL_RTX;
+  rtx tmp = addr;
+
+  if (GET_CODE (tmp) == SYMBOL_REF || GET_CODE (tmp) == LABEL_REF)
+    return true;
+
+  /* (const (plus: symbol_ref const_int))  */
+  if (GET_CODE (addr) == CONST)
+    tmp = XEXP (addr, 0);
+
+  /* Make sure we have a PLUS before looking at its operands.  */
+  if (GET_CODE (tmp) == PLUS)
+    {
+      xop0 = XEXP (tmp, 0);
+      xop1 = XEXP (tmp, 1);
+      if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
+        {
+          HOST_WIDE_INT offset = INTVAL (xop1);
+          return offset >= -0x8000 && offset <= 0x7fff;
+        }
+    }
+
+  return false;
+}
/* Returns true if a valid comparison operation and makes
the operands in a form that is valid. */
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index f63fc39..7ac4f34 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -5662,7 +5662,7 @@
  [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
        (lo_sum:SI (match_operand:SI 1 "nonimmediate_operand" "0")
                   (match_operand:SI 2 "general_operand" "i")))]
-  "arm_arch_thumb2"
+  "arm_arch_thumb2 && arm_valid_symbolic_address_p (operands[2])"
  "movt%?\t%0, #:upper16:%c2"
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index 42935a4..f9e11e0 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -67,7 +67,8 @@
(define_constraint "j"
"A constant suitable for a MOVW instruction. (ARM/Thumb-2)"
(and (match_test "TARGET_32BIT && arm_arch_thumb2")
- (ior (match_code "high")
+ (ior (and (match_code "high")
+ (match_test "arm_valid_symbolic_address_p (XEXP (op, 0))"))
(and (match_code "const_int")
(match_test "(ival & 0xffff0000) == 0")))))