diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index 944cf10..bc70780 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -117,6 +117,8 @@ extern bool gen_movmem_ldrd_strd (rtx *);
 extern enum machine_mode arm_select_cc_mode (RTX_CODE, rtx, rtx);
 extern enum machine_mode arm_select_dominance_cc_mode (rtx, rtx,
 						       HOST_WIDE_INT);
+extern enum machine_mode arm_select_dominance_ccmp_mode (rtx, enum machine_mode,
+							 HOST_WIDE_INT);
 extern rtx arm_gen_compare_reg (RTX_CODE, rtx, rtx, rtx);
 extern rtx arm_gen_return_addr_mask (void);
 extern void arm_reload_in_hi (rtx *);
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 23dfc0e..1c88cba 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -284,6 +284,12 @@ static unsigned arm_add_stmt_cost (void *data, int count,
 static void arm_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
 					 bool op0_preserve_value);
 static unsigned HOST_WIDE_INT arm_asan_shadow_offset (void);
+static int arm_select_ccmp_cmp_order (int, int);
+static rtx arm_gen_ccmp_first (int, rtx, rtx);
+static rtx arm_gen_ccmp_next (rtx, int, rtx, rtx, int, bool);
+static enum machine_mode arm_select_dominance_cc_mode_1 (enum rtx_code cond1,
+							 enum rtx_code cond2,
+							 HOST_WIDE_INT);
 
 /* Table of machine attributes.  */
 static const struct attribute_spec arm_attribute_table[] =
@@ -669,6 +675,14 @@ static const struct attribute_spec arm_attribute_table[] =
 #undef MAX_INSN_PER_IT_BLOCK
 #define MAX_INSN_PER_IT_BLOCK (arm_restrict_it ? 1 : 4)
 
+#undef TARGET_SELECT_CCMP_CMP_ORDER
+#define TARGET_SELECT_CCMP_CMP_ORDER arm_select_ccmp_cmp_order
+
+#undef TARGET_GEN_CCMP_FIRST
+#define TARGET_GEN_CCMP_FIRST arm_gen_ccmp_first
+
+#undef TARGET_GEN_CCMP_NEXT
+#define TARGET_GEN_CCMP_NEXT arm_gen_ccmp_next
 
 struct gcc_target targetm = TARGET_INITIALIZER;
 
@@ -14256,7 +14270,13 @@ arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
       cond1 = cond2;
       cond2 = temp;
     }
+  return arm_select_dominance_cc_mode_1 (cond1, cond2, cond_or);
+}
 
+static enum machine_mode
+arm_select_dominance_cc_mode_1 (enum rtx_code cond1, enum rtx_code cond2,
+				HOST_WIDE_INT cond_or)
+{
   switch (cond1)
     {
     case EQ:
@@ -14337,8 +14357,7 @@ arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
 	  gcc_unreachable ();
 	}
 
-    /* The remaining cases only occur when both comparisons are the
-       same.  */
+    /* The remaining cases only occur when both comparisons are the same.  */
     case NE:
       gcc_assert (cond1 == cond2);
       return CC_DNEmode;
@@ -14364,6 +14383,206 @@ arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
     }
 }
 
+static enum rtx_code
+arm_ccmode_to_code (enum machine_mode mode)
+{
+  switch (mode)
+    {
+    case CC_DNEmode:
+      return NE;
+    case CC_DEQmode:
+      return EQ;
+    case CC_DLEmode:
+      return LE;
+    case CC_DLTmode:
+      return LT;
+    case CC_DGEmode:
+      return GE;
+    case CC_DGTmode:
+      return GT;
+    case CC_DLEUmode:
+      return LEU;
+    case CC_DLTUmode:
+      return LTU;
+    case CC_DGEUmode:
+      return GEU;
+    case CC_DGTUmode:
+      return GTU;
+    default:
+      return UNKNOWN;
+    }
+}
+
+static enum machine_mode
+arm_code_to_ccmode (enum rtx_code code)
+{
+  switch (code)
+    {
+    case NE:
+      return CC_DNEmode;
+    case EQ:
+      return CC_DEQmode;
+    case LE:
+      return CC_DLEmode;
+    case LT:
+      return CC_DLTmode;
+    case GE:
+      return CC_DGEmode;
+    case GT:
+      return CC_DGTmode;
+    case LEU:
+      return CC_DLEUmode;
+    case LTU:
+      return CC_DLTUmode;
+    case GEU:
+      return CC_DGEUmode;
+    case GTU:
+      return CC_DGTUmode;
+    default:
+      return CCmode;
+    }
+}
+
+/* MODE is the CC mode result of the previous conditional compare.
+   X is the next compare.  */
+enum machine_mode
+arm_select_dominance_ccmp_mode (rtx x, enum machine_mode mode,
+			 	HOST_WIDE_INT cond_or)
+{
+  enum rtx_code cond1 = arm_ccmode_to_code (mode);
+  enum rtx_code cond2;
+
+  if (cond1 == UNKNOWN)
+    return CCmode;
+
+  /* Currently we will probably get the wrong result if the individual
+     comparisons are not simple.  */
+  if (arm_select_cc_mode (cond2 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+      != CCmode)
+    return CCmode;
+
+  /* If the comparisons are not equal, and one doesn't dominate the other,
+     then we can't do this.  Since there is a conditional compare before
+     current insn, we can not swap the compares.  So we have to check the
+     dominate relation separately for DOM_CC_X_OR_Y and DOM_CC_X_AND_Y.  */
+  if (cond1 != cond2
+      && !(cond_or == DOM_CC_X_OR_Y ? comparison_dominates_p (cond1, cond2)
+				      : comparison_dominates_p (cond2, cond1)))
+    return CCmode;
+
+  if (cond_or == DOM_CC_X_OR_Y)
+    return arm_select_dominance_cc_mode_1 (cond1, cond2, cond_or);
+  else
+    return arm_select_dominance_cc_mode_1 (cond2, cond1, cond_or);
+}
+
+/* COND1 and COND2 should be enum rtx_code, which represent two compares.
+   The order is sensitive for conditional compare.  It returns
+     -1: if COND1-COND2 is a valid combination.
+      1: if COND2-COND1 is a valid combination.
+      0: invalid.  */
+
+static int
+arm_select_ccmp_cmp_order (int cond1, int cond2)
+{
+  if (cond1 == cond2)
+    return -1;
+  if (comparison_dominates_p ((enum rtx_code) cond1, (enum rtx_code) cond2))
+    return 1;
+  if (comparison_dominates_p ((enum rtx_code) cond2, (enum rtx_code) cond1))
+    return -1;
+  return 0;
+
+}
+
+static void
+arm_convert_to_SImode (rtx* op0, rtx* op1, int unsignedp)
+{
+  enum machine_mode mode;
+
+  mode = GET_MODE (*op0);
+  if (mode == VOIDmode)
+    mode = GET_MODE (*op1);
+
+  if (mode == QImode || mode == HImode)
+    {
+      *op0 = convert_modes (SImode, mode, *op0, unsignedp);
+      *op1 = convert_modes (SImode, mode, *op1, unsignedp);
+    }
+}
+
+static rtx
+arm_gen_ccmp_first (int code, rtx op0, rtx op1)
+{
+  enum machine_mode mode;
+  rtx cmp, target;
+  int unsignedp = code == LTU || code == LEU || code == GTU || code == GEU;
+
+  arm_convert_to_SImode (&op0, &op1, unsignedp);
+  if (!s_register_operand (op0, SImode) || !arm_add_operand (op1, SImode))
+     /* Do we need to convert the operands to registers?  If converting them
+	to registers, we add more overhead for conditional compare.  */
+    return NULL_RTX;
+
+  mode = arm_code_to_ccmode ((enum rtx_code) code);
+  if (mode == CCmode)
+    return NULL_RTX;
+
+  cmp = gen_rtx_fmt_ee (COMPARE, CCmode, op0, op1);
+  target = gen_rtx_REG (mode, CC_REGNUM);
+  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), cmp));
+  return target;
+}
+
+static rtx
+arm_gen_ccmp_next (rtx prev, int cmp_code, rtx op0, rtx op1,
+		   int bit_code, bool cc_p)
+{
+  rtx cmp0, cmp1, target, bit_op;
+  HOST_WIDE_INT cond_or;
+  enum machine_mode mode;
+  int unsignedp = cmp_code == LTU || cmp_code == LEU
+		  || cmp_code == GTU || cmp_code == GEU;
+
+  arm_convert_to_SImode (&op0, &op1, unsignedp);
+  if (!s_register_operand (op0, SImode) || !arm_add_operand (op1, SImode))
+     /* Do we need to convert the operands to registers?  If converting them
+	to registers, we add more overhead for conditional compare.  */
+    return NULL_RTX;
+
+  cmp1 = gen_rtx_fmt_ee ((enum rtx_code) cmp_code, SImode, op0, op1);
+  cond_or = bit_code == AND ? DOM_CC_X_AND_Y : DOM_CC_X_OR_Y;
+  mode = arm_select_dominance_ccmp_mode (cmp1, GET_MODE (prev), cond_or);
+  if (mode == CCmode)
+    return NULL_RTX;
+
+  cmp0 = gen_rtx_fmt_ee (NE, SImode, prev, const0_rtx);
+
+  bit_op = gen_rtx_fmt_ee ((enum rtx_code) bit_code, SImode, cmp0, cmp1);
+  if (cc_p)
+    {
+      /* Generate insn to match cmp_and/cmp_ior/ccmp_and/ccmp_ior.  */
+      target = gen_rtx_REG (mode, CC_REGNUM);
+      emit_insn (gen_rtx_SET (VOIDmode, target,
+			      gen_rtx_fmt_ee (COMPARE, VOIDmode,
+					      bit_op, const0_rtx)));
+    }
+  else
+    {
+      /* Generate insn to match and_scc_scc/ior_scc_scc or
+	 ccmp_and_scc_scc/ccmp_ior_scc_scc.  */
+      rtx par;
+
+      target = gen_reg_rtx (SImode);
+      par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
+      XVECEXP (par, 0, 0) = gen_rtx_SET (SImode, target, bit_op);
+      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (CCmode,
+					     gen_rtx_REG (CCmode, CC_REGNUM));
+      emit_insn (par);
+    }
+  return target;
+}
+
 enum machine_mode
 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
 {
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 3726201..287070a 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -12797,3 +12797,218 @@
 (include "sync.md")
 ;; Fixed-point patterns
 (include "arm-fixed.md")
+
+(define_expand "cbranchcc4"
+  [(set (pc) (if_then_else
+	      (match_operator 0 "expandable_comparison_operator"
+	       [(match_operand 1 "dominant_cc_register" "")
+		(const_int 0)])
+	      (label_ref (match_operand 3 "" ""))
+	      (pc)))]
+  "TARGET_ARM || TARGET_THUMB2"
+  " ")
+
+;; The first compare in this pattern is the result of a previous CCMP.
+;; We can not swap it.  And we only need its flag.
+(define_insn "*ccmp_and"
+  [(set (match_operand 6 "dominant_cc_register" "")
+	(compare
+	 (and:SI
+	  (match_operator 4 "expandable_comparison_operator"
+	   [(match_operand 0 "dominant_cc_register" "")
+	    (match_operand:SI 1 "arm_add_operand" "")])
+	  (match_operator:SI 5 "arm_comparison_operator"
+	   [(match_operand:SI 2 "s_register_operand"
+	        "l,r,r,r,r")
+	    (match_operand:SI 3 "arm_add_operand"
+	        "lPy,rI,L,rI,L")]))
+	 (const_int 0)))]
+  "TARGET_32BIT"
+  {
+    static const char *const cmp2[2] =
+    {
+      "cmp%d4\t%2, %3",
+      "cmn%d4\t%2, #%n3"
+    };
+    static const char *const ite = "it\t%d4";
+    static const int cmp_idx[5] = {0, 0, 1, 0, 1};
+
+    if (TARGET_THUMB2)
+      output_asm_insn (ite, operands);
+
+    output_asm_insn (cmp2[cmp_idx[which_alternative]], operands);
+    return "";
+  }
+  [(set_attr "conds" "set")
+   (set_attr "predicable" "no")
+   (set_attr "arch" "t2,t2,t2,any,any")
+   (set_attr_alternative "length"
+      [(const_int 4)
+       (const_int 6)
+       (const_int 6)
+       (if_then_else (eq_attr "is_thumb" "no")
+           (const_int 4)
+           (const_int 6))
+       (if_then_else (eq_attr "is_thumb" "no")
+           (const_int 4)
+           (const_int 6))])]
+)
+
+;; The first compare in this pattern is the result of a previous CCMP.
+;; We can not swap it.  And we only need its flag.
+(define_insn "*ccmp_ior"
+  [(set (match_operand 6 "dominant_cc_register" "")
+	(compare
+	 (ior:SI
+	  (match_operator 4 "expandable_comparison_operator"
+	   [(match_operand 0 "dominant_cc_register" "")
+	    (match_operand:SI 1 "arm_add_operand" "")])
+	  (match_operator:SI 5 "arm_comparison_operator"
+	   [(match_operand:SI 2 "s_register_operand"
+	        "l,r,r,r,r")
+	    (match_operand:SI 3 "arm_add_operand"
+	        "lPy,rI,L,rI,L")]))
+	 (const_int 0)))]
+  "TARGET_32BIT"
+  {
+    static const char *const cmp2[2] =
+    {
+      "cmp%D4\t%2, %3",
+      "cmn%D4\t%2, #%n3"
+    };
+    static const char *const ite = "it\t%D4";
+    static const int cmp_idx[5] = {0, 0, 1, 0, 1};
+
+    if (TARGET_THUMB2)
+      output_asm_insn (ite, operands);
+
+    output_asm_insn (cmp2[cmp_idx[which_alternative]], operands);
+    return "";
+  }
+  [(set_attr "conds" "set")
+   (set_attr "arch" "t2,t2,t2,any,any")
+   (set_attr_alternative "length"
+      [(const_int 4)
+       (const_int 6)
+       (const_int 6)
+       (if_then_else (eq_attr "is_thumb" "no")
+           (const_int 4)
+           (const_int 6))
+       (if_then_else (eq_attr "is_thumb" "no")
+           (const_int 4)
+           (const_int 6))])]
+)
+
+(define_insn_and_split "*ccmp_ior_scc_scc"
+  [(set (match_operand:SI 0 "s_register_operand" "=Ts")
+	(ior:SI (match_operator 3 "expandable_comparison_operator"
+		 [(match_operand 1 "dominant_cc_register" "")
+		  (match_operand:SI 2 "arm_add_operand" "rIL")])
+		(match_operator:SI 6 "arm_comparison_operator"
+		 [(match_operand:SI 4 "s_register_operand" "r")
+		  (match_operand:SI 5 "arm_add_operand" "rIL")])))
+   (clobber (reg:CC CC_REGNUM))]
+  "TARGET_32BIT
+   && (arm_select_dominance_ccmp_mode (operands[6], GET_MODE (operands[1]),
+				       DOM_CC_X_OR_Y) != CCmode)"
+  "#"
+  "TARGET_32BIT && reload_completed"
+  [(set (match_dup 7)
+	(compare
+	 (ior:SI
+	  (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+	  (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+	 (const_int 0)))
+   (set (match_dup 0) (ne:SI (match_dup 7) (const_int 0)))]
+  "operands[7]
+     = gen_rtx_REG (arm_select_dominance_ccmp_mode (operands[6],
+						    GET_MODE (operands[1]),
+						    DOM_CC_X_OR_Y),
+		    CC_REGNUM);"
+  [(set_attr "conds" "clob")
+   (set_attr "length" "12")])
+
+; If the above pattern is followed by a CMP insn, then the compare is
+; redundant, since we can rework the conditional instruction that follows.
+(define_insn_and_split "*ccmp_ior_scc_scc_cmp"
+  [(set (match_operand 0 "dominant_cc_register" "")
+	(compare (ior:SI (match_operator 3 "expandable_comparison_operator"
+			  [(match_operand 1 "dominant_cc_register" "")
+			   (match_operand:SI 2 "arm_add_operand" "rIL")])
+			 (match_operator:SI 6 "arm_comparison_operator"
+			  [(match_operand:SI 4 "s_register_operand" "r")
+			   (match_operand:SI 5 "arm_add_operand" "rIL")]))
+		 (const_int 0)))
+   (set (match_operand:SI 7 "s_register_operand" "=Ts")
+	(ior:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+		(match_op_dup 6 [(match_dup 4) (match_dup 5)])))]
+  "TARGET_32BIT"
+  "#"
+  "TARGET_32BIT && reload_completed"
+  [(set (match_dup 0)
+	(compare
+	 (ior:SI
+	  (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+	  (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+	 (const_int 0)))
+   (set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
+  ""
+  [(set_attr "conds" "set")
+   (set_attr "length" "12")])
+
+(define_insn_and_split "*ccmp_and_scc_scc"
+  [(set (match_operand:SI 0 "s_register_operand" "=Ts")
+	(and:SI (match_operator 3 "expandable_comparison_operator"
+		 [(match_operand 1 "dominant_cc_register" "")
+		  (match_operand:SI 2 "arm_add_operand" "rIL")])
+		(match_operator:SI 6 "arm_comparison_operator"
+		 [(match_operand:SI 4 "s_register_operand" "r")
+		  (match_operand:SI 5 "arm_add_operand" "rIL")])))
+   (clobber (reg:CC CC_REGNUM))]
+  "TARGET_32BIT
+   && (arm_select_dominance_ccmp_mode (operands[6], GET_MODE (operands[1]),
+				       DOM_CC_X_AND_Y) != CCmode)"
+  "#"
+  "TARGET_32BIT && reload_completed"
+  [(set (match_dup 7)
+	(compare
+	 (and:SI
+	  (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+	  (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+	 (const_int 0)))
+   (set (match_dup 0) (ne:SI (match_dup 7) (const_int 0)))]
+  "operands[7]
+     = gen_rtx_REG (arm_select_dominance_ccmp_mode (operands[6],
+						    GET_MODE (operands[1]),
+						    DOM_CC_X_AND_Y),
+		    CC_REGNUM);"
+  [(set_attr "conds" "clob")
+   (set_attr "length" "12")])
+
+; If the above pattern is followed by a CMP insn, then the compare is
+; redundant, since we can rework the conditional instruction that follows.
+(define_insn_and_split "*ccmp_and_scc_scc_cmp"
+  [(set (match_operand 0 "dominant_cc_register" "")
+	(compare (and:SI (match_operator 3 "expandable_comparison_operator"
+			  [(match_operand 1 "dominant_cc_register" "")
+			   (match_operand:SI 2 "arm_add_operand" "rIL")])
+			 (match_operator:SI 6 "arm_comparison_operator"
+			  [(match_operand:SI 4 "s_register_operand" "r")
+			   (match_operand:SI 5 "arm_add_operand" "rIL")]))
+		 (const_int 0)))
+   (set (match_operand:SI 7 "s_register_operand" "=Ts")
+	(and:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+		(match_op_dup 6 [(match_dup 4) (match_dup 5)])))]
+  "TARGET_32BIT"
+  "#"
+  "TARGET_32BIT && reload_completed"
+  [(set (match_dup 0)
+	(compare
+	 (and:SI
+	  (match_op_dup 3 [(match_dup 1) (match_dup 2)])
+	  (match_op_dup 6 [(match_dup 4) (match_dup 5)]))
+	 (const_int 0)))
+   (set (match_dup 7) (ne:SI (match_dup 0) (const_int 0)))]
+  ""
+  [(set_attr "conds" "set")
+   (set_attr "length" "12")])
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index ac10a0a..c81686a 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -6158,6 +6158,58 @@ A typical @code{ctrap} pattern looks like
   "@dots{}")
 @end smallexample
 
+@cindex @code{ccmp} instruction pattern
+@item @samp{ccmp}
+Conditional compare instruction.  Operands 2 and 5 are RTLs which perform
+two comparisons.  Operand 1 is AND or IOR, which operates on the result of
+operands 2 and 5.  Operand 0 is the result of operand 1.
+It uses a recursive method to support more than two compares.  e.g.
+
+  CC0 = CMP (a, b);
+  CC1 = CCMP (NE (CC0, 0), CMP (e, f));
+  ...
+  CCn/reg = CCMP (NE (CCn-1, 0), CMP (...));
+
+Two target hooks are used to generate conditional compares.  GEN_CCMP_FIRST
+is used to generate the first CMP.  And GEN_CCMP_NEXT is used to generate the
+following CCMPs.  Operand 1 is AND or IOR.  Operand 3 is the result of
+GEN_CCMP_FIRST or a previous GEN_CCMP_NEXT.  Operand 2 is NE.
+Operands 4, 5 and 6 form another compare expression.
+
+A typical CCMP pattern which returns a reg looks like
+
+@smallexample
+(define_insn "*ccmp_scc_scc"
+  [(set (match_operand 0 "register_operand" "")
+        (match_operator 1 ""
+         [(match_operator 2 "comparison_operator"
+          [(match_operand 3 "cc_register")
+           (const_int 0)])
+         (match_operator 4 "comparison_operator"
+          [(match_operand 5 "register_operand")
+           (match_operand 6 "compare_operand")])]))]
+  ""
+  "@dots{}")
+@end smallexample
+
+A typical CCMP pattern which returns a CC looks like
+
+@smallexample
+(define_insn "*ccmp_and_ior"
+  [(set (match_operand 0 "dominant_cc_register" "")
+        (compare
+         (match_operator 1 ""
+          (match_operator 2 "comparison_operator"
+           [(match_operand 3 "dominant_cc_register")
+            (const_int 0)])
+          (match_operator 4 "comparison_operator"
+           [(match_operand 5 "register_operand")
+            (match_operand 6 "compare_operand")]))
+         (const_int 0)))]
+  ""
+  "@dots{}")
+@end smallexample
+
 @cindex @code{prefetch} instruction pattern
 @item @samp{prefetch}
 
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 72daf09..f8f3993 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -11245,6 +11245,33 @@ This target hook is required only when the target has several different
 modes and they have different conditional execution capability, such as ARM.
 @end deftypefn
 
+@deftypefn {Target Hook} int TARGET_SELECT_CCMP_CMP_ORDER (int @var{code1}, int @var{code2})
+For some targets (like ARM), the order of two compares is sensitive for
+conditional compare.  cmp0-cmp1 might be an invalid combination.  But when
+swapping the order, cmp1-cmp0 is valid.  The function will return
+  -1: if @code{code1} and @code{code2} are a valid combination.
+   1: if @code{code2} and @code{code1} are a valid combination.
+   0: both orders are invalid.
+@end deftypefn
+
+@deftypefn {Target Hook} rtx TARGET_GEN_CCMP_FIRST (int @var{code}, rtx @var{op0}, rtx @var{op1})
+This function emits a comparison insn for the first of a sequence of
+ conditional comparisons.  It returns a comparison expression appropriate
+ for passing to @code{gen_ccmp_next} or to @code{cbranch_optab}.
+ @code{code} is the rtx comparison code applied to @code{op0} and @code{op1}.
+@end deftypefn
+
+@deftypefn {Target Hook} rtx TARGET_GEN_CCMP_NEXT (rtx @var{prev}, int @var{cmp_code}, rtx @var{op0}, rtx @var{op1}, int @var{bit_code}, bool @var{cc_p})
+This function emits a conditional comparison within a sequence of
+ conditional comparisons.  The @code{prev} expression is the result of a
+ prior call to @code{gen_ccmp_first} or @code{gen_ccmp_next}.  It may return
+ @code{NULL} if the combination of @code{prev} and this comparison is
+ not supported, otherwise the result must be appropriate for passing to
+ @code{gen_ccmp_next} or @code{cbranch_optab} if @code{cc_p} is true.
+ If @code{cc_p} is false, it returns a general register.  @code{bit_code}
+ is AND or IOR, which is the op on the two compares.
+@end deftypefn
+
 @deftypefn {Target Hook} unsigned TARGET_LOOP_UNROLL_ADJUST (unsigned @var{nunroll}, struct loop *@var{loop})
 This target hook returns a new value for the number of times @var{loop}
 should be unrolled. The parameter @var{nunroll} is the number of times
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 2828361..85c7df3 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -8276,6 +8276,12 @@ build_type_attribute_variant (@var{mdecl},
 
 @hook TARGET_HAVE_CONDITIONAL_EXECUTION
 
+@hook TARGET_SELECT_CCMP_CMP_ORDER
+
+@hook TARGET_GEN_CCMP_FIRST
+
+@hook TARGET_GEN_CCMP_NEXT
+
 @hook TARGET_LOOP_UNROLL_ADJUST
 
 @defmac POWI_MAX_MULTS
diff --git a/gcc/expr.c b/gcc/expr.c
index 551a660..09ed366 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -57,6 +57,8 @@ along with GCC; see the file COPYING3.  If not see
 #include "target-globals.h"
 #include "params.h"
 #include "tree-ssa-address.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
 
 /* Decide whether a function's arguments should be processed
    from first to last or from last to first.
@@ -9073,6 +9075,219 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
 }
 #undef REDUCE_BIT_FIELD
 
+/* Check whether G is a potential conditional compare candidate.  */
+static bool
+ccmp_candidate_p (gimple g)
+{
+  tree rhs = gimple_assign_rhs_to_tree (g);
+  tree lhs, op0, op1;
+  gimple gs0, gs1;
+  enum tree_code tcode, tcode0, tcode1;
+  tcode = TREE_CODE (rhs);
+
+  if (tcode != BIT_AND_EXPR && tcode != BIT_IOR_EXPR)
+    return false;
+
+  lhs = gimple_assign_lhs (g);
+  op0 = TREE_OPERAND (rhs, 0);
+  op1 = TREE_OPERAND (rhs, 1);
+
+  if ((TREE_CODE (op0) != SSA_NAME) || (TREE_CODE (op1) != SSA_NAME)
+      || !has_single_use (lhs))
+    return false;
+
+  gs0 = get_gimple_for_ssa_name (op0);
+  gs1 = get_gimple_for_ssa_name (op1);
+  if (!gs0 || !gs1 || !is_gimple_assign (gs0) || !is_gimple_assign (gs1)
+      /* g, gs0 and gs1 must be in the same basic block, since current stage
+	 is out-of-ssa.  We cannot guarantee the correctness when forwarding
+	 the gs0 and gs1 into g without dataflow analysis.  */
+      || gimple_bb (gs0) != gimple_bb (gs1)
+      || gimple_bb (gs0) != gimple_bb (g))
+    return false;
+
+  if (!(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0)))
+       || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs0))))
+      || !(INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1)))
+	   || POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (gs1)))))
+    return false;
+
+  tcode0 = gimple_assign_rhs_code (gs0);
+  tcode1 = gimple_assign_rhs_code (gs1);
+  if (TREE_CODE_CLASS (tcode0) == tcc_comparison
+      && TREE_CODE_CLASS (tcode1) == tcc_comparison)
+    return true;
+  if (TREE_CODE_CLASS (tcode0) == tcc_comparison
+      && ccmp_candidate_p (gs1))
+    return true;
+  else if (TREE_CODE_CLASS (tcode1) == tcc_comparison
+	   && ccmp_candidate_p (gs0))
+    return true;
+  /* We skip ccmp_candidate_p (gs1) && ccmp_candidate_p (gs0) since
+     there is no way to set the CC flag.  */
+  return false;
+}
+
+/* Check whether EXP is used in a GIMPLE_COND statement or not.  */
+static bool
+used_in_cond_stmt_p (tree exp)
+{
+  bool expand_cond = false;
+  imm_use_iterator ui;
+  gimple use_stmt;
+  FOR_EACH_IMM_USE_STMT (use_stmt, ui, exp)
+    if (gimple_code (use_stmt) == GIMPLE_COND)
+      {
+	tree op1 = gimple_cond_rhs (use_stmt);
+	/* TBD: If we can convert all
+	    _Bool t;
+
+	    if (t == 1)
+	      goto <bb 3>;
+	    else
+	      goto <bb 4>;
+	   to
+	    if (t != 0)
+	      goto <bb 3>;
+	    else
+	      goto <bb 4>;
+	   we can remove the following check.  */
+	if (integer_zerop (op1))
+	  expand_cond = true;
+	BREAK_FROM_IMM_USE_STMT (ui);
+      }
+  return expand_cond;
+}
+
+/* Help function to generate conditional compare.  PREV is the result of
+   GEN_CCMP_FIRST or GEN_CCMP_NEXT.  G is the next compare.
+   CODE is BIT_AND_EXPR or BIT_IOR_EXPR.
+   CC_P indicates the result be a CC or not.  */
+
+static rtx
+gen_ccmp_next (rtx prev, gimple g, enum tree_code code, bool cc_p)
+{
+  rtx op0, op1;
+  int unsignedp = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (g)));
+  enum rtx_code rcode = get_rtx_code (gimple_assign_rhs_code (g), unsignedp);
+
+  expand_operands (gimple_assign_rhs1 (g),
+		   gimple_assign_rhs2 (g),
+		   NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+  return targetm.gen_ccmp_next (prev, rcode, op0, op1,
+				get_rtx_code (code, 0), cc_p);
+}
+
+/* Expand conditional compare gimple G.  If CC_P is TRUE, it sets CC reg.
+   If I is 1, set TARGET to CC flag.  i.e.
+
+     CC0 = CMP (a, b);
+     CC1 = CCMP (NE (CC0, 0), CMP (e, f));
+     ...
+     CCn/reg = CCMP (NE (CCn-1, 0), CMP (...));
+
+   Only the last TARGET can be a reg if it is not used in a COND_EXPR.  */
+
+static rtx
+expand_ccmp_expr_1 (gimple g, bool cc_p)
+{
+  tree exp = gimple_assign_rhs_to_tree (g);
+  enum tree_code code = TREE_CODE (exp);
+  gimple gs0 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 0));
+  gimple gs1 = get_gimple_for_ssa_name (TREE_OPERAND (exp, 1));
+  rtx tmp;
+  enum tree_code code0 = gimple_assign_rhs_code (gs0);
+  enum tree_code code1 = gimple_assign_rhs_code (gs1);
+
+  gcc_assert (code == BIT_AND_EXPR || code == BIT_IOR_EXPR);
+  gcc_assert (gs0 && gs1 && is_gimple_assign (gs0) && is_gimple_assign (gs1));
+
+  if (TREE_CODE_CLASS (code0) == tcc_comparison)
+    {
+      if (TREE_CODE_CLASS (code1) == tcc_comparison)
+	{
+	  int unsignedp0, unsignedp1, dominate;
+	  enum rtx_code rcode0, rcode1, rcode;
+	  rtx op0, op1;
+	  gimple first, next;
+
+	  unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs0)));
+	  rcode0 = get_rtx_code (code0, unsignedp0);
+	  unsignedp1 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (gs1)));
+	  rcode1 = get_rtx_code (code1, unsignedp1);
+
+	  /* For some target (like ARM), the order of two compares is sensitive
+	     for conditional compare.  cmp0-cmp1 might be an invalid combination.
+	     But when swapping the order, cmp1-cmp0 is valid.  The target hook
+	     select_ccmp_cmp_order will return
+	       -1: if cmp0-cmp1 is valid
+	        1: if cmp1-cmp0 is valid
+	        0: both are invalid.  */
+
+	  dominate = targetm.select_ccmp_cmp_order (rcode0, rcode1);
+	  if (!dominate)
+	    return NULL_RTX;
+
+	  if (dominate == 1)
+	    {
+	      first = gs1;
+	      next = gs0;
+	      rcode = rcode1;
+	    }
+	  else
+	    {
+	      first = gs0;
+	      next = gs1;
+	      rcode = rcode0;
+	    }
+	  expand_operands (gimple_assign_rhs1 (first),
+			   gimple_assign_rhs2 (first),
+			   NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+	  tmp = targetm.gen_ccmp_first (rcode, op0, op1);
+	  if (!tmp)
+	    return NULL_RTX;
+
+	  return gen_ccmp_next (tmp, next, code, cc_p);
+	}
+      gcc_assert (code1 == BIT_AND_EXPR || code1 == BIT_IOR_EXPR);
+      tmp = expand_ccmp_expr_1 (gs1, true);
+      if (tmp)
+	return gen_ccmp_next (tmp, gs0, code, cc_p);
+    }
+  else
+    {
+      gcc_assert (gimple_assign_rhs_code (gs0) == BIT_AND_EXPR
+                  || gimple_assign_rhs_code (gs0) == BIT_IOR_EXPR);
+      if (TREE_CODE_CLASS (gimple_assign_rhs_code (gs1)) == tcc_comparison)
+	{
+	  tmp = expand_ccmp_expr_1 (gs0, true);
+	  if (tmp)
+	    return gen_ccmp_next (tmp, gs1, code, cc_p);
+	}
+      else
+	{
+	  gcc_assert (gimple_assign_rhs_code (gs1) == BIT_AND_EXPR
+		      || gimple_assign_rhs_code (gs1) == BIT_IOR_EXPR);
+	}
+    }
+
+  return NULL_RTX;
+}
+
+static rtx
+expand_ccmp_expr (gimple g)
+{
+  rtx last = get_last_insn ();
+  tree lhs = gimple_assign_lhs (g);
+  rtx tmp;
+  tmp = expand_ccmp_expr_1 (g, used_in_cond_stmt_p (lhs));
+  if (tmp)
+    return tmp;
+
+  /* Clean up.  */
+  delete_insns_since (last);
+  return NULL_RTX;
+}
 
 /* Return TRUE if expression STMT is suitable for replacement.  
    Never consider memory loads as replaceable, because those don't ever lead 
@@ -9236,10 +9451,20 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
 	{
 	  rtx r;
 	  location_t saved_loc = curr_insn_location ();
+	  tree rhs = gimple_assign_rhs_to_tree (g);
 
 	  set_curr_insn_location (gimple_location (g));
-	  r = expand_expr_real (gimple_assign_rhs_to_tree (g), target,
-				tmode, modifier, NULL);
+
+	  if ((targetm.gen_ccmp_first != NULL) && ccmp_candidate_p (g))
+	    {
+	      gcc_checking_assert (targetm.gen_ccmp_next != NULL);
+	      r = expand_ccmp_expr (g);
+	      if (!r)
+		r = expand_expr_real (rhs, target, tmode, modifier, NULL);
+	    }
+	  else
+	    r = expand_expr_real (rhs, target, tmode, modifier, NULL);
+
 	  set_curr_insn_location (saved_loc);
 	  if (REG_P (r) && !REG_EXPR (r))
 	    set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 3755670..a186498 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -6391,7 +6391,7 @@ gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
    or unsigned operation code.  */
 
-static enum rtx_code
+enum rtx_code
 get_rtx_code (enum tree_code tcode, bool unsignedp)
 {
   enum rtx_code code;
@@ -6441,6 +6441,12 @@ get_rtx_code (enum tree_code tcode, bool unsignedp)
       code = LTGT;
       break;
 
+    case BIT_AND_EXPR:
+      code = AND;
+      break;
+    case BIT_IOR_EXPR:
+      code = IOR;
+      break;
     default:
       gcc_unreachable ();
     }
diff --git a/gcc/optabs.h b/gcc/optabs.h
index 4de4409..5a97e29 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -91,7 +91,7 @@ extern rtx expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
 extern rtx expand_ternary_op (enum machine_mode mode, optab ternary_optab,
 			      rtx op0, rtx op1, rtx op2, rtx target,
 			      int unsignedp);
-
+extern enum rtx_code get_rtx_code (enum tree_code tcode, bool unsignedp);
 /* Expand a binary operation given optab and rtx operands.  */
 extern rtx expand_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
 			 enum optab_methods);
diff --git a/gcc/target.def b/gcc/target.def
index cf3e2fd..1cc59a7 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -2377,6 +2377,39 @@ modes and they have different conditional execution capability, such as ARM.",
  bool, (void),
  default_have_conditional_execution)
 
+DEFHOOK
+(select_ccmp_cmp_order,
+ "For some targets (like ARM), the order of two compares is sensitive for\n\
+conditional compare.  cmp0-cmp1 might be an invalid combination.  But when\n\
+swapping the order, cmp1-cmp0 is valid.  The function will return\n\
+  -1: if @code{code1} and @code{code2} are a valid combination.\n\
+   1: if @code{code2} and @code{code1} are a valid combination.\n\
+   0: both orders are invalid.",
+ int, (int code1, int code2),
+ default_select_ccmp_cmp_order)
+
+DEFHOOK
+(gen_ccmp_first,
+ "This function emits a comparison insn for the first of a sequence of\n\
+ conditional comparisons.  It returns a comparison expression appropriate\n\
+ for passing to @code{gen_ccmp_next} or to @code{cbranch_optab}.\n\
+ @code{code} is the rtx comparison code applied to @code{op0} and @code{op1}.",
+ rtx, (int code, rtx op0, rtx op1),
+ NULL)
+
+DEFHOOK
+(gen_ccmp_next,
+ "This function emits a conditional comparison within a sequence of\n\
+ conditional comparisons.  The @code{prev} expression is the result of a\n\
+ prior call to @code{gen_ccmp_first} or @code{gen_ccmp_next}.  It may return\n\
+ @code{NULL} if the combination of @code{prev} and this comparison is\n\
+ not supported, otherwise the result must be appropriate for passing to\n\
+ @code{gen_ccmp_next} or @code{cbranch_optab} if @code{cc_p} is true.\n\
+ If @code{cc_p} is false, it returns a general register.  @code{bit_code}\n\
+ is AND or IOR, which is the op on the two compares.",
+ rtx, (rtx prev, int cmp_code, rtx op0, rtx op1, int bit_code, bool cc_p),
+ NULL)
+
 /* Return a new value for loop unroll size.  */
 DEFHOOK
 (loop_unroll_adjust,
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 6674109..4901d65 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -1718,5 +1718,11 @@ default_builtin_chkp_function (unsigned int fcode ATTRIBUTE_UNUSED)
   return NULL_TREE;
 }
 
+/* Default return -1 to keep current order.  */
+int
+default_select_ccmp_cmp_order (int, int)
+{
+  return -1;
+}
 
 #include "gt-targhooks.h"
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index c810998..d77e799 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -211,3 +211,4 @@ extern tree default_fn_abi_va_list_bounds_size (tree);
 extern tree default_chkp_bound_type (void);
 extern enum machine_mode default_chkp_bound_mode (void);
 extern tree default_builtin_chkp_function (unsigned int);
+extern int default_select_ccmp_cmp_order (int, int);
