The arm target has some improvements over aarch64 for
double-word arithmetic and comparisons.  Port the corresponding
checks to aarch64_select_cc_mode.

        * config/aarch64/aarch64.c (aarch64_select_cc_mode): Check
        for swapped operands to CC_Cmode; check for zero_extend to
        CC_ADCmode; handle EQ, MINUS and swapped operands for
        CC_Vmode.
---
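Note for review, not part of the commit message: the CC_Cmode hunk
recognizes a carry-out computed as an unsigned comparison of a sum
against one of its addends; after canonicalization the surviving addend
may be either operand of the PLUS, hence the second rtx_equal_p test.
A hypothetical source-level sketch of the pattern being matched:

    /* Hypothetical example, not part of this patch.  Depending on
       operand canonicalization, the compared addend may be either
       operand of the PLUS, which the widened check now accepts.  */
    unsigned long long
    add_carry (unsigned long long a, unsigned long long b, int *carry)
    {
      unsigned long long r = a + b;
      *carry = r < b;
      return r;
    }

The add-with-carry hunk likewise requires one addend to be a
zero_extend, which is what the double-word add-with-carry expansion is
expected to produce.
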
 gcc/config/aarch64/aarch64.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index f2c14818c79..36e9ebb468a 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9521,21 +9521,25 @@ aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
   if ((mode_x == DImode || mode_x == TImode)
       && (code == LTU || code == GEU)
       && code_x == PLUS
-      && rtx_equal_p (XEXP (x, 0), y))
+      && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
     return CC_Cmode;
 
   /* A test for unsigned overflow from an add with carry.  */
   if ((mode_x == DImode || mode_x == TImode)
       && (code == LTU || code == GEU)
       && code_x == PLUS
+      && GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
       && const_dword_umaxp1 (y, mode_x))
     return CC_ADCmode;
 
   /* A test for signed overflow.  */
   if ((mode_x == DImode || mode_x == TImode)
-      && code == NE
-      && code_x == PLUS
-      && GET_CODE (y) == SIGN_EXTEND)
+      && (code == NE || code == EQ)
+      && (code_x == PLUS || code_x == MINUS)
+      && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+          || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
+      && GET_CODE (y) == SIGN_EXTEND
+      && GET_CODE (XEXP (y, 0)) == GET_CODE (x))
     return CC_Vmode;
 
   /* For everything else, return CCmode.  */
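
Illustrative note, not part of the patch: the widened CC_Vmode test is
expected to be exercised by GCC's signed overflow builtins, and with
MINUS and EQ accepted the subtraction form should now also select
CC_Vmode.  A hypothetical test case:

    /* Hypothetical example; assumes the signed overflow builtins
       expand to the sign_extend compare forms this check matches.  */
    int add_ovf (long long a, long long b, long long *r)
    {
      return __builtin_add_overflow (a, b, r);
    }

    int sub_ovf (long long a, long long b, long long *r)
    {
      return __builtin_sub_overflow (a, b, r);
    }
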
-- 
2.20.1
