The gimple folding ties the AArch64 backend to the midend's tree representation via the Neon intrinsics. This code enables constant folding of the Neon reduction intrinsics; it improves performance but is not necessary for correctness. Temporarily removing it, as this patch does, lets us change the midend representation independently of the AArch64 backend and intrinsics.
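
For concreteness, here is a sketch of the kind of fold this enables. vaddv_s32 is a real intrinsic from the AArch64 arm_neon.h, but the folding path described in the comment is a summary of the mechanism, not code taken from this patch:

    #include <arm_neon.h>

    /* With the gimple fold enabled, the reduction builtin behind
       vaddv_s32 is rewritten into a generic tree code during the
       gimple passes, so a call on a constant vector can be folded
       down to the constant 3 at compile time.  With the fold
       disabled, the builtin survives to expansion and an
       across-lanes add instruction is emitted instead; the result
       is the same, just computed at run time.  */
    int32_t
    sum_constant (void)
    {
      int32x2_t v = { 1, 2 };
      return vaddv_s32 (v);
    }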

However, I'm leaving the code itself in place (disabled by preprocessor directives), as a later patch will bring it all back in a very similar form, and additionally enabled for big-endian.

Bootstrapped on aarch64-none-linux; tested with aarch64.exp on aarch64-none-elf and aarch64_be-none-elf. (The removed code was already disabled for big-endian, and this is solely a __builtin-folding mechanism, i.e. it is used only for Neon/ACLE intrinsics.)

gcc/ChangeLog:
        * config/aarch64/aarch64.c (TARGET_GIMPLE_FOLD_BUILTIN): Comment out.
        * config/aarch64/aarch64-builtins.c (aarch64_gimple_fold_builtin):
        Disable using preprocessor directives.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 5217f4a5f39224dbf8029542ad33790ef2c191be..15eb7c686d95b1d66cbd514500ec29ba074eaa3f 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1333,6 +1333,9 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
   return NULL_TREE;
 }
 
+/* Handling of reduction operations temporarily removed so as to decouple
+   changes to tree codes from AArch64 NEON Intrinsics.  */
+#if 0
 bool
 aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
 {
@@ -1404,6 +1407,7 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
 
   return changed;
 }
+#endif
 
 void
 aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index e7946fc0b70ced70a4e98caa0a33121f29242aad..9197ec038b7d40a601c886b846113c50a29cf5e2 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9925,8 +9925,8 @@ aarch64_expand_movmem (rtx *operands)
 #undef TARGET_FRAME_POINTER_REQUIRED
 #define TARGET_FRAME_POINTER_REQUIRED aarch64_frame_pointer_required
 
-#undef TARGET_GIMPLE_FOLD_BUILTIN
-#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
+//#undef TARGET_GIMPLE_FOLD_BUILTIN
+//#define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
 
 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
 #define TARGET_GIMPLIFY_VA_ARG_EXPR aarch64_gimplify_va_arg_expr
