The attached patch implements the TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook for
AArch64. With this, the atomic test case gcc.dg/atomic/c11-atomic-exec-5.c now
PASSes.

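To illustrate what the hook is needed for: a C11 compound assignment to an
_Atomic floating-point object expands to a compare-and-exchange loop, and FP
exceptions raised by discarded iterations must not be left set. A minimal
sketch (not the actual test case):

  /* Sketch only: the += below expands to a CAS loop, so exceptions from
     discarded iterations have to be cleared and only the final iteration's
     exceptions raised.  */
  static _Atomic double d;

  void
  f (double x)
  {
    d += x;
  }
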
This implementation is based on the SPARC and i386 implementations.

Regression tested on qemu-aarch64 for aarch64-none-linux-gnu with no new
regressions. Is this OK for trunk?

Thanks,
Kugan

gcc/
+2014-04-27  Kugan Vivekanandarajah  <kug...@linaro.org>
+
+       * config/aarch64/aarch64.c (TARGET_ATOMIC_ASSIGN_EXPAND_FENV): New
+       define.
+       * config/aarch64/aarch64-builtins.c (aarch64_builtins): Add
+       AARCH64_BUILTIN_LDFPSCR and AARCH64_BUILTIN_STFPSCR.
+       (aarch64_init_builtins): Initialize the builtins
+       __builtin_aarch64_ldfpscr and __builtin_aarch64_stfpscr.
+       (aarch64_expand_builtin): Expand the builtins
+       __builtin_aarch64_ldfpscr and __builtin_aarch64_stfpscr.
+       (aarch64_atomic_assign_expand_fenv): New function.
+       * config/aarch64/aarch64.md (stfpscr): New pattern.
+       (ldfpscr): Likewise.
+       (unspecv): Add UNSPECV_LDFPSCR and UNSPECV_STFPSCR.
+
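
For reference, the three sequences built by aarch64_atomic_assign_expand_fenv
are roughly equivalent to the C below. The variable and function names are
illustrative only, not the temporaries the hook actually creates, and
__atomic_feraiseexcept is the existing libatomic helper:

  /* Sketch of what *hold, *clear and *update expand to; this only compiles
     with the patch applied, since it uses the new builtins.  */
  extern void __atomic_feraiseexcept (int);

  void
  atomic_fp_assign_sketch (void)
  {
    const unsigned int mask = ~((0x1fu << 8) | 0x1fu); /* the patch's FE_* mask */
    unsigned int fenv_var, tmp_var;

    /* *hold: save the FPSR and write it back with the exception bits masked.  */
    fenv_var = __builtin_aarch64_ldfpscr ();
    __builtin_aarch64_stfpscr (fenv_var & mask);

    /* ...compare-and-exchange loop; on a failed iteration...  */

    /* *clear: discard exceptions raised by the failed iteration.  */
    __builtin_aarch64_stfpscr (fenv_var & mask);

    /* ...once the compare-and-exchange succeeds...  */

    /* *update: capture the final iteration's exceptions, restore the saved
       environment, then re-raise those exceptions.  */
    tmp_var = __builtin_aarch64_ldfpscr ();
    __builtin_aarch64_stfpscr (fenv_var);
    __atomic_feraiseexcept ((int) tmp_var);
  }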
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 55cfe0a..70d3efa 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -371,6 +371,10 @@ static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
 enum aarch64_builtins
 {
   AARCH64_BUILTIN_MIN,
+
+  AARCH64_BUILTIN_LDFPSCR,
+  AARCH64_BUILTIN_STFPSCR,
+
   AARCH64_SIMD_BUILTIN_BASE,
 #include "aarch64-simd-builtins.def"
   AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
@@ -752,6 +756,18 @@ aarch64_init_simd_builtins (void)
 void
 aarch64_init_builtins (void)
 {
+  tree ftype_stfpscr
+    = build_function_type_list (void_type_node, unsigned_type_node, NULL);
+  tree ftype_ldfpscr
+    = build_function_type_list (unsigned_type_node, NULL);
+
+  aarch64_builtin_decls[AARCH64_BUILTIN_LDFPSCR]
+    = add_builtin_function ("__builtin_aarch64_ldfpscr", ftype_ldfpscr,
+                           AARCH64_BUILTIN_LDFPSCR, BUILT_IN_MD, NULL, NULL_TREE);
+  aarch64_builtin_decls[AARCH64_BUILTIN_STFPSCR]
+    = add_builtin_function ("__builtin_aarch64_stfpscr", ftype_stfpscr,
+                           AARCH64_BUILTIN_STFPSCR, BUILT_IN_MD, NULL, NULL_TREE);
+
   if (TARGET_SIMD)
     aarch64_init_simd_builtins ();
 }
@@ -964,6 +980,31 @@ aarch64_expand_builtin (tree exp,
 {
   tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
   int fcode = DECL_FUNCTION_CODE (fndecl);
+  int icode;
+  rtx pat, op0;
+  tree arg0;
+
+  switch (fcode)
+    {
+    case AARCH64_BUILTIN_LDFPSCR:
+    case AARCH64_BUILTIN_STFPSCR:
+      if (fcode == AARCH64_BUILTIN_LDFPSCR)
+       {
+         icode = CODE_FOR_ldfpscr;
+         target = gen_reg_rtx (SImode);
+         pat = GEN_FCN (icode) (target);
+       }
+      else
+       {
+         target = NULL_RTX;
+         icode = CODE_FOR_stfpscr;
+         arg0 = CALL_EXPR_ARG (exp, 0);
+         op0 = expand_normal (arg0);
+         pat = GEN_FCN (icode) (op0);
+       }
+      emit_insn (pat);
+      return target;
+    }
 
   if (fcode >= AARCH64_SIMD_BUILTIN_BASE)
     return aarch64_simd_expand_builtin (fcode, exp, target);
@@ -1196,6 +1237,70 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   return changed;
 }
 
+void
+aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+  const unsigned FE_INVALID = 1;
+  const unsigned FE_DIVBYZERO = 2;
+  const unsigned FE_OVERFLOW = 4;
+  const unsigned FE_UNDERFLOW = 8;
+  const unsigned FE_INEXACT = 16;
+  const unsigned HOST_WIDE_INT FE_ALL_EXCEPT = (FE_INVALID | FE_DIVBYZERO
+                                                | FE_OVERFLOW | FE_UNDERFLOW
+                                                | FE_INEXACT);
+  const unsigned HOST_WIDE_INT FE_EXCEPT_SHIFT = 8;
+
+  /* Generate the equivalent of:
+       unsigned int fenv_var;
+       fenv_var = __builtin_aarch64_ldfpscr ();
+
+       unsigned int masked_fenv;
+       masked_fenv = fenv_var & ~mask;
+
+       __builtin_aarch64_stfpscr (masked_fenv);  */
+
+  tree fenv_var = create_tmp_var (unsigned_type_node, NULL);
+  tree ldfpscr = aarch64_builtin_decls[AARCH64_BUILTIN_LDFPSCR];
+  tree stfpscr = aarch64_builtin_decls[AARCH64_BUILTIN_STFPSCR];
+  tree mask = build_int_cst (unsigned_type_node,
+                            ~((FE_ALL_EXCEPT << FE_EXCEPT_SHIFT)
+                              | FE_ALL_EXCEPT));
+  tree ld_fenv_stmt = build2 (MODIFY_EXPR, unsigned_type_node,
+                             fenv_var, build_call_expr (ldfpscr, 0));
+  tree masked_fenv = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var, mask);
+  tree hold_fnclex = build_call_expr (stfpscr, 1, masked_fenv);
+  *hold = build2 (COMPOUND_EXPR, void_type_node,
+                 build2 (COMPOUND_EXPR, void_type_node, masked_fenv,
+                         ld_fenv_stmt), hold_fnclex);
+
+  /* Store the value of masked_fenv to clear the exceptions:
+     __builtin_aarch64_stfpscr (masked_fenv);  */
+
+  *clear = build_call_expr (stfpscr, 1, masked_fenv);
+
+  /* Generate the equivalent of:
+       unsigned int tmp_var;
+       tmp_var = __builtin_aarch64_ldfpscr ();
+
+       __builtin_aarch64_stfpscr (fenv_var);
+
+       __atomic_feraiseexcept (tmp_var);  */
+
+  tree tmp_var = create_tmp_var (unsigned_type_node, NULL);
+  tree reload_fenv_stmt = build2 (MODIFY_EXPR, unsigned_type_node,
+                                 tmp_var, build_call_expr (ldfpscr, 0));
+  tree restore_fenv = build_call_expr (stfpscr, 1, fenv_var);
+  tree atomic_feraiseexcept
+    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+  tree update_call
+    = build_call_expr (atomic_feraiseexcept, 1,
+                      fold_convert (integer_type_node, tmp_var));
+  *update = build2 (COMPOUND_EXPR, void_type_node,
+                   build2 (COMPOUND_EXPR, void_type_node,
+                           reload_fenv_stmt, restore_fenv), update_call);
+}
+
+
 #undef AARCH64_CHECK_BUILTIN_MODE
 #undef AARCH64_FIND_FRINT_VARIANT
 #undef BUILTIN_DX
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index a3147ee..0f5ea48 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -332,6 +332,8 @@ static const char * const aarch64_condition_codes[] =
   "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
 };
 
+void aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update);
+
 /* Provide a mapping from gcc register numbers to dwarf register numbers.  */
 unsigned
 aarch64_dbx_register_number (unsigned regno)
@@ -8488,6 +8490,10 @@ aarch64_cannot_change_mode_class (enum machine_mode from,
 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
   aarch64_autovectorize_vector_sizes
 
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV \
+  aarch64_atomic_assign_expand_fenv
+
 /* Section anchor support.  */
 
 #undef TARGET_MIN_ANCHOR_OFFSET
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index c86a29d..e916ff5 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -107,6 +107,8 @@
 
 (define_c_enum "unspecv" [
     UNSPECV_EH_RETURN          ; Represent EH_RETURN
+    UNSPECV_LDFPSCR            ; Load the floating-point status and control register.
+    UNSPECV_STFPSCR            ; Store the floating-point status and control register.
   ]
 )
 
@@ -3635,6 +3637,21 @@
   DONE;
 })
 
+;; Write Floating-point Status Register.
+(define_insn "stfpscr"
+  [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_STFPSCR)]
+  ""
+  "msr\\tfpsr, %0"
+  [(set_attr "type" "mrs")])
+
+;; Read Floating-point Status Register.
+(define_insn "ldfpscr"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (unspec_volatile:SI [(const_int 0)] UNSPECV_LDFPSCR))]
+  ""
+  "mrs\\t%0, fpsr"
+  [(set_attr "type" "mrs")])
+
 ;; AdvSIMD Stuff
 (include "aarch64-simd.md")
 

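As a usage note, the new builtins are only meant to be generated internally by
the fenv expansion; a hypothetical direct call would simply go through the new
ldfpscr/stfpscr patterns and expand to single moves of the FPSR:

  /* Hypothetical direct use of the new builtins (requires this patch).  */
  unsigned int
  read_fpsr (void)
  {
    return __builtin_aarch64_ldfpscr ();  /* via the ldfpscr pattern (mrs).  */
  }

  void
  write_fpsr (unsigned int flags)
  {
    __builtin_aarch64_stfpscr (flags);    /* via the stfpscr pattern (msr).  */
  }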