This patch reimplements the scalar intrinsics for conversion between
floating-point and fixed-point.

Previously, all such intrinsics were implemented through inline assembly.
This patch adds RTL patterns for these operations so that the intrinsics
can be implemented through builtins.
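
For illustration only (a sketch, not part of the patch; the wrapper function
name below is made up), a call to one of the affected intrinsics is now
expected to expand through the corresponding builtin rather than inline
assembly:

  #include <arm_neon.h>

  /* Convert a 64-bit integer holding a Q16 fixed-point value to double.
     With this patch vcvtd_n_f64_s64 expands through
     __builtin_aarch64_scvtfdidf, which should emit something like
     "scvtf d0, x0, #16" (exact registers depend on allocation).  */
  float64_t
  to_double_q16 (int64_t x)
  {
    return vcvtd_n_f64_s64 (x, 16);
  }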

gcc/
2016-05-23  Jiong Wang  <jiong.w...@arm.com>

        * config/aarch64/aarch64-builtins.c (TYPES_BINOP_USS): New.
        (TYPES_BINOP_SUS): Likewise.
        (aarch64_simd_builtin_data): Update include file name.
        (aarch64_builtins): Likewise.
        * config/aarch64/aarch64-simd-builtins.def: Rename to
        aarch64-builtins.def.
        (scvtfsi): New entry for conversion between scalar
        floating-point and fixed-point.
        (scvtfdi): Likewise.
        (ucvtfsi): Likewise.
        (ucvtfdi): Likewise.
        (fcvtzssf): Likewise.
        (fcvtzsdf): Likewise.
        (fcvtzusf): Likewise.
        (fcvtzudf): Likewise.
        * config/aarch64/aarch64.md
        (<FCVT_F2FIXED_SCALAR:fcvt_fixed_insn><GPF:mode><GPI:mode>3): New
        pattern for conversion from scalar floating-point to fixed-point.
        (<FCVT_FIXED2F_SCALAR:fcvt_fixed_insn><GPI:mode><GPF:mode>3): Likewise.
        (UNSPEC_FCVTZS_SCALAR): New UNSPEC enumeration.
        (UNSPEC_FCVTZU_SCALAR): Likewise.
        (UNSPEC_SCVTF_SCALAR): Likewise.
        (UNSPEC_UCVTF_SCALAR): Likewise.
        * config/aarch64/aarch64-simd.md
        (<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3): New pattern for the
        scalar SIMD variant of the conversion between floating-point and
        fixed-point.
        (<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3): Likewise.
        * config/aarch64/arm_neon.h (vcvtd_n_f64_s64): Remove inline
        assembly.  Use builtin.
        (vcvtd_n_f64_u64): Likewise.
        (vcvtd_n_s64_f64): Likewise.
        (vcvtd_n_u64_f64): Likewise.
        (vcvts_n_f32_s32): Likewise.
        (vcvts_n_f32_u32): Likewise.
        (vcvts_n_s32_f32): Likewise.
        (vcvts_n_u32_f32): Likewise.
        * config/aarch64/iterators.md (UNSPEC_FCVTZS): New.
        (UNSPEC_FCVTZU): Likewise.
        (UNSPEC_SCVTF): Likewise.
        (UNSPEC_UCVTF): Likewise.
        (fcvt_target): Support integer to float mapping.
        (FCVT_TARGET): Likewise.
        (FCVT_FIXED2F): New iterator.
        (FCVT_F2FIXED): Likewise.
        (FCVT_FIXED2F_SCALAR): Likewise.
        (FCVT_F2FIXED_SCALAR): Likewise.
        (fcvt_fixed_insn): New define_int_attr.
        * config/aarch64/t-aarch64 (aarch64-builtins.o): Change dependency file
        name from "aarch64-simd-builtins.def" to "aarch64-builtins.def".

From 91adf34dbcf5a233c3d159e7038256d3f5c7572e Mon Sep 17 00:00:00 2001
From: "Jiong.Wang" <jiong.w...@arm.com>
Date: Mon, 23 May 2016 12:11:53 +0100
Subject: [PATCH 1/6] 1

---
 gcc/config/aarch64/aarch64-builtins.c        |  12 +-
 gcc/config/aarch64/aarch64-builtins.def      | 457 +++++++++++++++++++++++++++
 gcc/config/aarch64/aarch64-simd-builtins.def | 447 --------------------------
 gcc/config/aarch64/aarch64-simd.md           |  22 ++
 gcc/config/aarch64/aarch64.md                |  26 ++
 gcc/config/aarch64/arm_neon.h                | 148 +++------
 gcc/config/aarch64/iterators.md              |  25 +-
 gcc/config/aarch64/t-aarch64                 |   2 +-
 8 files changed, 591 insertions(+), 548 deletions(-)
 create mode 100644 gcc/config/aarch64/aarch64-builtins.def
 delete mode 100644 gcc/config/aarch64/aarch64-simd-builtins.def

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 5573903..d79ba3d 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -139,6 +139,14 @@ aarch64_types_binop_ssu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_unsigned };
 #define TYPES_BINOP_SSU (aarch64_types_binop_ssu_qualifiers)
 static enum aarch64_type_qualifiers
+aarch64_types_binop_uss_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_unsigned, qualifier_none, qualifier_none };
+#define TYPES_BINOP_USS (aarch64_types_binop_uss_qualifiers)
+static enum aarch64_type_qualifiers
+aarch64_types_binop_sus_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_none, qualifier_unsigned, qualifier_none };
+#define TYPES_BINOP_SUS (aarch64_types_binop_sus_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_binopp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_poly, qualifier_poly, qualifier_poly };
 #define TYPES_BINOPP (aarch64_types_binopp_qualifiers)
@@ -291,7 +299,7 @@ aarch64_types_storestruct_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #include "aarch64-builtin-iterators.h"
 
 static aarch64_simd_builtin_datum aarch64_simd_builtin_data[] = {
-#include "aarch64-simd-builtins.def"
+#include "aarch64-builtins.def"
 };
 
 /* There's only 8 CRC32 builtins.  Probably not worth their own .def file.  */
@@ -336,7 +344,7 @@ enum aarch64_builtins
   AARCH64_BUILTIN_RSQRT_V4SF,
   AARCH64_SIMD_BUILTIN_BASE,
   AARCH64_SIMD_BUILTIN_LANE_CHECK,
-#include "aarch64-simd-builtins.def"
+#include "aarch64-builtins.def"
   /* The first enum element which is based on an insn_data pattern.  */
   AARCH64_SIMD_PATTERN_START = AARCH64_SIMD_BUILTIN_LANE_CHECK + 1,
   AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_PATTERN_START
diff --git a/gcc/config/aarch64/aarch64-builtins.def b/gcc/config/aarch64/aarch64-builtins.def
new file mode 100644
index 0000000..4528db3
--- /dev/null
+++ b/gcc/config/aarch64/aarch64-builtins.def
@@ -0,0 +1,457 @@
+/* Machine description for AArch64 architecture.
+   Copyright (C) 2012-2016 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3, or (at your option)
+   any later version.
+
+   GCC is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+/* In the list below, the BUILTIN_<ITERATOR> macros expand to create
+   builtins for each of the modes described by <ITERATOR>.  When adding
+   new builtins to this list, a helpful idiom to follow is to add
+   a line for each pattern in the md file.  Thus, ADDP, which has one
+   pattern defined for the VD_BHSI iterator, and one for DImode, has two
+   entries below.
+
+   Parameter 1 is the 'type' of the intrinsic.  This is used to
+   describe the type modifiers (for example; unsigned) applied to
+   each of the parameters to the intrinsic function.
+
+   Parameter 2 is the name of the intrinsic.  This is appended
+   to `__builtin_aarch64_<name><mode>` to give the intrinsic name
+   as exported to the front-ends.
+
+   Parameter 3 describes how to map from the name to the CODE_FOR_
+   macro holding the RTL pattern for the intrinsic.  This mapping is:
+   0 - CODE_FOR_aarch64_<name><mode>
+   1-9 - CODE_FOR_<name><mode><1-9>
+   10 - CODE_FOR_<name><mode>.  */
+
+  BUILTIN_VDC (COMBINE, combine, 0)
+  BUILTIN_VB (BINOP, pmul, 0)
+  BUILTIN_VALLF (BINOP, fmulx, 0)
+  BUILTIN_VDQF_DF (UNOP, sqrt, 2)
+  BUILTIN_VD_BHSI (BINOP, addp, 0)
+  VAR1 (UNOP, addp, 0, di)
+  BUILTIN_VDQ_BHSI (UNOP, clrsb, 2)
+  BUILTIN_VDQ_BHSI (UNOP, clz, 2)
+  BUILTIN_VS (UNOP, ctz, 2)
+  BUILTIN_VB (UNOP, popcount, 2)
+
+  /* Implemented by aarch64_<sur>q<r>shl<mode>.  */
+  BUILTIN_VSDQ_I (BINOP, sqshl, 0)
+  BUILTIN_VSDQ_I (BINOP_UUS, uqshl, 0)
+  BUILTIN_VSDQ_I (BINOP, sqrshl, 0)
+  BUILTIN_VSDQ_I (BINOP_UUS, uqrshl, 0)
+  /* Implemented by aarch64_<su_optab><optab><mode>.  */
+  BUILTIN_VSDQ_I (BINOP, sqadd, 0)
+  BUILTIN_VSDQ_I (BINOPU, uqadd, 0)
+  BUILTIN_VSDQ_I (BINOP, sqsub, 0)
+  BUILTIN_VSDQ_I (BINOPU, uqsub, 0)
+  /* Implemented by aarch64_<sur>qadd<mode>.  */
+  BUILTIN_VSDQ_I (BINOP_SSU, suqadd, 0)
+  BUILTIN_VSDQ_I (BINOP_UUS, usqadd, 0)
+
+  /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>.  */
+  BUILTIN_VDC (GETREG, get_dregoi, 0)
+  BUILTIN_VDC (GETREG, get_dregci, 0)
+  BUILTIN_VDC (GETREG, get_dregxi, 0)
+  /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>.  */
+  BUILTIN_VQ (GETREG, get_qregoi, 0)
+  BUILTIN_VQ (GETREG, get_qregci, 0)
+  BUILTIN_VQ (GETREG, get_qregxi, 0)
+  /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>.  */
+  BUILTIN_VQ (SETREG, set_qregoi, 0)
+  BUILTIN_VQ (SETREG, set_qregci, 0)
+  BUILTIN_VQ (SETREG, set_qregxi, 0)
+  /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>.  */
+  BUILTIN_VDC (LOADSTRUCT, ld2, 0)
+  BUILTIN_VDC (LOADSTRUCT, ld3, 0)
+  BUILTIN_VDC (LOADSTRUCT, ld4, 0)
+  /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>.  */
+  BUILTIN_VQ (LOADSTRUCT, ld2, 0)
+  BUILTIN_VQ (LOADSTRUCT, ld3, 0)
+  BUILTIN_VQ (LOADSTRUCT, ld4, 0)
+  /* Implemented by aarch64_ld<VSTRUCT:nregs>r<VALLDIF:mode>.  */
+  BUILTIN_VALLDIF (LOADSTRUCT, ld2r, 0)
+  BUILTIN_VALLDIF (LOADSTRUCT, ld3r, 0)
+  BUILTIN_VALLDIF (LOADSTRUCT, ld4r, 0)
+  /* Implemented by aarch64_ld<VSTRUCT:nregs>_lane<VQ:mode>.  */
+  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld2_lane, 0)
+  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld3_lane, 0)
+  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld4_lane, 0)
+  /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>.  */
+  BUILTIN_VDC (STORESTRUCT, st2, 0)
+  BUILTIN_VDC (STORESTRUCT, st3, 0)
+  BUILTIN_VDC (STORESTRUCT, st4, 0)
+  /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>.  */
+  BUILTIN_VQ (STORESTRUCT, st2, 0)
+  BUILTIN_VQ (STORESTRUCT, st3, 0)
+  BUILTIN_VQ (STORESTRUCT, st4, 0)
+
+  BUILTIN_VALLDIF (STORESTRUCT_LANE, st2_lane, 0)
+  BUILTIN_VALLDIF (STORESTRUCT_LANE, st3_lane, 0)
+  BUILTIN_VALLDIF (STORESTRUCT_LANE, st4_lane, 0)
+
+  BUILTIN_VQW (BINOP, saddl2, 0)
+  BUILTIN_VQW (BINOP, uaddl2, 0)
+  BUILTIN_VQW (BINOP, ssubl2, 0)
+  BUILTIN_VQW (BINOP, usubl2, 0)
+  BUILTIN_VQW (BINOP, saddw2, 0)
+  BUILTIN_VQW (BINOP, uaddw2, 0)
+  BUILTIN_VQW (BINOP, ssubw2, 0)
+  BUILTIN_VQW (BINOP, usubw2, 0)
+  /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>.  */
+  BUILTIN_VD_BHSI (BINOP, saddl, 0)
+  BUILTIN_VD_BHSI (BINOP, uaddl, 0)
+  BUILTIN_VD_BHSI (BINOP, ssubl, 0)
+  BUILTIN_VD_BHSI (BINOP, usubl, 0)
+  /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>.  */
+  BUILTIN_VD_BHSI (BINOP, saddw, 0)
+  BUILTIN_VD_BHSI (BINOP, uaddw, 0)
+  BUILTIN_VD_BHSI (BINOP, ssubw, 0)
+  BUILTIN_VD_BHSI (BINOP, usubw, 0)
+  /* Implemented by aarch64_<sur>h<addsub><mode>.  */
+  BUILTIN_VDQ_BHSI (BINOP, shadd, 0)
+  BUILTIN_VDQ_BHSI (BINOP, shsub, 0)
+  BUILTIN_VDQ_BHSI (BINOP, uhadd, 0)
+  BUILTIN_VDQ_BHSI (BINOP, uhsub, 0)
+  BUILTIN_VDQ_BHSI (BINOP, srhadd, 0)
+  BUILTIN_VDQ_BHSI (BINOP, urhadd, 0)
+  /* Implemented by aarch64_<sur><addsub>hn<mode>.  */
+  BUILTIN_VQN (BINOP, addhn, 0)
+  BUILTIN_VQN (BINOP, subhn, 0)
+  BUILTIN_VQN (BINOP, raddhn, 0)
+  BUILTIN_VQN (BINOP, rsubhn, 0)
+  /* Implemented by aarch64_<sur><addsub>hn2<mode>.  */
+  BUILTIN_VQN (TERNOP, addhn2, 0)
+  BUILTIN_VQN (TERNOP, subhn2, 0)
+  BUILTIN_VQN (TERNOP, raddhn2, 0)
+  BUILTIN_VQN (TERNOP, rsubhn2, 0)
+
+  BUILTIN_VSQN_HSDI (UNOP, sqmovun, 0)
+  /* Implemented by aarch64_<sur>qmovn<mode>.  */
+  BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0)
+  BUILTIN_VSQN_HSDI (UNOP, uqmovn, 0)
+  /* Implemented by aarch64_s<optab><mode>.  */
+  BUILTIN_VSDQ_I (UNOP, sqabs, 0)
+  BUILTIN_VSDQ_I (UNOP, sqneg, 0)
+
+  /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>.  */
+  BUILTIN_VSD_HSI (TERNOP, sqdmlal, 0)
+  BUILTIN_VSD_HSI (TERNOP, sqdmlsl, 0)
+  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_lane<mode>.  */
+  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlal_lane, 0)
+  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlsl_lane, 0)
+  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_laneq<mode>.  */
+  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlal_laneq, 0)
+  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlsl_laneq, 0)
+  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>.  */
+  BUILTIN_VD_HSI (TERNOP, sqdmlal_n, 0)
+  BUILTIN_VD_HSI (TERNOP, sqdmlsl_n, 0)
+
+  BUILTIN_VQ_HSI (TERNOP, sqdmlal2, 0)
+  BUILTIN_VQ_HSI (TERNOP, sqdmlsl2, 0)
+  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlal2_lane, 0)
+  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlsl2_lane, 0)
+  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlal2_laneq, 0)
+  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlsl2_laneq, 0)
+  BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n, 0)
+  BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n, 0)
+
+  BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
+  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
+  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)
+  BUILTIN_VD_HSI (BINOP, sqdmull_n, 0)
+  BUILTIN_VQ_HSI (BINOP, sqdmull2, 0)
+  BUILTIN_VQ_HSI (TERNOP_LANE, sqdmull2_lane, 0)
+  BUILTIN_VQ_HSI (TERNOP_LANE, sqdmull2_laneq, 0)
+  BUILTIN_VQ_HSI (BINOP, sqdmull2_n, 0)
+  /* Implemented by aarch64_sq<r>dmulh<mode>.  */
+  BUILTIN_VSDQ_HSI (BINOP, sqdmulh, 0)
+  BUILTIN_VSDQ_HSI (BINOP, sqrdmulh, 0)
+  /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>.  */
+  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_lane, 0)
+  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_laneq, 0)
+  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqrdmulh_lane, 0)
+  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqrdmulh_laneq, 0)
+
+  BUILTIN_VSDQ_I_DI (BINOP, ashl, 3)
+  /* Implemented by aarch64_<sur>shl<mode>.  */
+  BUILTIN_VSDQ_I_DI (BINOP, sshl, 0)
+  BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0)
+  BUILTIN_VSDQ_I_DI (BINOP, srshl, 0)
+  BUILTIN_VSDQ_I_DI (BINOP_UUS, urshl, 0)
+
+  BUILTIN_VDQ_I (SHIFTIMM, ashr, 3)
+  VAR1 (SHIFTIMM, ashr_simd, 0, di)
+  BUILTIN_VDQ_I (SHIFTIMM, lshr, 3)
+  VAR1 (USHIFTIMM, lshr_simd, 0, di)
+  /* Implemented by aarch64_<sur>shr_n<mode>.  */
+  BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0)
+  BUILTIN_VSDQ_I_DI (USHIFTIMM, urshr_n, 0)
+  /* Implemented by aarch64_<sur>sra_n<mode>.  */
+  BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n, 0)
+  BUILTIN_VSDQ_I_DI (USHIFTACC, usra_n, 0)
+  BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n, 0)
+  BUILTIN_VSDQ_I_DI (USHIFTACC, ursra_n, 0)
+  /* Implemented by aarch64_<sur>shll_n<mode>.  */
+  BUILTIN_VD_BHSI (SHIFTIMM, sshll_n, 0)
+  BUILTIN_VD_BHSI (USHIFTIMM, ushll_n, 0)
+  /* Implemented by aarch64_<sur>shll2_n<mode>.  */
+  BUILTIN_VQW (SHIFTIMM, sshll2_n, 0)
+  BUILTIN_VQW (SHIFTIMM, ushll2_n, 0)
+  /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>.  */
+  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n, 0)
+  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n, 0)
+  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n, 0)
+  BUILTIN_VSQN_HSDI (USHIFTIMM, uqshrn_n, 0)
+  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n, 0)
+  BUILTIN_VSQN_HSDI (USHIFTIMM, uqrshrn_n, 0)
+  /* Implemented by aarch64_<sur>s<lr>i_n<mode>.  */
+  BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n, 0)
+  BUILTIN_VSDQ_I_DI (USHIFTACC, usri_n, 0)
+  BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n, 0)
+  BUILTIN_VSDQ_I_DI (USHIFTACC, usli_n, 0)
+  /* Implemented by aarch64_<sur>qshl<u>_n<mode>.  */
+  BUILTIN_VSDQ_I (SHIFTIMM_USS, sqshlu_n, 0)
+  BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0)
+  BUILTIN_VSDQ_I (USHIFTIMM, uqshl_n, 0)
+
+  /* Implemented by aarch64_reduc_plus_<mode>.  */
+  BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
+
+  /* Implemented by reduc_<maxmin_uns>_scal_<mode> (producing scalar).  */
+  BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
+  BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
+  BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
+  BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
+  BUILTIN_VDQF (UNOP, reduc_smax_nan_scal_, 10)
+  BUILTIN_VDQF (UNOP, reduc_smin_nan_scal_, 10)
+
+  /* Implemented by <maxmin><mode>3.
+     smax variants map to fmaxnm,
+     smax_nan variants map to fmax.  */
+  BUILTIN_VDQIF (BINOP, smax, 3)
+  BUILTIN_VDQIF (BINOP, smin, 3)
+  BUILTIN_VDQ_BHSI (BINOP, umax, 3)
+  BUILTIN_VDQ_BHSI (BINOP, umin, 3)
+  BUILTIN_VDQF (BINOP, smax_nan, 3)
+  BUILTIN_VDQF (BINOP, smin_nan, 3)
+
+  /* Implemented by aarch64_<maxmin_uns>p<mode>.  */
+  BUILTIN_VDQ_BHSI (BINOP, smaxp, 0)
+  BUILTIN_VDQ_BHSI (BINOP, sminp, 0)
+  BUILTIN_VDQ_BHSI (BINOP, umaxp, 0)
+  BUILTIN_VDQ_BHSI (BINOP, uminp, 0)
+  BUILTIN_VDQF (BINOP, smaxp, 0)
+  BUILTIN_VDQF (BINOP, sminp, 0)
+  BUILTIN_VDQF (BINOP, smax_nanp, 0)
+  BUILTIN_VDQF (BINOP, smin_nanp, 0)
+
+  /* Implemented by <frint_pattern><mode>2.  */
+  BUILTIN_VDQF (UNOP, btrunc, 2)
+  BUILTIN_VDQF (UNOP, ceil, 2)
+  BUILTIN_VDQF (UNOP, floor, 2)
+  BUILTIN_VDQF (UNOP, nearbyint, 2)
+  BUILTIN_VDQF (UNOP, rint, 2)
+  BUILTIN_VDQF (UNOP, round, 2)
+  BUILTIN_VDQF_DF (UNOP, frintn, 2)
+
+  /* Implemented by l<fcvt_pattern><su_optab><VQDF:mode><vcvt_target>2.  */
+  VAR1 (UNOP, lbtruncv2sf, 2, v2si)
+  VAR1 (UNOP, lbtruncv4sf, 2, v4si)
+  VAR1 (UNOP, lbtruncv2df, 2, v2di)
+
+  VAR1 (UNOPUS, lbtruncuv2sf, 2, v2si)
+  VAR1 (UNOPUS, lbtruncuv4sf, 2, v4si)
+  VAR1 (UNOPUS, lbtruncuv2df, 2, v2di)
+
+  VAR1 (UNOP, lroundv2sf, 2, v2si)
+  VAR1 (UNOP, lroundv4sf, 2, v4si)
+  VAR1 (UNOP, lroundv2df, 2, v2di)
+  /* Implemented by l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2.  */
+  VAR1 (UNOP, lroundsf, 2, si)
+  VAR1 (UNOP, lrounddf, 2, di)
+
+  VAR1 (UNOPUS, lrounduv2sf, 2, v2si)
+  VAR1 (UNOPUS, lrounduv4sf, 2, v4si)
+  VAR1 (UNOPUS, lrounduv2df, 2, v2di)
+  VAR1 (UNOPUS, lroundusf, 2, si)
+  VAR1 (UNOPUS, lroundudf, 2, di)
+
+  VAR1 (UNOP, lceilv2sf, 2, v2si)
+  VAR1 (UNOP, lceilv4sf, 2, v4si)
+  VAR1 (UNOP, lceilv2df, 2, v2di)
+
+  VAR1 (UNOPUS, lceiluv2sf, 2, v2si)
+  VAR1 (UNOPUS, lceiluv4sf, 2, v4si)
+  VAR1 (UNOPUS, lceiluv2df, 2, v2di)
+  VAR1 (UNOPUS, lceilusf, 2, si)
+  VAR1 (UNOPUS, lceiludf, 2, di)
+
+  VAR1 (UNOP, lfloorv2sf, 2, v2si)
+  VAR1 (UNOP, lfloorv4sf, 2, v4si)
+  VAR1 (UNOP, lfloorv2df, 2, v2di)
+
+  VAR1 (UNOPUS, lflooruv2sf, 2, v2si)
+  VAR1 (UNOPUS, lflooruv4sf, 2, v4si)
+  VAR1 (UNOPUS, lflooruv2df, 2, v2di)
+  VAR1 (UNOPUS, lfloorusf, 2, si)
+  VAR1 (UNOPUS, lfloorudf, 2, di)
+
+  VAR1 (UNOP, lfrintnv2sf, 2, v2si)
+  VAR1 (UNOP, lfrintnv4sf, 2, v4si)
+  VAR1 (UNOP, lfrintnv2df, 2, v2di)
+  VAR1 (UNOP, lfrintnsf, 2, si)
+  VAR1 (UNOP, lfrintndf, 2, di)
+
+  VAR1 (UNOPUS, lfrintnuv2sf, 2, v2si)
+  VAR1 (UNOPUS, lfrintnuv4sf, 2, v4si)
+  VAR1 (UNOPUS, lfrintnuv2df, 2, v2di)
+  VAR1 (UNOPUS, lfrintnusf, 2, si)
+  VAR1 (UNOPUS, lfrintnudf, 2, di)
+
+  /* Implemented by <optab><fcvt_target><VDQF:mode>2.  */
+  VAR1 (UNOP, floatv2si, 2, v2sf)
+  VAR1 (UNOP, floatv4si, 2, v4sf)
+  VAR1 (UNOP, floatv2di, 2, v2df)
+
+  VAR1 (UNOP, floatunsv2si, 2, v2sf)
+  VAR1 (UNOP, floatunsv4si, 2, v4sf)
+  VAR1 (UNOP, floatunsv2di, 2, v2df)
+
+  VAR5 (UNOPU, bswap, 2, v4hi, v8hi, v2si, v4si, v2di)
+
+  BUILTIN_VB (UNOP, rbit, 0)
+
+  /* Implemented by
+     aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>.  */
+  BUILTIN_VALL (BINOP, zip1, 0)
+  BUILTIN_VALL (BINOP, zip2, 0)
+  BUILTIN_VALL (BINOP, uzp1, 0)
+  BUILTIN_VALL (BINOP, uzp2, 0)
+  BUILTIN_VALL (BINOP, trn1, 0)
+  BUILTIN_VALL (BINOP, trn2, 0)
+
+  /* Implemented by
+     aarch64_frecp<FRECP:frecp_suffix><mode>.  */
+  BUILTIN_GPF (UNOP, frecpe, 0)
+  BUILTIN_GPF (BINOP, frecps, 0)
+  BUILTIN_GPF (UNOP, frecpx, 0)
+
+  BUILTIN_VDQ_SI (UNOP, urecpe, 0)
+
+  BUILTIN_VDQF (UNOP, frecpe, 0)
+  BUILTIN_VDQF (BINOP, frecps, 0)
+
+  /* Implemented by a mixture of abs2 patterns.  Note the DImode builtin is
+     only ever used for the int64x1_t intrinsic, there is no scalar version.  */
+  BUILTIN_VSDQ_I_DI (UNOP, abs, 0)
+  BUILTIN_VDQF (UNOP, abs, 2)
+
+  BUILTIN_VQ_HSF (UNOP, vec_unpacks_hi_, 10)
+  VAR1 (BINOP, float_truncate_hi_, 0, v4sf)
+  VAR1 (BINOP, float_truncate_hi_, 0, v8hf)
+
+  VAR1 (UNOP, float_extend_lo_, 0, v2df)
+  VAR1 (UNOP, float_extend_lo_,  0, v4sf)
+  BUILTIN_VDF (UNOP, float_truncate_lo_, 0)
+
+  /* Implemented by aarch64_ld1<VALL_F16:mode>.  */
+  BUILTIN_VALL_F16 (LOAD1, ld1, 0)
+
+  /* Implemented by aarch64_st1<VALL_F16:mode>.  */
+  BUILTIN_VALL_F16 (STORE1, st1, 0)
+
+  /* Implemented by fma<mode>4.  */
+  BUILTIN_VDQF (TERNOP, fma, 4)
+
+  /* Implemented by aarch64_simd_bsl<mode>.  */
+  BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
+  BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
+  BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
+
+  /* Implemented by aarch64_crypto_aes<op><mode>.  */
+  VAR1 (BINOPU, crypto_aese, 0, v16qi)
+  VAR1 (BINOPU, crypto_aesd, 0, v16qi)
+  VAR1 (UNOPU, crypto_aesmc, 0, v16qi)
+  VAR1 (UNOPU, crypto_aesimc, 0, v16qi)
+
+  /* Implemented by aarch64_crypto_sha1<op><mode>.  */
+  VAR1 (UNOPU, crypto_sha1h, 0, si)
+  VAR1 (BINOPU, crypto_sha1su1, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha1c, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha1m, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha1p, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha1su0, 0, v4si)
+
+  /* Implemented by aarch64_crypto_sha256<op><mode>.  */
+  VAR1 (TERNOPU, crypto_sha256h, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha256h2, 0, v4si)
+  VAR1 (BINOPU, crypto_sha256su0, 0, v4si)
+  VAR1 (TERNOPU, crypto_sha256su1, 0, v4si)
+
+  /* Implemented by aarch64_crypto_pmull<mode>.  */
+  VAR1 (BINOPP, crypto_pmull, 0, di)
+  VAR1 (BINOPP, crypto_pmull, 0, v2di)
+
+  /* Implemented by aarch64_tbl3<mode>.  */
+  VAR1 (BINOP, tbl3, 0, v8qi)
+  VAR1 (BINOP, tbl3, 0, v16qi)
+
+  /* Implemented by aarch64_qtbl3<mode>.  */
+  VAR1 (BINOP, qtbl3, 0, v8qi)
+  VAR1 (BINOP, qtbl3, 0, v16qi)
+
+  /* Implemented by aarch64_qtbl4<mode>.  */
+  VAR1 (BINOP, qtbl4, 0, v8qi)
+  VAR1 (BINOP, qtbl4, 0, v16qi)
+
+  /* Implemented by aarch64_tbx4<mode>.  */
+  VAR1 (TERNOP, tbx4, 0, v8qi)
+  VAR1 (TERNOP, tbx4, 0, v16qi)
+
+  /* Implemented by aarch64_qtbx3<mode>.  */
+  VAR1 (TERNOP, qtbx3, 0, v8qi)
+  VAR1 (TERNOP, qtbx3, 0, v16qi)
+
+  /* Implemented by aarch64_qtbx4<mode>.  */
+  VAR1 (TERNOP, qtbx4, 0, v8qi)
+  VAR1 (TERNOP, qtbx4, 0, v16qi)
+
+  /* Builtins for ARMv8.1 Adv.SIMD instructions.  */
+
+  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h<mode>.  */
+  BUILTIN_VSDQ_HSI (TERNOP, sqrdmlah, 0)
+  BUILTIN_VSDQ_HSI (TERNOP, sqrdmlsh, 0)
+
+  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_lane<mode>.  */
+  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlah_lane, 0)
+  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlsh_lane, 0)
+
+  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_laneq<mode>.  */
+  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlah_laneq, 0)
+  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlsh_laneq, 0)
+
+  /* Implemented by <FCVT_F2FIXED/FIXED2F:fcvt_fixed_insn><*><*>3.  */
+  BUILTIN_GPF (BINOP, scvtfsi, 3)
+  BUILTIN_GPF (BINOP, scvtfdi, 3)
+  BUILTIN_GPF (BINOP_SUS, ucvtfsi, 3)
+  BUILTIN_GPF (BINOP_SUS, ucvtfdi, 3)
+  BUILTIN_GPI (BINOP, fcvtzssf, 3)
+  BUILTIN_GPI (BINOP, fcvtzsdf, 3)
+  BUILTIN_GPI (BINOP_USS, fcvtzusf, 3)
+  BUILTIN_GPI (BINOP_USS, fcvtzudf, 3)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
deleted file mode 100644
index dd04579..0000000
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ /dev/null
@@ -1,447 +0,0 @@
-/* Machine description for AArch64 architecture.
-   Copyright (C) 2012-2016 Free Software Foundation, Inc.
-   Contributed by ARM Ltd.
-
-   This file is part of GCC.
-
-   GCC is free software; you can redistribute it and/or modify it
-   under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 3, or (at your option)
-   any later version.
-
-   GCC is distributed in the hope that it will be useful, but
-   WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with GCC; see the file COPYING3.  If not see
-   <http://www.gnu.org/licenses/>.  */
-
-/* In the list below, the BUILTIN_<ITERATOR> macros expand to create
-   builtins for each of the modes described by <ITERATOR>.  When adding
-   new builtins to this list, a helpful idiom to follow is to add
-   a line for each pattern in the md file.  Thus, ADDP, which has one
-   pattern defined for the VD_BHSI iterator, and one for DImode, has two
-   entries below.
-
-   Parameter 1 is the 'type' of the intrinsic.  This is used to
-   describe the type modifiers (for example; unsigned) applied to
-   each of the parameters to the intrinsic function.
-
-   Parameter 2 is the name of the intrinsic.  This is appended
-   to `__builtin_aarch64_<name><mode>` to give the intrinsic name
-   as exported to the front-ends.
-
-   Parameter 3 describes how to map from the name to the CODE_FOR_
-   macro holding the RTL pattern for the intrinsic.  This mapping is:
-   0 - CODE_FOR_aarch64_<name><mode>
-   1-9 - CODE_FOR_<name><mode><1-9>
-   10 - CODE_FOR_<name><mode>.  */
-
-  BUILTIN_VDC (COMBINE, combine, 0)
-  BUILTIN_VB (BINOP, pmul, 0)
-  BUILTIN_VALLF (BINOP, fmulx, 0)
-  BUILTIN_VDQF_DF (UNOP, sqrt, 2)
-  BUILTIN_VD_BHSI (BINOP, addp, 0)
-  VAR1 (UNOP, addp, 0, di)
-  BUILTIN_VDQ_BHSI (UNOP, clrsb, 2)
-  BUILTIN_VDQ_BHSI (UNOP, clz, 2)
-  BUILTIN_VS (UNOP, ctz, 2)
-  BUILTIN_VB (UNOP, popcount, 2)
-
-  /* Implemented by aarch64_<sur>q<r>shl<mode>.  */
-  BUILTIN_VSDQ_I (BINOP, sqshl, 0)
-  BUILTIN_VSDQ_I (BINOP_UUS, uqshl, 0)
-  BUILTIN_VSDQ_I (BINOP, sqrshl, 0)
-  BUILTIN_VSDQ_I (BINOP_UUS, uqrshl, 0)
-  /* Implemented by aarch64_<su_optab><optab><mode>.  */
-  BUILTIN_VSDQ_I (BINOP, sqadd, 0)
-  BUILTIN_VSDQ_I (BINOPU, uqadd, 0)
-  BUILTIN_VSDQ_I (BINOP, sqsub, 0)
-  BUILTIN_VSDQ_I (BINOPU, uqsub, 0)
-  /* Implemented by aarch64_<sur>qadd<mode>.  */
-  BUILTIN_VSDQ_I (BINOP_SSU, suqadd, 0)
-  BUILTIN_VSDQ_I (BINOP_UUS, usqadd, 0)
-
-  /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>.  */
-  BUILTIN_VDC (GETREG, get_dregoi, 0)
-  BUILTIN_VDC (GETREG, get_dregci, 0)
-  BUILTIN_VDC (GETREG, get_dregxi, 0)
-  /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>.  */
-  BUILTIN_VQ (GETREG, get_qregoi, 0)
-  BUILTIN_VQ (GETREG, get_qregci, 0)
-  BUILTIN_VQ (GETREG, get_qregxi, 0)
-  /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>.  */
-  BUILTIN_VQ (SETREG, set_qregoi, 0)
-  BUILTIN_VQ (SETREG, set_qregci, 0)
-  BUILTIN_VQ (SETREG, set_qregxi, 0)
-  /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>.  */
-  BUILTIN_VDC (LOADSTRUCT, ld2, 0)
-  BUILTIN_VDC (LOADSTRUCT, ld3, 0)
-  BUILTIN_VDC (LOADSTRUCT, ld4, 0)
-  /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>.  */
-  BUILTIN_VQ (LOADSTRUCT, ld2, 0)
-  BUILTIN_VQ (LOADSTRUCT, ld3, 0)
-  BUILTIN_VQ (LOADSTRUCT, ld4, 0)
-  /* Implemented by aarch64_ld<VSTRUCT:nregs>r<VALLDIF:mode>.  */
-  BUILTIN_VALLDIF (LOADSTRUCT, ld2r, 0)
-  BUILTIN_VALLDIF (LOADSTRUCT, ld3r, 0)
-  BUILTIN_VALLDIF (LOADSTRUCT, ld4r, 0)
-  /* Implemented by aarch64_ld<VSTRUCT:nregs>_lane<VQ:mode>.  */
-  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld2_lane, 0)
-  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld3_lane, 0)
-  BUILTIN_VALLDIF (LOADSTRUCT_LANE, ld4_lane, 0)
-  /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>.  */
-  BUILTIN_VDC (STORESTRUCT, st2, 0)
-  BUILTIN_VDC (STORESTRUCT, st3, 0)
-  BUILTIN_VDC (STORESTRUCT, st4, 0)
-  /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>.  */
-  BUILTIN_VQ (STORESTRUCT, st2, 0)
-  BUILTIN_VQ (STORESTRUCT, st3, 0)
-  BUILTIN_VQ (STORESTRUCT, st4, 0)
-
-  BUILTIN_VALLDIF (STORESTRUCT_LANE, st2_lane, 0)
-  BUILTIN_VALLDIF (STORESTRUCT_LANE, st3_lane, 0)
-  BUILTIN_VALLDIF (STORESTRUCT_LANE, st4_lane, 0)
-
-  BUILTIN_VQW (BINOP, saddl2, 0)
-  BUILTIN_VQW (BINOP, uaddl2, 0)
-  BUILTIN_VQW (BINOP, ssubl2, 0)
-  BUILTIN_VQW (BINOP, usubl2, 0)
-  BUILTIN_VQW (BINOP, saddw2, 0)
-  BUILTIN_VQW (BINOP, uaddw2, 0)
-  BUILTIN_VQW (BINOP, ssubw2, 0)
-  BUILTIN_VQW (BINOP, usubw2, 0)
-  /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>.  */
-  BUILTIN_VD_BHSI (BINOP, saddl, 0)
-  BUILTIN_VD_BHSI (BINOP, uaddl, 0)
-  BUILTIN_VD_BHSI (BINOP, ssubl, 0)
-  BUILTIN_VD_BHSI (BINOP, usubl, 0)
-  /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>.  */
-  BUILTIN_VD_BHSI (BINOP, saddw, 0)
-  BUILTIN_VD_BHSI (BINOP, uaddw, 0)
-  BUILTIN_VD_BHSI (BINOP, ssubw, 0)
-  BUILTIN_VD_BHSI (BINOP, usubw, 0)
-  /* Implemented by aarch64_<sur>h<addsub><mode>.  */
-  BUILTIN_VDQ_BHSI (BINOP, shadd, 0)
-  BUILTIN_VDQ_BHSI (BINOP, shsub, 0)
-  BUILTIN_VDQ_BHSI (BINOP, uhadd, 0)
-  BUILTIN_VDQ_BHSI (BINOP, uhsub, 0)
-  BUILTIN_VDQ_BHSI (BINOP, srhadd, 0)
-  BUILTIN_VDQ_BHSI (BINOP, urhadd, 0)
-  /* Implemented by aarch64_<sur><addsub>hn<mode>.  */
-  BUILTIN_VQN (BINOP, addhn, 0)
-  BUILTIN_VQN (BINOP, subhn, 0)
-  BUILTIN_VQN (BINOP, raddhn, 0)
-  BUILTIN_VQN (BINOP, rsubhn, 0)
-  /* Implemented by aarch64_<sur><addsub>hn2<mode>.  */
-  BUILTIN_VQN (TERNOP, addhn2, 0)
-  BUILTIN_VQN (TERNOP, subhn2, 0)
-  BUILTIN_VQN (TERNOP, raddhn2, 0)
-  BUILTIN_VQN (TERNOP, rsubhn2, 0)
-
-  BUILTIN_VSQN_HSDI (UNOP, sqmovun, 0)
-  /* Implemented by aarch64_<sur>qmovn<mode>.  */
-  BUILTIN_VSQN_HSDI (UNOP, sqmovn, 0)
-  BUILTIN_VSQN_HSDI (UNOP, uqmovn, 0)
-  /* Implemented by aarch64_s<optab><mode>.  */
-  BUILTIN_VSDQ_I (UNOP, sqabs, 0)
-  BUILTIN_VSDQ_I (UNOP, sqneg, 0)
-
-  /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>.  */
-  BUILTIN_VSD_HSI (TERNOP, sqdmlal, 0)
-  BUILTIN_VSD_HSI (TERNOP, sqdmlsl, 0)
-  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_lane<mode>.  */
-  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlal_lane, 0)
-  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlsl_lane, 0)
-  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_laneq<mode>.  */
-  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlal_laneq, 0)
-  BUILTIN_VSD_HSI (QUADOP_LANE, sqdmlsl_laneq, 0)
-  /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>.  */
-  BUILTIN_VD_HSI (TERNOP, sqdmlal_n, 0)
-  BUILTIN_VD_HSI (TERNOP, sqdmlsl_n, 0)
-
-  BUILTIN_VQ_HSI (TERNOP, sqdmlal2, 0)
-  BUILTIN_VQ_HSI (TERNOP, sqdmlsl2, 0)
-  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlal2_lane, 0)
-  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlsl2_lane, 0)
-  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlal2_laneq, 0)
-  BUILTIN_VQ_HSI (QUADOP_LANE, sqdmlsl2_laneq, 0)
-  BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n, 0)
-  BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n, 0)
-
-  BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
-  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
-  BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)
-  BUILTIN_VD_HSI (BINOP, sqdmull_n, 0)
-  BUILTIN_VQ_HSI (BINOP, sqdmull2, 0)
-  BUILTIN_VQ_HSI (TERNOP_LANE, sqdmull2_lane, 0)
-  BUILTIN_VQ_HSI (TERNOP_LANE, sqdmull2_laneq, 0)
-  BUILTIN_VQ_HSI (BINOP, sqdmull2_n, 0)
-  /* Implemented by aarch64_sq<r>dmulh<mode>.  */
-  BUILTIN_VSDQ_HSI (BINOP, sqdmulh, 0)
-  BUILTIN_VSDQ_HSI (BINOP, sqrdmulh, 0)
-  /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>.  */
-  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_lane, 0)
-  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_laneq, 0)
-  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqrdmulh_lane, 0)
-  BUILTIN_VSDQ_HSI (TERNOP_LANE, sqrdmulh_laneq, 0)
-
-  BUILTIN_VSDQ_I_DI (BINOP, ashl, 3)
-  /* Implemented by aarch64_<sur>shl<mode>.  */
-  BUILTIN_VSDQ_I_DI (BINOP, sshl, 0)
-  BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0)
-  BUILTIN_VSDQ_I_DI (BINOP, srshl, 0)
-  BUILTIN_VSDQ_I_DI (BINOP_UUS, urshl, 0)
-
-  BUILTIN_VDQ_I (SHIFTIMM, ashr, 3)
-  VAR1 (SHIFTIMM, ashr_simd, 0, di)
-  BUILTIN_VDQ_I (SHIFTIMM, lshr, 3)
-  VAR1 (USHIFTIMM, lshr_simd, 0, di)
-  /* Implemented by aarch64_<sur>shr_n<mode>.  */
-  BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0)
-  BUILTIN_VSDQ_I_DI (USHIFTIMM, urshr_n, 0)
-  /* Implemented by aarch64_<sur>sra_n<mode>.  */
-  BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n, 0)
-  BUILTIN_VSDQ_I_DI (USHIFTACC, usra_n, 0)
-  BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n, 0)
-  BUILTIN_VSDQ_I_DI (USHIFTACC, ursra_n, 0)
-  /* Implemented by aarch64_<sur>shll_n<mode>.  */
-  BUILTIN_VD_BHSI (SHIFTIMM, sshll_n, 0)
-  BUILTIN_VD_BHSI (USHIFTIMM, ushll_n, 0)
-  /* Implemented by aarch64_<sur>shll2_n<mode>.  */
-  BUILTIN_VQW (SHIFTIMM, sshll2_n, 0)
-  BUILTIN_VQW (SHIFTIMM, ushll2_n, 0)
-  /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>.  */
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n, 0)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n, 0)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n, 0)
-  BUILTIN_VSQN_HSDI (USHIFTIMM, uqshrn_n, 0)
-  BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n, 0)
-  BUILTIN_VSQN_HSDI (USHIFTIMM, uqrshrn_n, 0)
-  /* Implemented by aarch64_<sur>s<lr>i_n<mode>.  */
-  BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n, 0)
-  BUILTIN_VSDQ_I_DI (USHIFTACC, usri_n, 0)
-  BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n, 0)
-  BUILTIN_VSDQ_I_DI (USHIFTACC, usli_n, 0)
-  /* Implemented by aarch64_<sur>qshl<u>_n<mode>.  */
-  BUILTIN_VSDQ_I (SHIFTIMM_USS, sqshlu_n, 0)
-  BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0)
-  BUILTIN_VSDQ_I (USHIFTIMM, uqshl_n, 0)
-
-  /* Implemented by aarch64_reduc_plus_<mode>.  */
-  BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
-
-  /* Implemented by reduc_<maxmin_uns>_scal_<mode> (producing scalar).  */
-  BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
-  BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
-  BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
-  BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
-  BUILTIN_VDQF (UNOP, reduc_smax_nan_scal_, 10)
-  BUILTIN_VDQF (UNOP, reduc_smin_nan_scal_, 10)
-
-  /* Implemented by <maxmin><mode>3.
-     smax variants map to fmaxnm,
-     smax_nan variants map to fmax.  */
-  BUILTIN_VDQIF (BINOP, smax, 3)
-  BUILTIN_VDQIF (BINOP, smin, 3)
-  BUILTIN_VDQ_BHSI (BINOP, umax, 3)
-  BUILTIN_VDQ_BHSI (BINOP, umin, 3)
-  BUILTIN_VDQF (BINOP, smax_nan, 3)
-  BUILTIN_VDQF (BINOP, smin_nan, 3)
-
-  /* Implemented by aarch64_<maxmin_uns>p<mode>.  */
-  BUILTIN_VDQ_BHSI (BINOP, smaxp, 0)
-  BUILTIN_VDQ_BHSI (BINOP, sminp, 0)
-  BUILTIN_VDQ_BHSI (BINOP, umaxp, 0)
-  BUILTIN_VDQ_BHSI (BINOP, uminp, 0)
-  BUILTIN_VDQF (BINOP, smaxp, 0)
-  BUILTIN_VDQF (BINOP, sminp, 0)
-  BUILTIN_VDQF (BINOP, smax_nanp, 0)
-  BUILTIN_VDQF (BINOP, smin_nanp, 0)
-
-  /* Implemented by <frint_pattern><mode>2.  */
-  BUILTIN_VDQF (UNOP, btrunc, 2)
-  BUILTIN_VDQF (UNOP, ceil, 2)
-  BUILTIN_VDQF (UNOP, floor, 2)
-  BUILTIN_VDQF (UNOP, nearbyint, 2)
-  BUILTIN_VDQF (UNOP, rint, 2)
-  BUILTIN_VDQF (UNOP, round, 2)
-  BUILTIN_VDQF_DF (UNOP, frintn, 2)
-
-  /* Implemented by l<fcvt_pattern><su_optab><VQDF:mode><vcvt_target>2.  */
-  VAR1 (UNOP, lbtruncv2sf, 2, v2si)
-  VAR1 (UNOP, lbtruncv4sf, 2, v4si)
-  VAR1 (UNOP, lbtruncv2df, 2, v2di)
-
-  VAR1 (UNOPUS, lbtruncuv2sf, 2, v2si)
-  VAR1 (UNOPUS, lbtruncuv4sf, 2, v4si)
-  VAR1 (UNOPUS, lbtruncuv2df, 2, v2di)
-
-  VAR1 (UNOP, lroundv2sf, 2, v2si)
-  VAR1 (UNOP, lroundv4sf, 2, v4si)
-  VAR1 (UNOP, lroundv2df, 2, v2di)
-  /* Implemented by l<fcvt_pattern><su_optab><GPF:mode><GPI:mode>2.  */
-  VAR1 (UNOP, lroundsf, 2, si)
-  VAR1 (UNOP, lrounddf, 2, di)
-
-  VAR1 (UNOPUS, lrounduv2sf, 2, v2si)
-  VAR1 (UNOPUS, lrounduv4sf, 2, v4si)
-  VAR1 (UNOPUS, lrounduv2df, 2, v2di)
-  VAR1 (UNOPUS, lroundusf, 2, si)
-  VAR1 (UNOPUS, lroundudf, 2, di)
-
-  VAR1 (UNOP, lceilv2sf, 2, v2si)
-  VAR1 (UNOP, lceilv4sf, 2, v4si)
-  VAR1 (UNOP, lceilv2df, 2, v2di)
-
-  VAR1 (UNOPUS, lceiluv2sf, 2, v2si)
-  VAR1 (UNOPUS, lceiluv4sf, 2, v4si)
-  VAR1 (UNOPUS, lceiluv2df, 2, v2di)
-  VAR1 (UNOPUS, lceilusf, 2, si)
-  VAR1 (UNOPUS, lceiludf, 2, di)
-
-  VAR1 (UNOP, lfloorv2sf, 2, v2si)
-  VAR1 (UNOP, lfloorv4sf, 2, v4si)
-  VAR1 (UNOP, lfloorv2df, 2, v2di)
-
-  VAR1 (UNOPUS, lflooruv2sf, 2, v2si)
-  VAR1 (UNOPUS, lflooruv4sf, 2, v4si)
-  VAR1 (UNOPUS, lflooruv2df, 2, v2di)
-  VAR1 (UNOPUS, lfloorusf, 2, si)
-  VAR1 (UNOPUS, lfloorudf, 2, di)
-
-  VAR1 (UNOP, lfrintnv2sf, 2, v2si)
-  VAR1 (UNOP, lfrintnv4sf, 2, v4si)
-  VAR1 (UNOP, lfrintnv2df, 2, v2di)
-  VAR1 (UNOP, lfrintnsf, 2, si)
-  VAR1 (UNOP, lfrintndf, 2, di)
-
-  VAR1 (UNOPUS, lfrintnuv2sf, 2, v2si)
-  VAR1 (UNOPUS, lfrintnuv4sf, 2, v4si)
-  VAR1 (UNOPUS, lfrintnuv2df, 2, v2di)
-  VAR1 (UNOPUS, lfrintnusf, 2, si)
-  VAR1 (UNOPUS, lfrintnudf, 2, di)
-
-  /* Implemented by <optab><fcvt_target><VDQF:mode>2.  */
-  VAR1 (UNOP, floatv2si, 2, v2sf)
-  VAR1 (UNOP, floatv4si, 2, v4sf)
-  VAR1 (UNOP, floatv2di, 2, v2df)
-
-  VAR1 (UNOP, floatunsv2si, 2, v2sf)
-  VAR1 (UNOP, floatunsv4si, 2, v4sf)
-  VAR1 (UNOP, floatunsv2di, 2, v2df)
-
-  VAR5 (UNOPU, bswap, 2, v4hi, v8hi, v2si, v4si, v2di)
-
-  BUILTIN_VB (UNOP, rbit, 0)
-
-  /* Implemented by
-     aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>.  */
-  BUILTIN_VALL (BINOP, zip1, 0)
-  BUILTIN_VALL (BINOP, zip2, 0)
-  BUILTIN_VALL (BINOP, uzp1, 0)
-  BUILTIN_VALL (BINOP, uzp2, 0)
-  BUILTIN_VALL (BINOP, trn1, 0)
-  BUILTIN_VALL (BINOP, trn2, 0)
-
-  /* Implemented by
-     aarch64_frecp<FRECP:frecp_suffix><mode>.  */
-  BUILTIN_GPF (UNOP, frecpe, 0)
-  BUILTIN_GPF (BINOP, frecps, 0)
-  BUILTIN_GPF (UNOP, frecpx, 0)
-
-  BUILTIN_VDQ_SI (UNOP, urecpe, 0)
-
-  BUILTIN_VDQF (UNOP, frecpe, 0)
-  BUILTIN_VDQF (BINOP, frecps, 0)
-
-  /* Implemented by a mixture of abs2 patterns.  Note the DImode builtin is
-     only ever used for the int64x1_t intrinsic, there is no scalar version.  */
-  BUILTIN_VSDQ_I_DI (UNOP, abs, 0)
-  BUILTIN_VDQF (UNOP, abs, 2)
-
-  BUILTIN_VQ_HSF (UNOP, vec_unpacks_hi_, 10)
-  VAR1 (BINOP, float_truncate_hi_, 0, v4sf)
-  VAR1 (BINOP, float_truncate_hi_, 0, v8hf)
-
-  VAR1 (UNOP, float_extend_lo_, 0, v2df)
-  VAR1 (UNOP, float_extend_lo_,  0, v4sf)
-  BUILTIN_VDF (UNOP, float_truncate_lo_, 0)
-
-  /* Implemented by aarch64_ld1<VALL_F16:mode>.  */
-  BUILTIN_VALL_F16 (LOAD1, ld1, 0)
-
-  /* Implemented by aarch64_st1<VALL_F16:mode>.  */
-  BUILTIN_VALL_F16 (STORE1, st1, 0)
-
-  /* Implemented by fma<mode>4.  */
-  BUILTIN_VDQF (TERNOP, fma, 4)
-
-  /* Implemented by aarch64_simd_bsl<mode>.  */
-  BUILTIN_VDQQH (BSL_P, simd_bsl, 0)
-  BUILTIN_VSDQ_I_DI (BSL_U, simd_bsl, 0)
-  BUILTIN_VALLDIF (BSL_S, simd_bsl, 0)
-
-  /* Implemented by aarch64_crypto_aes<op><mode>.  */
-  VAR1 (BINOPU, crypto_aese, 0, v16qi)
-  VAR1 (BINOPU, crypto_aesd, 0, v16qi)
-  VAR1 (UNOPU, crypto_aesmc, 0, v16qi)
-  VAR1 (UNOPU, crypto_aesimc, 0, v16qi)
-
-  /* Implemented by aarch64_crypto_sha1<op><mode>.  */
-  VAR1 (UNOPU, crypto_sha1h, 0, si)
-  VAR1 (BINOPU, crypto_sha1su1, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha1c, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha1m, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha1p, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha1su0, 0, v4si)
-
-  /* Implemented by aarch64_crypto_sha256<op><mode>.  */
-  VAR1 (TERNOPU, crypto_sha256h, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha256h2, 0, v4si)
-  VAR1 (BINOPU, crypto_sha256su0, 0, v4si)
-  VAR1 (TERNOPU, crypto_sha256su1, 0, v4si)
-
-  /* Implemented by aarch64_crypto_pmull<mode>.  */
-  VAR1 (BINOPP, crypto_pmull, 0, di)
-  VAR1 (BINOPP, crypto_pmull, 0, v2di)
-
-  /* Implemented by aarch64_tbl3<mode>.  */
-  VAR1 (BINOP, tbl3, 0, v8qi)
-  VAR1 (BINOP, tbl3, 0, v16qi)
-
-  /* Implemented by aarch64_qtbl3<mode>.  */
-  VAR1 (BINOP, qtbl3, 0, v8qi)
-  VAR1 (BINOP, qtbl3, 0, v16qi)
-
-  /* Implemented by aarch64_qtbl4<mode>.  */
-  VAR1 (BINOP, qtbl4, 0, v8qi)
-  VAR1 (BINOP, qtbl4, 0, v16qi)
-
-  /* Implemented by aarch64_tbx4<mode>.  */
-  VAR1 (TERNOP, tbx4, 0, v8qi)
-  VAR1 (TERNOP, tbx4, 0, v16qi)
-
-  /* Implemented by aarch64_qtbx3<mode>.  */
-  VAR1 (TERNOP, qtbx3, 0, v8qi)
-  VAR1 (TERNOP, qtbx3, 0, v16qi)
-
-  /* Implemented by aarch64_qtbx4<mode>.  */
-  VAR1 (TERNOP, qtbx4, 0, v8qi)
-  VAR1 (TERNOP, qtbx4, 0, v16qi)
-
-  /* Builtins for ARMv8.1 Adv.SIMD instructions.  */
-
-  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h<mode>.  */
-  BUILTIN_VSDQ_HSI (TERNOP, sqrdmlah, 0)
-  BUILTIN_VSDQ_HSI (TERNOP, sqrdmlsh, 0)
-
-  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_lane<mode>.  */
-  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlah_lane, 0)
-  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlsh_lane, 0)
-
-  /* Implemented by aarch64_sqrdml<SQRDMLH_AS:rdma_as>h_laneq<mode>.  */
-  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlah_laneq, 0)
-  BUILTIN_VSDQ_HSI (QUADOP_LANE, sqrdmlsh_laneq, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 59a578f..670c690 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1778,6 +1778,28 @@
   [(set_attr "type" "neon_fp_cvt_widen_s")]
 )
 
+;; Convert between fixed-point and floating-point (scalar variant from SIMD)
+
+(define_insn "<FCVT_F2FIXED:fcvt_fixed_insn><GPF:mode>3"
+  [(set (match_operand:<GPF:FCVT_TARGET> 0 "register_operand" "=w")
+	(unspec:<GPF:FCVT_TARGET> [(match_operand:GPF 1 "register_operand" "w")
+				   (match_operand:SI 2 "immediate_operand" "i")]
+	 FCVT_F2FIXED))]
+  "TARGET_SIMD"
+  "<FCVT_F2FIXED:fcvt_fixed_insn>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #%2"
+  [(set_attr "type" "neon_fp_to_int_<GPF:Vetype><q>")]
+)
+
+(define_insn "<FCVT_FIXED2F:fcvt_fixed_insn><GPI:mode>3"
+  [(set (match_operand:<GPI:FCVT_TARGET> 0 "register_operand" "=w")
+	(unspec:<GPI:FCVT_TARGET> [(match_operand:GPI 1 "register_operand" "w")
+				   (match_operand:SI 2 "immediate_operand" "i")]
+	 FCVT_FIXED2F))]
+  "TARGET_SIMD"
+  "<FCVT_FIXED2F:fcvt_fixed_insn>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #%2"
+  [(set_attr "type" "neon_int_to_fp_<GPI:Vetype><q>")]
+)
+
 ;; ??? Note that the vectorizer usage of the vec_unpacks_[lo/hi] patterns
 ;; is inconsistent with vector ordering elsewhere in the compiler, in that
 ;; the meaning of HI and LO changes depending on the target endianness.
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 223a4cc..d463808 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -75,6 +75,8 @@
     UNSPEC_CRC32H
     UNSPEC_CRC32W
     UNSPEC_CRC32X
+    UNSPEC_FCVTZS_SCALAR
+    UNSPEC_FCVTZU_SCALAR
     UNSPEC_URECPE
     UNSPEC_FRECPE
     UNSPEC_FRECPS
@@ -105,6 +107,7 @@
     UNSPEC_NOP
     UNSPEC_PRLG_STK
     UNSPEC_RBIT
+    UNSPEC_SCVTF_SCALAR
     UNSPEC_SISD_NEG
     UNSPEC_SISD_SSHL
     UNSPEC_SISD_USHL
@@ -122,6 +125,7 @@
     UNSPEC_TLSLE24
     UNSPEC_TLSLE32
     UNSPEC_TLSLE48
+    UNSPEC_UCVTF_SCALAR
     UNSPEC_USHL_2S
     UNSPEC_VSTRUCTDUMMY
     UNSPEC_SP_SET
@@ -4626,6 +4630,28 @@
   [(set_attr "type" "f_cvti2f")]
 )
 
+;; Convert between fixed-point and floating-point
+
+(define_insn "<FCVT_F2FIXED_SCALAR:fcvt_fixed_insn><GPF:mode><GPI:mode>3"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+	(unspec:GPI [(match_operand:GPF 1 "register_operand" "w")
+		     (match_operand:SI 2 "immediate_operand" "i")]
+	 FCVT_F2FIXED_SCALAR))]
+  "TARGET_FLOAT"
+  "<FCVT_F2FIXED_SCALAR:fcvt_fixed_insn>\t%<w1>0, %<s>1, #%2"
+  [(set_attr "type" "f_cvtf2i")]
+)
+
+(define_insn "<FCVT_FIXED2F_SCALAR:fcvt_fixed_insn><GPI:mode><GPF:mode>3"
+  [(set (match_operand:GPF 0 "register_operand" "=w")
+	(unspec:GPF [(match_operand:GPI 1 "register_operand" "r")
+		     (match_operand:SI 2 "immediate_operand" "i")]
+	 FCVT_FIXED2F_SCALAR))]
+  "TARGET_FLOAT"
+  "<FCVT_FIXED2F_SCALAR:fcvt_fixed_insn>\t%<s>0, %<w1>1, #%2"
+  [(set_attr "type" "f_cvti2f")]
+)
+
 ;; -------------------------------------------------------------------
 ;; Floating-point arithmetic
 ;; -------------------------------------------------------------------
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index e563e3d..012a11a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -6073,54 +6073,6 @@ vaddlvq_u32 (uint32x4_t a)
        result;                                                          \
      })
 
-#define vcvtd_n_f64_s64(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       int64_t a_ = (a);                                                \
-       float64_t result;                                                \
-       __asm__ ("scvtf %d0,%d1,%2"                                      \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvtd_n_f64_u64(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       uint64_t a_ = (a);                                               \
-       float64_t result;                                                \
-       __asm__ ("ucvtf %d0,%d1,%2"                                      \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvtd_n_s64_f64(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       float64_t a_ = (a);                                              \
-       int64_t result;                                                  \
-       __asm__ ("fcvtzs %d0,%d1,%2"                                     \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvtd_n_u64_f64(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       float64_t a_ = (a);                                              \
-       uint64_t result;                                                 \
-       __asm__ ("fcvtzu %d0,%d1,%2"                                     \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
 #define vcvtq_n_f32_s32(a, b)                                           \
   __extension__                                                         \
     ({                                                                  \
@@ -6217,54 +6169,6 @@ vaddlvq_u32 (uint32x4_t a)
        result;                                                          \
      })
 
-#define vcvts_n_f32_s32(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       int32_t a_ = (a);                                                \
-       float32_t result;                                                \
-       __asm__ ("scvtf %s0,%s1,%2"                                      \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvts_n_f32_u32(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       uint32_t a_ = (a);                                               \
-       float32_t result;                                                \
-       __asm__ ("ucvtf %s0,%s1,%2"                                      \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvts_n_s32_f32(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       float32_t a_ = (a);                                              \
-       int32_t result;                                                  \
-       __asm__ ("fcvtzs %s0,%s1,%2"                                     \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vcvts_n_u32_f32(a, b)                                           \
-  __extension__                                                         \
-    ({                                                                  \
-       float32_t a_ = (a);                                              \
-       uint32_t result;                                                 \
-       __asm__ ("fcvtzu %s0,%s1,%2"                                     \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vcvtx_f32_f64 (float64x2_t a)
 {
@@ -12830,6 +12734,58 @@ vcvt_high_f64_f32 (float32x4_t __a)
   return __builtin_aarch64_vec_unpacks_hi_v4sf (__a);
 }
 
+/* vcvt (<u>fixed-point -> float).  */
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_n_f64_s64 (int64_t __a, const int __b)
+{
+  return __builtin_aarch64_scvtfdidf (__a, __b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vcvtd_n_f64_u64 (uint64_t __a, const int __b)
+{
+  return __builtin_aarch64_ucvtfdidf_sus (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_n_f32_s32 (int32_t __a, const int __b)
+{
+  return __builtin_aarch64_scvtfsisf (__a, __b);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vcvts_n_f32_u32 (uint32_t __a, const int __b)
+{
+  return __builtin_aarch64_ucvtfsisf_sus (__a, __b);
+}
+
+/* vcvt (float -> <u>fixed-point).  */
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtd_n_s64_f64 (float64_t __a, const int __b)
+{
+  return __builtin_aarch64_fcvtzsdfdi (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtd_n_u64_f64 (float64_t __a, const int __b)
+{
+  return __builtin_aarch64_fcvtzudfdi_uss (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvts_n_s32_f32 (float32_t __a, const int __b)
+{
+  return __builtin_aarch64_fcvtzssfsi (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvts_n_u32_f32 (float32_t __a, const int __b)
+{
+  return __builtin_aarch64_fcvtzusfsi_uss (__a, __b);
+}
+
 /* vcvt  (<u>int -> float)  */
 
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index d9bd391..4ebd6f7 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -208,6 +208,8 @@
     UNSPEC_ASHIFT_SIGNED	; Used in aarch-simd.md.
     UNSPEC_ASHIFT_UNSIGNED	; Used in aarch64-simd.md.
     UNSPEC_ABS		; Used in aarch64-simd.md.
+    UNSPEC_FCVTZS	; Used in aarch64-simd.md.
+    UNSPEC_FCVTZU	; Used in aarch64-simd.md.
     UNSPEC_FMAX		; Used in aarch64-simd.md.
     UNSPEC_FMAXNMV	; Used in aarch64-simd.md.
     UNSPEC_FMAXV	; Used in aarch64-simd.md.
@@ -216,8 +218,10 @@
     UNSPEC_FMINV	; Used in aarch64-simd.md.
     UNSPEC_FADDV	; Used in aarch64-simd.md.
     UNSPEC_ADDV		; Used in aarch64-simd.md.
+    UNSPEC_SCVTF	; Used in aarch64-simd.md.
     UNSPEC_SMAXV	; Used in aarch64-simd.md.
     UNSPEC_SMINV	; Used in aarch64-simd.md.
+    UNSPEC_UCVTF	; Used in aarch64-simd.md.
     UNSPEC_UMAXV	; Used in aarch64-simd.md.
     UNSPEC_UMINV	; Used in aarch64-simd.md.
     UNSPEC_SHADD	; Used in aarch64-simd.md.
@@ -648,8 +652,11 @@
 (define_mode_attr atomic_sfx
   [(QI "b") (HI "h") (SI "") (DI "")])
 
-(define_mode_attr fcvt_target [(V2DF "v2di") (V4SF "v4si") (V2SF "v2si") (SF "si") (DF "di")])
-(define_mode_attr FCVT_TARGET [(V2DF "V2DI") (V4SF "V4SI") (V2SF "V2SI") (SF "SI") (DF "DI")])
+(define_mode_attr fcvt_target [(V2DF "v2di") (V4SF "v4si") (V2SF "v2si")
+			       (SF "si") (DF "di") (SI "sf") (DI "df")])
+(define_mode_attr FCVT_TARGET [(V2DF "V2DI") (V4SF "V4SI") (V2SF "V2SI")
+			       (SF "SI") (DF "DI") (SI "SF") (DI "DF")])
+
 
 ;; for the inequal width integer to fp conversions
 (define_mode_attr fcvt_iesize [(SF "di") (DF "si")])
@@ -1001,6 +1008,11 @@
 (define_int_iterator FCVT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
 			    UNSPEC_FRINTA UNSPEC_FRINTN])
 
+(define_int_iterator FCVT_F2FIXED [UNSPEC_FCVTZS UNSPEC_FCVTZU])
+(define_int_iterator FCVT_FIXED2F [UNSPEC_SCVTF UNSPEC_UCVTF])
+(define_int_iterator FCVT_F2FIXED_SCALAR [UNSPEC_FCVTZS_SCALAR UNSPEC_FCVTZU_SCALAR])
+(define_int_iterator FCVT_FIXED2F_SCALAR [UNSPEC_SCVTF_SCALAR UNSPEC_UCVTF_SCALAR])
+
 (define_int_iterator FRECP [UNSPEC_FRECPE UNSPEC_FRECPX])
 
 (define_int_iterator CRC [UNSPEC_CRC32B UNSPEC_CRC32H UNSPEC_CRC32W
@@ -1137,6 +1149,15 @@
 			       (UNSPEC_FRINTP "ceil") (UNSPEC_FRINTM "floor")
 			       (UNSPEC_FRINTN "frintn")])
 
+(define_int_attr fcvt_fixed_insn [(UNSPEC_SCVTF "scvtf")
+				  (UNSPEC_SCVTF_SCALAR "scvtf")
+				  (UNSPEC_UCVTF "ucvtf")
+				  (UNSPEC_UCVTF_SCALAR "ucvtf")
+				  (UNSPEC_FCVTZS "fcvtzs")
+				  (UNSPEC_FCVTZS_SCALAR "fcvtzs")
+				  (UNSPEC_FCVTZU "fcvtzu")
+				  (UNSPEC_FCVTZU_SCALAR "fcvtzu")])
+
 (define_int_attr perm_insn [(UNSPEC_ZIP1 "zip") (UNSPEC_ZIP2 "zip")
 			    (UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
 			    (UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
index 778e15c..2473776 100644
--- a/gcc/config/aarch64/t-aarch64
+++ b/gcc/config/aarch64/t-aarch64
@@ -34,7 +34,7 @@ aarch64-builtins.o: $(srcdir)/config/aarch64/aarch64-builtins.c $(CONFIG_H) \
   $(SYSTEM_H) coretypes.h $(TM_H) \
   $(RTL_H) $(TREE_H) expr.h $(TM_P_H) $(RECOG_H) langhooks.h \
   $(DIAGNOSTIC_CORE_H) $(OPTABS_H) \
-  $(srcdir)/config/aarch64/aarch64-simd-builtins.def \
+  $(srcdir)/config/aarch64/aarch64-builtins.def \
   $(srcdir)/config/aarch64/aarch64-simd-builtin-types.def \
   aarch64-builtin-iterators.h
 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
-- 
1.9.1
