Extract ~1630 lines of gvec expander code from the overly large translate.c into a new translate-gvec.c.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
target/arm/tcg/translate-gvec.c | 1644 +++
target/arm/tcg/translate.c | 1630 --
target/arm/tcg/meson.build | 1 +
3 files changed, 1645 insertions(+), 1630 deletions(-)
create mode 100644 target/arm/tcg/translate-gvec.c
diff --git a/target/arm/tcg/translate-gvec.c b/target/arm/tcg/translate-gvec.c
new file mode 100644
index 00..8b1d5e283c
--- /dev/null
+++ b/target/arm/tcg/translate-gvec.c
@@ -0,0 +1,1644 @@
+/*
+ * ARM AdvSIMD / SVE Vector Helpers
+ *
+ * Copyright (c) 2020 Linaro
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "translate.h"
+
+#define HELPER_H "tcg/helper-neon.h.inc"
+#include "exec/helper-gen.h.inc"
+#undef HELPER_H
+
+/*
+ * Expand a three-operand gvec operation through a helper that takes an
+ * extra pointer operand: a pointer to CPUARMState.vfp.qc is computed
+ * from cpu_env and passed as the fourth (ptr) argument, so the helper
+ * can update the QC field.  The helper's data argument is fixed at 0.
+ */
+static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
+uint32_t opr_sz, uint32_t max_sz,
+gen_helper_gvec_3_ptr *fn)
+{
+TCGv_ptr qc_ptr = tcg_temp_new_ptr();
+
+/* qc_ptr = env + offsetof(CPUARMState, vfp.qc) */
+tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
+ opr_sz, max_sz, 0, fn);
+}
+
+/*
+ * Expand SQRDMLAH via the out-of-line qrdmlah helpers, routing the QC
+ * pointer through gen_gvec_fn3_qc.  Only element sizes MO_16 (vece 1)
+ * and MO_32 (vece 2) are supported, as enforced by the assert; the fns[]
+ * table is indexed by vece - 1 accordingly.
+ */
+void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+static gen_helper_gvec_3_ptr * const fns[2] = {
+gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
+};
+tcg_debug_assert(vece >= 1 && vece <= 2);
+gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
+/*
+ * Expand SQRDMLSH via the out-of-line qrdmlsh helpers; mirror image of
+ * gen_gvec_sqrdmlah_qc above.  Only MO_16 (vece 1) and MO_32 (vece 2)
+ * element sizes are valid, per the assert.
+ */
+void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+static gen_helper_gvec_3_ptr * const fns[2] = {
+gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
+};
+tcg_debug_assert(vece >= 1 && vece <= 2);
+gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
+#define GEN_CMP0(NAME, COND)\
+static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
+{ \
+tcg_gen_setcondi_i32(COND, d, a, 0);\
+tcg_gen_neg_i32(d, d); \
+} \
+static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
+{ \
+tcg_gen_setcondi_i64(COND, d, a, 0);\
+tcg_gen_neg_i64(d, d); \
+} \
+static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
+{ \
+TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0); \
+tcg_gen_cmp_vec(COND, vece, d, a, zero);\
+} \
+void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
+uint32_t opr_sz, uint32_t max_sz) \
+{ \
+const GVecGen2 op[4] = {\
+{ .fno = gen_helper_gvec_##NAME##0_b, \
+ .fniv = gen_##NAME##0_vec,\
+ .opt_opc = vecop_list_cmp,\
+ .vece = MO_8 }, \
+{ .fno = gen_helper_gvec_##NAME##0_h, \
+ .fniv = gen_##NAME##0_vec,\
+ .opt_opc = vecop_list_cmp,\
+ .vece = MO_16 }, \
+{ .fni4 = gen_##NAME##0_i32,\
+ .fniv = gen_##NAME##0_vec,\
+ .opt_opc = vecop_list_cmp,\
+ .vece = MO_32 }, \
+{ .fni8 = gen_##NAME##0_i64,\
+ .fniv = gen_##NAME##0_vec,\
+ .opt_opc = vecop_list_cmp,\
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
+ .vece = MO_64 }, \
+}; \
+