This patch includes:
- XVSRLN.{B.H/H.W/W.D};
- XVSRAN.{B.H/H.W/W.D};
- XVSRLNI.{B.H/H.W/W.D/D.Q};
- XVSRANI.{B.H/H.W/W.D/D.Q}.
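Each lane computes a right shift of a wide source element and truncates
the result to the narrow destination type: XVSRLN shifts logically,
XVSRAN arithmetically. The vector forms take the shift count from the
corresponding vk element (modulo the source element width); the -NI
forms take it from an immediate and pack narrowed results from both vj
and vd. A scalar model of one B.H lane (illustrative sketch only; these
function names are ad hoc and not part of the patch):

    /* One lane of vsrln.b.h / vsran.b.h (sketch, not the QEMU code). */
    #include <stdint.h>

    static uint8_t srln_b_h(uint16_t src, uint16_t cnt)
    {
        /* Logical shift of the 16-bit source, truncated to 8 bits. */
        return (uint8_t)(src >> (cnt % 16));
    }

    static uint8_t sran_b_h(int16_t src, uint16_t cnt)
    {
        /* Arithmetic shift: sign bits shift in, then truncate.
         * '>>' on negative values is implementation-defined in ISO C,
         * but is arithmetic on the compilers QEMU supports. */
        return (uint8_t)(src >> (cnt % 16));
    }

For a source of 0x8000 and cnt = 12, srln_b_h() returns 0x08 while
sran_b_h() returns 0xF8, which is exactly the logical/arithmetic
distinction the two instruction families encode.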
Signed-off-by: Song Gao <gaos...@loongson.cn>
---
 target/loongarch/disas.c                     |  16 ++
 target/loongarch/helper.h                    |  48 ++---
 target/loongarch/insn_trans/trans_lasx.c.inc |  16 ++
 target/loongarch/insns.decode                |  16 ++
 target/loongarch/vec.h                       |   2 +
 target/loongarch/vec_helper.c                | 206 +++++++++++--------
 6 files changed, 196 insertions(+), 108 deletions(-)

diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 9109203a05..14b526abd6 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -2104,6 +2104,22 @@ INSN_LASX(xvsrari_h,         vv_i)
 INSN_LASX(xvsrari_w,         vv_i)
 INSN_LASX(xvsrari_d,         vv_i)
 
+INSN_LASX(xvsrln_b_h,        vvv)
+INSN_LASX(xvsrln_h_w,        vvv)
+INSN_LASX(xvsrln_w_d,        vvv)
+INSN_LASX(xvsran_b_h,        vvv)
+INSN_LASX(xvsran_h_w,        vvv)
+INSN_LASX(xvsran_w_d,        vvv)
+
+INSN_LASX(xvsrlni_b_h,       vv_i)
+INSN_LASX(xvsrlni_h_w,       vv_i)
+INSN_LASX(xvsrlni_w_d,       vv_i)
+INSN_LASX(xvsrlni_d_q,       vv_i)
+INSN_LASX(xvsrani_b_h,       vv_i)
+INSN_LASX(xvsrani_h_w,       vv_i)
+INSN_LASX(xvsrani_w_d,       vv_i)
+INSN_LASX(xvsrani_d_q,       vv_i)
+
 INSN_LASX(xvreplgr2vr_b,     vr)
 INSN_LASX(xvreplgr2vr_h,     vr)
 INSN_LASX(xvreplgr2vr_w,     vr)
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index b4828b1829..edef8c2135 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -384,30 +384,30 @@ DEF_HELPER_5(vsrlri_h, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(vsrlri_w, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(vsrlri_d, void, env, i32, i32, i32, i32)
 
-DEF_HELPER_5(vsrar_b, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrar_h, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrar_w, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrar_d, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrari_b, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrari_h, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrari_w, void, env, int, i32, i32, i32)
-DEF_HELPER_5(vsrari_d, void, env, int, i32, i32, i32)
-
-DEF_HELPER_4(vsrln_b_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrln_h_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrln_w_d, void, env, i32, i32, i32)
-DEF_HELPER_4(vsran_b_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsran_h_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsran_w_d, void, env, i32, i32, i32)
-
-DEF_HELPER_4(vsrlni_b_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrlni_h_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrlni_w_d, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrlni_d_q, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrani_b_h, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrani_h_w, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrani_w_d, void, env, i32, i32, i32)
-DEF_HELPER_4(vsrani_d_q, void, env, i32, i32, i32)
+DEF_HELPER_5(vsrar_b, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrar_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrar_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrar_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrari_b, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrari_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrari_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrari_d, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(vsrln_b_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrln_h_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrln_w_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsran_b_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsran_h_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsran_w_d, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(vsrlni_b_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrlni_h_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrlni_w_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrlni_d_q, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrani_b_h, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrani_h_w, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrani_w_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vsrani_d_q, void, env, i32, i32, i32, i32)
 
 DEF_HELPER_4(vsrlrn_b_h, void, env, i32, i32, i32)
 DEF_HELPER_4(vsrlrn_h_w, void, env, i32, i32, i32)
diff --git a/target/loongarch/insn_trans/trans_lasx.c.inc b/target/loongarch/insn_trans/trans_lasx.c.inc
index 2f654ef401..a1c3432eec 100644
--- a/target/loongarch/insn_trans/trans_lasx.c.inc
+++ b/target/loongarch/insn_trans/trans_lasx.c.inc
@@ -480,6 +480,22 @@ TRANS(xvsrari_h, gen_vv_i, 32, gen_helper_vsrari_h)
 TRANS(xvsrari_w, gen_vv_i, 32, gen_helper_vsrari_w)
 TRANS(xvsrari_d, gen_vv_i, 32, gen_helper_vsrari_d)
 
+TRANS(xvsrln_b_h, gen_vvv, 32, gen_helper_vsrln_b_h)
+TRANS(xvsrln_h_w, gen_vvv, 32, gen_helper_vsrln_h_w)
+TRANS(xvsrln_w_d, gen_vvv, 32, gen_helper_vsrln_w_d)
+TRANS(xvsran_b_h, gen_vvv, 32, gen_helper_vsran_b_h)
+TRANS(xvsran_h_w, gen_vvv, 32, gen_helper_vsran_h_w)
+TRANS(xvsran_w_d, gen_vvv, 32, gen_helper_vsran_w_d)
+
+TRANS(xvsrlni_b_h, gen_vv_i, 32, gen_helper_vsrlni_b_h)
+TRANS(xvsrlni_h_w, gen_vv_i, 32, gen_helper_vsrlni_h_w)
+TRANS(xvsrlni_w_d, gen_vv_i, 32, gen_helper_vsrlni_w_d)
+TRANS(xvsrlni_d_q, gen_vv_i, 32, gen_helper_vsrlni_d_q)
+TRANS(xvsrani_b_h, gen_vv_i, 32, gen_helper_vsrani_b_h)
+TRANS(xvsrani_h_w, gen_vv_i, 32, gen_helper_vsrani_h_w)
+TRANS(xvsrani_w_d, gen_vv_i, 32, gen_helper_vsrani_w_d)
+TRANS(xvsrani_d_q, gen_vv_i, 32, gen_helper_vsrani_d_q)
+
 TRANS(xvreplgr2vr_b, gvec_dup, 32, MO_8)
 TRANS(xvreplgr2vr_h, gvec_dup, 32, MO_16)
 TRANS(xvreplgr2vr_w, gvec_dup, 32, MO_32)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index ca0951e1cc..204dcfa075 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1678,6 +1678,22 @@ xvsrari_h        0111 01101010 10000 1 .... ..... .....  @vv_ui4
 xvsrari_w        0111 01101010 10001 ..... ..... .....   @vv_ui5
 xvsrari_d        0111 01101010 1001 ...... ..... .....   @vv_ui6
 
+xvsrln_b_h       0111 01001111 01001 ..... ..... .....   @vvv
+xvsrln_h_w       0111 01001111 01010 ..... ..... .....   @vvv
+xvsrln_w_d       0111 01001111 01011 ..... ..... .....   @vvv
+xvsran_b_h       0111 01001111 01101 ..... ..... .....   @vvv
+xvsran_h_w       0111 01001111 01110 ..... ..... .....   @vvv
+xvsran_w_d       0111 01001111 01111 ..... ..... .....   @vvv
+
+xvsrlni_b_h      0111 01110100 00000 1 .... ..... .....  @vv_ui4
+xvsrlni_h_w      0111 01110100 00001 ..... ..... .....   @vv_ui5
+xvsrlni_w_d      0111 01110100 0001 ...... ..... .....   @vv_ui6
+xvsrlni_d_q      0111 01110100 001 ....... ..... .....   @vv_ui7
+xvsrani_b_h      0111 01110101 10000 1 .... ..... .....  @vv_ui4
+xvsrani_h_w      0111 01110101 10001 ..... ..... .....   @vv_ui5
+xvsrani_w_d      0111 01110101 1001 ...... ..... .....   @vv_ui6
+xvsrani_d_q      0111 01110101 101 ....... ..... .....   @vv_ui7
+
 xvreplgr2vr_b    0111 01101001 11110 00000 ..... .....   @vr
 xvreplgr2vr_h    0111 01101001 11110 00001 ..... .....   @vr
 xvreplgr2vr_w    0111 01101001 11110 00010 ..... .....   @vr
diff --git a/target/loongarch/vec.h b/target/loongarch/vec.h
index a0a664cde5..bc3d6b967b 100644
--- a/target/loongarch/vec.h
+++ b/target/loongarch/vec.h
@@ -74,4 +74,6 @@
 
 #define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
 
+#define R_SHIFT(a, b) (a >> b)
+
 #endif /* LOONGARCH_VEC_H */
diff --git a/target/loongarch/vec_helper.c b/target/loongarch/vec_helper.c
index b745240f8c..8083c24ea7 100644
--- a/target/loongarch/vec_helper.c
+++ b/target/loongarch/vec_helper.c
@@ -1104,113 +1104,151 @@ VSRARI(vsrari_h, 16, H)
 VSRARI(vsrari_w, 32, W)
 VSRARI(vsrari_d, 64, D)
 
-#define R_SHIFT(a, b) (a >> b)
+#define VSRLN(NAME, BIT, E1, E2)                                   \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t oprsz,          \
+                  uint32_t vd, uint32_t vj, uint32_t vk)           \
+{                                                                  \
+    int i, max;                                                    \
+    VReg *Vd = &(env->fpr[vd].vreg);                               \
+    VReg *Vj = &(env->fpr[vj].vreg);                               \
+    VReg *Vk = &(env->fpr[vk].vreg);                               \
+                                                                   \
+    max = LSX_LEN / BIT;                                           \
+    for (i = 0; i < max; i++) {                                    \
+        Vd->E1(i) = R_SHIFT(Vj->E2(i), Vk->E2(i) % BIT);           \
+        if (oprsz == 32) {                                         \
+            Vd->E1(i + max * 2) = R_SHIFT(Vj->E2(i + max),         \
+                                          Vk->E2(i + max) % BIT);  \
+        }                                                          \
+    }                                                              \
+    Vd->D(1) = 0;                                                  \
+    if (oprsz == 32) {                                             \
+        Vd->D(3) = 0;                                              \
+    }                                                              \
+}
 
-#define VSRLN(NAME, BIT, T, E1, E2)                               \
-void HELPER(NAME)(CPULoongArchState *env,                         \
-                  uint32_t vd, uint32_t vj, uint32_t vk)          \
-{                                                                 \
-    int i;                                                        \
-    VReg *Vd = &(env->fpr[vd].vreg);                              \
-    VReg *Vj = &(env->fpr[vj].vreg);                              \
-    VReg *Vk = &(env->fpr[vk].vreg);                              \
-                                                                  \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
-        Vd->E1(i) = R_SHIFT((T)Vj->E2(i),((T)Vk->E2(i)) % BIT);   \
-    }                                                             \
-    Vd->D(1) = 0;                                                 \
-}
-
-VSRLN(vsrln_b_h, 16, uint16_t, B, H)
-VSRLN(vsrln_h_w, 32, uint32_t, H, W)
-VSRLN(vsrln_w_d, 64, uint64_t, W, D)
-
-#define VSRAN(NAME, BIT, T, E1, E2)                               \
-void HELPER(NAME)(CPULoongArchState *env,                         \
-                  uint32_t vd, uint32_t vj, uint32_t vk)          \
-{                                                                 \
-    int i;                                                        \
-    VReg *Vd = &(env->fpr[vd].vreg);                              \
-    VReg *Vj = &(env->fpr[vj].vreg);                              \
-    VReg *Vk = &(env->fpr[vk].vreg);                              \
-                                                                  \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
-        Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT);     \
-    }                                                             \
-    Vd->D(1) = 0;                                                 \
-}
-
-VSRAN(vsran_b_h, 16, uint16_t, B, H)
-VSRAN(vsran_h_w, 32, uint32_t, H, W)
-VSRAN(vsran_w_d, 64, uint64_t, W, D)
-
-#define VSRLNI(NAME, BIT, T, E1, E2)                              \
-void HELPER(NAME)(CPULoongArchState *env,                         \
-                  uint32_t vd, uint32_t vj, uint32_t imm)         \
-{                                                                 \
-    int i, max;                                                   \
-    VReg temp;                                                    \
-    VReg *Vd = &(env->fpr[vd].vreg);                              \
-    VReg *Vj = &(env->fpr[vj].vreg);                              \
-                                                                  \
-    temp.D(0) = 0;                                                \
-    temp.D(1) = 0;                                                \
-    max = LSX_LEN/BIT;                                            \
-    for (i = 0; i < max; i++) {                                   \
-        temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm);                  \
-        temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm);            \
-    }                                                             \
-    *Vd = temp;                                                   \
-}
-
-void HELPER(vsrlni_d_q)(CPULoongArchState *env,
+VSRLN(vsrln_b_h, 16, B, UH)
+VSRLN(vsrln_h_w, 32, H, UW)
+VSRLN(vsrln_w_d, 64, W, UD)
+
+#define VSRAN(NAME, BIT, E1, E2, E3)                               \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t oprsz,          \
+                  uint32_t vd, uint32_t vj, uint32_t vk)           \
+{                                                                  \
+    int i, max;                                                    \
+    VReg *Vd = &(env->fpr[vd].vreg);                               \
+    VReg *Vj = &(env->fpr[vj].vreg);                               \
+    VReg *Vk = &(env->fpr[vk].vreg);                               \
+                                                                   \
+    max = LSX_LEN / BIT;                                           \
+    for (i = 0; i < max; i++) {                                    \
+        Vd->E1(i) = R_SHIFT(Vj->E2(i), Vk->E3(i) % BIT);           \
+        if (oprsz == 32) {                                         \
+            Vd->E1(i + max * 2) = R_SHIFT(Vj->E2(i + max),         \
+                                          Vk->E3(i + max) % BIT);  \
+        }                                                          \
+    }                                                              \
+    Vd->D(1) = 0;                                                  \
+    if (oprsz == 32) {                                             \
+        Vd->D(3) = 0;                                              \
+    }                                                              \
+}
+
+VSRAN(vsran_b_h, 16, B, H, UH)
+VSRAN(vsran_h_w, 32, H, W, UW)
+VSRAN(vsran_w_d, 64, W, D, UD)
+
+#define VSRLNI(NAME, BIT, E1, E2)                                  \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t oprsz,          \
+                  uint32_t vd, uint32_t vj, uint32_t imm)          \
+{                                                                  \
+    int i, max;                                                    \
+    VReg temp;                                                     \
+    VReg *Vd = &(env->fpr[vd].vreg);                               \
+    VReg *Vj = &(env->fpr[vj].vreg);                               \
+                                                                   \
+    temp.Q(0) = int128_zero();                                     \
+    if (oprsz == 32) {                                             \
+        temp.Q(1) = int128_zero();                                 \
+    }                                                              \
+    max = LSX_LEN / BIT;                                           \
+    for (i = 0; i < max; i++) {                                    \
+        temp.E1(i) = R_SHIFT(Vj->E2(i), imm);                      \
+        temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm);                \
+        if (oprsz == 32) {                                         \
+            temp.E1(i + max * 2) = R_SHIFT(Vj->E2(i + max), imm);  \
+            temp.E1(i + max * 3) = R_SHIFT(Vd->E2(i + max), imm);  \
+        }                                                          \
+    }                                                              \
+    *Vd = temp;                                                    \
+}
+
+void HELPER(vsrlni_d_q)(CPULoongArchState *env, uint32_t oprsz,
                         uint32_t vd, uint32_t vj, uint32_t imm)
 {
     VReg temp;
     VReg *Vd = &(env->fpr[vd].vreg);
     VReg *Vj = &(env->fpr[vj].vreg);
 
-    temp.D(0) = 0;
-    temp.D(1) = 0;
+    temp.Q(0) = int128_zero();
+    if (oprsz == 32) {
+        temp.Q(1) = int128_zero();
+    }
     temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
     temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
+    if (oprsz == 32) {
+        temp.D(2) = int128_getlo(int128_urshift(Vj->Q(1), imm % 128));
+        temp.D(3) = int128_getlo(int128_urshift(Vd->Q(1), imm % 128));
+    }
     *Vd = temp;
 }
 
-VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
-VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
-VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)
+VSRLNI(vsrlni_b_h, 16, B, UH)
+VSRLNI(vsrlni_h_w, 32, H, UW)
+VSRLNI(vsrlni_w_d, 64, W, UD)
 
-#define VSRANI(NAME, BIT, E1, E2)                                 \
-void HELPER(NAME)(CPULoongArchState *env,                         \
-                  uint32_t vd, uint32_t vj, uint32_t imm)         \
-{                                                                 \
-    int i, max;                                                   \
-    VReg temp;                                                    \
-    VReg *Vd = &(env->fpr[vd].vreg);                              \
-    VReg *Vj = &(env->fpr[vj].vreg);                              \
-                                                                  \
-    temp.D(0) = 0;                                                \
-    temp.D(1) = 0;                                                \
-    max = LSX_LEN/BIT;                                            \
-    for (i = 0; i < max; i++) {                                   \
-        temp.E1(i) = R_SHIFT(Vj->E2(i), imm);                     \
-        temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm);               \
-    }                                                             \
-    *Vd = temp;                                                   \
+#define VSRANI(NAME, BIT, E1, E2)                                  \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t oprsz,          \
+                  uint32_t vd, uint32_t vj, uint32_t imm)          \
+{                                                                  \
+    int i, max;                                                    \
+    VReg temp;                                                     \
+    VReg *Vd = &(env->fpr[vd].vreg);                               \
+    VReg *Vj = &(env->fpr[vj].vreg);                               \
+                                                                   \
+    temp.Q(0) = int128_zero();                                     \
+    if (oprsz == 32) {                                             \
+        temp.Q(1) = int128_zero();                                 \
+    }                                                              \
+    max = LSX_LEN / BIT;                                           \
+    for (i = 0; i < max; i++) {                                    \
+        temp.E1(i) = R_SHIFT(Vj->E2(i), imm);                      \
+        temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm);                \
+        if (oprsz == 32) {                                         \
+            temp.E1(i + max * 2) = R_SHIFT(Vj->E2(i + max), imm);  \
+            temp.E1(i + max * 3) = R_SHIFT(Vd->E2(i + max), imm);  \
+        }                                                          \
+    }                                                              \
+    *Vd = temp;                                                    \
 }
 
-void HELPER(vsrani_d_q)(CPULoongArchState *env,
+void HELPER(vsrani_d_q)(CPULoongArchState *env, uint32_t oprsz,
                         uint32_t vd, uint32_t vj, uint32_t imm)
 {
     VReg temp;
     VReg *Vd = &(env->fpr[vd].vreg);
     VReg *Vj = &(env->fpr[vj].vreg);
 
-    temp.D(0) = 0;
-    temp.D(1) = 0;
+    temp.Q(0) = int128_zero();
+    if (oprsz == 32) {
+        temp.Q(1) = int128_zero();
+    }
     temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
     temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
+    if (oprsz == 32) {
+        temp.D(2) = int128_getlo(int128_rshift(Vj->Q(1), imm % 128));
+        temp.D(3) = int128_getlo(int128_rshift(Vd->Q(1), imm % 128));
+    }
     *Vd = temp;
 }
-- 
2.39.1
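
A footnote on the D.Q forms: the vsrlni_d_q/vsrani_d_q helpers narrow
one 128-bit quantity per source operand, which is what the
int128_urshift()/int128_rshift() plus int128_getlo() pairs above
express. A rough standalone equivalent, assuming the GCC/Clang
__int128 extension (sketch only; these names are not the QEMU API):

    #include <stdint.h>

    /* vsrlni.d.q: logical 128-bit shift, keep the low 64 bits */
    static uint64_t srlni_d_q(unsigned __int128 src, unsigned int imm)
    {
        return (uint64_t)(src >> (imm % 128));
    }

    /* vsrani.d.q: arithmetic 128-bit shift, keep the low 64 bits */
    static uint64_t srani_d_q(__int128 src, unsigned int imm)
    {
        return (uint64_t)(src >> (imm % 128));
    }

Note how the helpers build the result in temp before assigning *Vd:
the narrowed vj lands in the even D() lanes and the narrowed old vd in
the odd ones, so vd can safely appear as both source and destination.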