From: Mateja Marjanovic <mateja.marjano...@rt-rk.com>

Optimize the ILVL.<B|H|W|D> instructions by working directly on TCG
registers and performing the logic on them. Instead of shifting the
bit mask or assigning a new TCG constant to it, assign a new (shifted)
uint64_t value to the bit mask.
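To make the wording above concrete, the difference in mask handling
boils down to the following two styles (an illustrative sketch only,
not code copied from any revision of this patch; it shows the first
two byte lanes of ILVL.B):

Shifting a TCG-register mask, or recreating it with a TCG constant
(the styles this patch avoids):

    TCGv_i64 mask = tcg_const_i64(0x00000000000000ffULL);
    tcg_gen_and_i64(t1, msa_wr_d[wt * 2 + 1], mask);
    ...
    tcg_gen_shli_i64(mask, mask, 8);   /* emits an extra shift op per lane */
    tcg_gen_and_i64(t1, msa_wr_d[wt * 2 + 1], mask);

Assigning a new (shifted) uint64_t value to the mask (the style used
here):

    uint64_t mask = 0x00000000000000ffULL;
    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
    ...
    mask = 0x000000000000ff00ULL;      /* host-side constant, no TCG op emitted */
    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);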
Performance measurement was done by executing each of these
instructions 10 million times on a machine with an Intel Core i7-3770
CPU @ 3.40GHz×8.

============================================================
|| instruction ||   BEFORE    || LOOP UNROLL ||    TCG     ||
============================================================
||   ilvl.b    || 107.069 ms  ||  55.619 ms  ||  7.735 ms  ||
||   ilvl.h    ||  83.340 ms  ||  31.320 ms  ||  3.797 ms  ||
||   ilvl.w    || 109.448 ms  ||  31.714 ms  ||  2.381 ms  ||
||   ilvl.d    ||  31.557 ms  ||  28.716 ms  ||  2.029 ms  ||
============================================================

Suggested-by: Aleksandar Markovic <amarko...@wavecomp.com>
Signed-off-by: Mateja Marjanovic <mateja.marjano...@rt-rk.com>
---
 target/mips/helper.h     |   1 -
 target/mips/msa_helper.c |   8 ---
 target/mips/translate.c  | 184 ++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 183 insertions(+), 10 deletions(-)

diff --git a/target/mips/helper.h b/target/mips/helper.h
index 2f23b0d..85c8b17 100644
--- a/target/mips/helper.h
+++ b/target/mips/helper.h
@@ -862,7 +862,6 @@ DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_pckev_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_pckod_df, void, env, i32, i32, i32, i32)
-DEF_HELPER_5(msa_ilvl_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_ilvr_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32)
 DEF_HELPER_5(msa_srar_df, void, env, i32, i32, i32, i32)
diff --git a/target/mips/msa_helper.c b/target/mips/msa_helper.c
index a500c59..f9b85fc 100644
--- a/target/mips/msa_helper.c
+++ b/target/mips/msa_helper.c
@@ -1184,14 +1184,6 @@ MSA_FN_DF(pckod_df)
 
 #define MSA_DO(DF)                          \
     do {                                    \
-        pwx->DF[2*i]   = L##DF(pwt, i);     \
-        pwx->DF[2*i+1] = L##DF(pws, i);     \
-    } while (0)
-MSA_FN_DF(ilvl_df)
-#undef MSA_DO
-
-#define MSA_DO(DF)                          \
-    do {                                    \
         pwx->DF[2*i]   = R##DF(pwt, i);     \
         pwx->DF[2*i+1] = R##DF(pws, i);     \
     } while (0)
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 930ef3a..d9aef77 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -28002,6 +28002,173 @@ static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx)
 }
 
 /*
+ * [MSA] ILVL.B wd, ws, wt
+ *
+ *   Vector Interleave Left (byte data elements)
+ *
+ */
+static inline void gen_ilvl_b(CPUMIPSState *env, uint32_t wd,
+                              uint32_t ws, uint32_t wt)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    uint64_t mask = 0x00000000000000ffULL;
+
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 8);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x000000000000ff00ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 8);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x0000000000ff0000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 24);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x00000000ff000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 24);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 32);
+    tcg_gen_or_i64(msa_wr_d[wd * 2], t2, t1);
+
+    mask = 0x000000ff00000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 32);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 24);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x0000ff0000000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 24);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x00ff000000000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 8);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0xff00000000000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 8);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_or_i64(msa_wr_d[wd * 2 + 1], t2, t1);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+/*
+ * [MSA] ILVL.H wd, ws, wt
+ *
+ *   Vector Interleave Left (halfword data elements)
+ *
+ */
+static inline void gen_ilvl_h(CPUMIPSState *env, uint32_t wd,
+                              uint32_t ws, uint32_t wt)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    uint64_t mask = 0x000000000000ffffULL;
+
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0x00000000ffff0000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 32);
+    tcg_gen_or_i64(msa_wr_d[wd * 2], t2, t1);
+
+    mask = 0x0000ffff00000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 32);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+
+    mask = 0xffff000000000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 16);
+    tcg_gen_or_i64(t2, t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_or_i64(msa_wr_d[wd * 2 + 1], t2, t1);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+/*
+ * [MSA] ILVL.W wd, ws, wt
+ *
+ *   Vector Interleave Left (word data elements)
+ *
+ */
+static inline void gen_ilvl_w(CPUMIPSState *env, uint32_t wd,
+                              uint32_t ws, uint32_t wt)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    uint64_t mask = 0x00000000ffffffffULL;
+
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_shli_i64(t1, t1, 32);
+    tcg_gen_or_i64(msa_wr_d[wd * 2], t2, t1);
+
+    mask = 0xffffffff00000000ULL;
+    tcg_gen_andi_i64(t1, msa_wr_d[wt * 2 + 1], mask);
+    tcg_gen_shri_i64(t1, t1, 32);
+    tcg_gen_mov_i64(t2, t1);
+    tcg_gen_andi_i64(t1, msa_wr_d[ws * 2 + 1], mask);
+    tcg_gen_or_i64(msa_wr_d[wd * 2 + 1], t2, t1);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+/*
+ * [MSA] ILVL.D wd, ws, wt
+ *
+ *   Vector Interleave Left (doubleword data elements)
+ *
+ */
+static inline void gen_ilvl_d(CPUMIPSState *env, uint32_t wd,
+                              uint32_t ws, uint32_t wt)
+{
+    tcg_gen_mov_i64(msa_wr_d[wd * 2], msa_wr_d[wt * 2 + 1]);
+    tcg_gen_mov_i64(msa_wr_d[wd * 2 + 1], msa_wr_d[ws * 2 + 1]);
+}
+
+/*
  * [MSA] ILVOD.<B|H> wd, ws, wt
 *
 *   Vector Interleave Odd (<byte|halfword> data elements)
@@ -28265,7 +28432,22 @@ static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx)
         gen_helper_msa_div_s_df(cpu_env, tdf, twd, tws, twt);
         break;
     case OPC_ILVL_df:
-        gen_helper_msa_ilvl_df(cpu_env, tdf, twd, tws, twt);
+        switch (df) {
+        case DF_BYTE:
+            gen_ilvl_b(env, wd, ws, wt);
+            break;
+        case DF_HALF:
+            gen_ilvl_h(env, wd, ws, wt);
+            break;
+        case DF_WORD:
+            gen_ilvl_w(env, wd, ws, wt);
+            break;
+        case DF_DOUBLE:
+            gen_ilvl_d(env, wd, ws, wt);
+            break;
+        default:
+            assert(0);
+        }
         break;
     case OPC_BNEG_df:
         gen_helper_msa_bneg_df(cpu_env, tdf, twd, tws, twt);
--
2.7.4
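P.S. (not part of the patch): for anyone reviewing the open-coded
gen_ilvl_b above, its intended behaviour is that of the removed
ilvl_df helper, which for byte elements reduces to the following
reference model on a little-endian host (where the L<df> accessors in
msa_helper.c select the upper half of the vector); names below are
illustrative only:

    #include <stdint.h>

    /* Minimal ILVL.B model: interleave the upper (left) halves of wt and ws. */
    typedef union {
        uint8_t  b[16];
        uint64_t d[2];
    } msa_vec;

    static void ref_ilvl_b(msa_vec *wd, const msa_vec *ws, const msa_vec *wt)
    {
        msa_vec wx;
        int i;

        for (i = 0; i < 8; i++) {
            wx.b[2 * i]     = wt->b[i + 8];  /* even result bytes from wt's upper half */
            wx.b[2 * i + 1] = ws->b[i + 8];  /* odd result bytes from ws's upper half  */
        }
        *wd = wx;
    }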