Re: [PATCH v6 26/61] target/riscv: vector single-width fractional multiply with rounding and saturation

2020-03-28 Thread LIU Zhiwei

On 2020/3/28 9:08, Richard Henderson wrote:

On 3/17/20 8:06 AM, LIU Zhiwei wrote:

+static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+    uint8_t round;
+    uint64_t hi_64, lo_64, Hi62;
+    uint8_t hi62, hi63, lo63;
+
+    muls64(&lo_64, &hi_64, a, b);
+    hi62 = extract64(hi_64, 62, 1);
+    lo63 = extract64(lo_64, 63, 1);
+    hi63 = extract64(hi_64, 63, 1);
+    Hi62 = extract64(hi_64, 0, 62);

This seems like way more work than necessary.


+    if (hi62 != hi63) {
+        env->vxsat = 0x1;
+        return INT64_MAX;
+    }

This can only happen for a == b == INT64_MIN.
Perhaps just test exactly that and move it above the multiply?


+    round = get_round(vxrm, lo_64, 63);
+    if (round && (Hi62 == 0x3fffffffffffffff) && lo63) {
+        env->vxsat = 0x1;
+        return hi62 ? INT64_MIN : INT64_MAX;
+    } else {
+        if (lo63 && round) {
+            return (hi_64 + 1) << 1;
+        } else {
+            return (hi_64 << 1) | lo63 | round;
+        }
+    }

Yes, it's an error here, as INT64_MIN can never be a correct result.

    /* Cannot overflow, as there are always
       2 sign bits after multiply. */
    ret = (hi_64 << 1) | (lo_64 >> 63);
    if (round) {
        if (ret == INT64_MAX) {
            env->vxsat = 1;
        } else {
            ret += 1;
        }
    }
    return ret;


Thanks. I think it's right and much clearer.

Zhiwei.

r~

Re: [PATCH v6 26/61] target/riscv: vector single-width fractional multiply with rounding and saturation

2020-03-27 Thread Richard Henderson
On 3/17/20 8:06 AM, LIU Zhiwei wrote:
> +static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
> +{
> +    uint8_t round;
> +    uint64_t hi_64, lo_64, Hi62;
> +    uint8_t hi62, hi63, lo63;
> +
> +    muls64(&lo_64, &hi_64, a, b);
> +    hi62 = extract64(hi_64, 62, 1);
> +    lo63 = extract64(lo_64, 63, 1);
> +    hi63 = extract64(hi_64, 63, 1);
> +    Hi62 = extract64(hi_64, 0, 62);

This seems like way more work than necessary.

> +    if (hi62 != hi63) {
> +        env->vxsat = 0x1;
> +        return INT64_MAX;
> +    }

This can only happen for a == b == INT64_MIN.
Perhaps just test exactly that and move it above the multiply?
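
A minimal sketch of that restructuring (illustrative only; muls64() and
env->vxsat are as used elsewhere in this patch, but the exact shape of the
respun code is an assumption):

    /* Saturate the single overflowing case before the wide multiply. */
    if (a == INT64_MIN && b == INT64_MIN) {
        env->vxsat = 0x1;
        return INT64_MAX;
    }
    muls64(&lo_64, &hi_64, a, b);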

> +    round = get_round(vxrm, lo_64, 63);
> +    if (round && (Hi62 == 0x3fffffffffffffff) && lo63) {
> +        env->vxsat = 0x1;
> +        return hi62 ? INT64_MIN : INT64_MAX;
> +    } else {
> +        if (lo63 && round) {
> +            return (hi_64 + 1) << 1;
> +        } else {
> +            return (hi_64 << 1) | lo63 | round;
> +        }
> +    }

    /* Cannot overflow, as there are always
       2 sign bits after multiply. */
    ret = (hi_64 << 1) | (lo_64 >> 63);
    if (round) {
        if (ret == INT64_MAX) {
            env->vxsat = 1;
        } else {
            ret += 1;
        }
    }
    return ret;
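
For anyone who wants to experiment with the arithmetic outside of QEMU, below
is a standalone sketch of the SEW=64 case with both changes applied. It is a
simplification, not QEMU code: only the round-to-nearest-up (rnu) mode is
handled, a plain global stands in for env->vxsat, and __int128 replaces
muls64()/get_round(). The name vsmul64_sketch is made up for illustration.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool vxsat;

    static int64_t vsmul64_sketch(int64_t a, int64_t b)
    {
        /* Only INT64_MIN * INT64_MIN overflows the Q63 result: saturate early. */
        if (a == INT64_MIN && b == INT64_MIN) {
            vxsat = true;
            return INT64_MAX;
        }

        __int128 prod = (__int128)a * b;   /* |prod| < 2^126, so two sign bits */
        uint64_t lo = (uint64_t)prod;
        int64_t hi = (int64_t)(prod >> 64);

        /* rnu: round up when the most significant discarded bit (prod bit 62) is set */
        int round = (lo >> 62) & 1;

        /* Keep prod[126:63]; the shift cannot overflow thanks to the two sign bits. */
        int64_t res = (int64_t)(((uint64_t)hi << 1) | (lo >> 63));
        if (round) {
            if (res == INT64_MAX) {
                vxsat = true;
            } else {
                res += 1;
            }
        }
        return res;
    }

    int main(void)
    {
        /* 0.5 * 0.5 in Q63: expect 0x2000000000000000 (0.25) */
        printf("%016" PRIx64 "\n",
               (uint64_t)vsmul64_sketch(INT64_C(1) << 62, INT64_C(1) << 62));
        /* INT64_MIN * INT64_MIN saturates to INT64_MAX and sets vxsat */
        printf("%016" PRIx64 " vxsat=%d\n",
               (uint64_t)vsmul64_sketch(INT64_MIN, INT64_MIN), vxsat);
        return 0;
    }

The "two sign bits" comment holds because, once INT64_MIN * INT64_MIN is
excluded, |a * b| is at most 2^63 * (2^63 - 1) < 2^126, so bits 127 and 126 of
the 128-bit product are both copies of the sign and the left shift by one
cannot overflow.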


r~



[PATCH v6 26/61] target/riscv: vector single-width fractional multiply with rounding and saturation

2020-03-17 Thread LIU Zhiwei
Signed-off-by: LIU Zhiwei 
---
 target/riscv/helper.h                   |   9 ++
 target/riscv/insn32.decode              |   2 +
 target/riscv/insn_trans/trans_rvv.inc.c |   4 +
 target/riscv/vector_helper.c            | 104
 4 files changed, 119 insertions(+)

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 311ce1322c..16544ce245 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -732,3 +732,12 @@ DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index e617d7bd60..633f782fbf 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -422,6 +422,8 @@ vaadd_vx        100100 . ..... ..... 100 ..... 1010111 @r_vm
vaadd_vi        100100 . ..... ..... 011 ..... 1010111 @r_vm
vasub_vv        100110 . ..... ..... 000 ..... 1010111 @r_vm
vasub_vx        100110 . ..... ..... 100 ..... 1010111 @r_vm
+vsmul_vv        100111 . ..... ..... 000 ..... 1010111 @r_vm
+vsmul_vx        100111 . ..... ..... 100 ..... 1010111 @r_vm

vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index ba2e7d56f4..a1206f0a4d 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -1643,3 +1643,7 @@ GEN_OPIVV_TRANS(vasub_vv, opivv_check)
 GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
 GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
 GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
+GEN_OPIVX_TRANS(vsmul_vx,  opivx_check)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 984a8e260f..5f5a8daa1e 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -2588,3 +2588,107 @@ GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
 GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
 GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
 GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+    uint8_t round;
+    int16_t res;
+
+    res = (int16_t)a * (int16_t)b;
+    round = get_round(vxrm, res, 7);
+    res   = (res >> 7) + round;
+
+    if (res > INT8_MAX) {
+        env->vxsat = 0x1;
+        return INT8_MAX;
+    } else if (res < INT8_MIN) {
+        env->vxsat = 0x1;
+        return INT8_MIN;
+    } else {
+        return res;
+    }
+}
+static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+    uint8_t round;
+    int32_t res;
+
+    res = (int32_t)a * (int32_t)b;
+    round = get_round(vxrm, res, 15);
+    res   = (res >> 15) + round;
+
+    if (res > INT16_MAX) {
+        env->vxsat = 0x1;
+        return INT16_MAX;
+    } else if (res < INT16_MIN) {
+        env->vxsat = 0x1;
+        return INT16_MIN;
+    } else {
+        return res;
+    }
+}
+static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+    uint8_t round;
+    int64_t res;
+
+    res = (int64_t)a * (int64_t)b;
+    round = get_round(vxrm, res, 31);
+    res   = (res >> 31) + round;
+
+    if (res > INT32_MAX) {
+        env->vxsat = 0x1;
+        return INT32_MAX;
+    } else if (res < INT32_MIN) {
+        env->vxsat = 0x1;
+        return INT32_MIN;
+    } else {
+        return res;
+    }
+}
+static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+    uint8_t round;
+    uint64_t hi_64, lo_64, Hi62;
+    uint8_t hi62, hi63, lo63;
+
+    muls64(&lo_64, &hi_64, a, b);
+    hi62 = extract64(hi_64, 62, 1);
+    lo63 = extract64(lo_64, 63, 1);
+    hi63 = extract64(hi_64, 63, 1);
+    Hi62 = extract64(hi_64, 0, 62);
+    if (hi62 != hi63) {
+        env->vxsat = 0x1;
+        return INT64_MAX;
+    }
+    round = get_round(vxrm, lo_64, 63);
+    if (round && (Hi62 == 0x3fffffffffffffff) && lo63) {
+        env->vxsat = 0x1;
+        return hi62 ? INT64_MIN : INT64_MAX;
+    } else {
+        if (lo63 && round) {
+            return (hi_64 + 1) << 1;
+        } else {
+            return (hi_64 << 1) | lo63 | round;
+