> +void
> +expand_vec_cmp (rtx target, rtx_code code, rtx mask, rtx maskoff, rtx op0,
> +               rtx op1)
> ...
> +  rtx cmp = gen_rtx_fmt_ee (code, mask_mode, op0, op1);
> +  rtx ops[RVV_CMP_OP + 2] = {target, mask, maskoff, cmp, op0, op1};
> +  emit_vlmax_cmp_insn (icode, RVV_CMP_OP + 2, ops);

This is too magic; the caller shouldn't have to know that passing RVV_CMP_OP + 2 and prepending mask/maskoff flips emit_vlmax_cmp_insn into a masked mode.

> +/* This function emits cmp instruction.  */
> +void
> +emit_vlmax_cmp_insn (unsigned icode, int op_num, rtx *ops)
> +{
> +  machine_mode mode = GET_MODE (ops[0]);
> +  bool fully_unmasked_p = op_num == RVV_CMP_OP ? true : false;
> +  bool use_real_merge_p = op_num == RVV_CMP_OP ? false : true;

Don't do that; please split this function into two instead of keying the behavior off op_num (see the sketch below the quoted code).

> +  /* We have a maximum of 11 operands for RVV instruction patterns according to
> +   * vector.md.  */
> +  insn_expander<11> e (/*OP_NUM*/ op_num, /*HAS_DEST_P*/ true,
> +                      /*FULLY_UNMASKED_P*/ fully_unmasked_p,
> +                      /*USE_REAL_MERGE_P*/ use_real_merge_p,
> +                      /*HAS_AVL_P*/ true,
> +                      /*VLMAX_P*/ true,
> +                      /*DEST_MODE*/ mode, /*MASK_MODE*/ mode);
> +  e.set_policy (op_num == RVV_CMP_OP ? MASK_UNDISTURBED : MASK_ANY);
> +  e.emit_insn ((enum insn_code) icode, ops);
> +}
> +
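Roughly something like this, just a sketch reusing the insn_expander arguments
from your patch; emit_vlmax_cmp_mu_insn is only a placeholder name:

/* Fully unmasked compare: ops = {target, cmp, op0, op1}.  */
void
emit_vlmax_cmp_insn (unsigned icode, rtx *ops)
{
  machine_mode mode = GET_MODE (ops[0]);
  insn_expander<11> e (/*OP_NUM*/ RVV_CMP_OP, /*HAS_DEST_P*/ true,
		       /*FULLY_UNMASKED_P*/ true,
		       /*USE_REAL_MERGE_P*/ false,
		       /*HAS_AVL_P*/ true,
		       /*VLMAX_P*/ true,
		       /*DEST_MODE*/ mode, /*MASK_MODE*/ mode);
  e.set_policy (MASK_UNDISTURBED);
  e.emit_insn ((enum insn_code) icode, ops);
}

/* Masked compare merging into maskoff:
   ops = {target, mask, maskoff, cmp, op0, op1}.  */
void
emit_vlmax_cmp_mu_insn (unsigned icode, rtx *ops)
{
  machine_mode mode = GET_MODE (ops[0]);
  insn_expander<11> e (/*OP_NUM*/ RVV_CMP_OP + 2, /*HAS_DEST_P*/ true,
		       /*FULLY_UNMASKED_P*/ false,
		       /*USE_REAL_MERGE_P*/ true,
		       /*HAS_AVL_P*/ true,
		       /*VLMAX_P*/ true,
		       /*DEST_MODE*/ mode, /*MASK_MODE*/ mode);
  e.set_policy (MASK_ANY);
  e.emit_insn ((enum insn_code) icode, ops);
}

Then each caller says what it means instead of encoding it in the operand count.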
>  /* Expand series const vector.  */
>
>  void
> +void
> +expand_vec_cmp (rtx target, rtx_code code, rtx op0, rtx op1)
> +{
> +  machine_mode mask_mode = GET_MODE (target);
> +  machine_mode data_mode = GET_MODE (op0);
> +  insn_code icode = get_cmp_insn_code (code, data_mode);
> +
> +  if (code == LTGT)
> +    {
> +      rtx gt = gen_reg_rtx (mask_mode);
> +      rtx lt = gen_reg_rtx (mask_mode);
> +      expand_vec_cmp (gt, GT, op0, op1);
> +      expand_vec_cmp (lt, LT, op0, op1);
> +      icode = code_for_pred (IOR, mask_mode);
> +      rtx ops[3] = {target, gt, lt};

rtx ops[] = {target, gt, lt};

> +      emit_vlmax_insn (icode, riscv_vector::RVV_BINOP, ops);
> +      return;
> +    }
> +
> +  rtx cmp = gen_rtx_fmt_ee (code, mask_mode, op0, op1);
> +  rtx ops[RVV_CMP_OP] = {target, cmp, op0, op1};

rtx ops[] = {target, cmp, op0, op1};

> +  emit_vlmax_cmp_insn (icode, RVV_CMP_OP, ops);
> +}
> +

> +  /* There is native support for the inverse comparison.  */
> +  code = reverse_condition_maybe_unordered (code);
> +  if (code == ORDERED)
> +    emit_move_insn (target, eq0);
> +  else
> +    expand_vec_cmp (eq0, code, eq0, eq0, op0, op1);
> +
> +  if (can_invert_p)
> +    {
> +      emit_move_insn (target, eq0);
> +      return true;
> +    }
> +  insn_code icode = code_for_pred_not (mask_mode);
> +  rtx ops[RVV_UNOP] = {target, eq0};
> +  emit_vlmax_insn (icode, RVV_UNOP, ops);

rtx ops[] = {target, eq0};
