LGTM

On Fri, Jun 21, 2024 at 9:56 AM Feng Wang <wangf...@eswincomputing.com>
wrote:

> According to the BFloat16 spec, some vector iterators and new patterns
> are added in the md files.
>
> All these changes passed the rvv tests and the rvv-intrinsic tests for bfloat16.
>
> gcc/ChangeLog:
>
>         * config/riscv/riscv.md: Add new insn names for vector BFloat16.
>         * config/riscv/vector-iterators.md: Add some iterators for
>         vector BFloat16.
>         * config/riscv/vector.md: Add some attributes for vector BFloat16.
>         * config/riscv/vector-bfloat16.md: New file.  Add insn patterns
>         for vector BFloat16.
>
> ---
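>
> For reviewers, a minimal C sketch of what these new patterns back.  It
> uses the bfloat16 intrinsics from the rvv-intrinsic spec; the intrinsic
> wiring is separate from this patch, so take the names below as
> illustrative:
>
>   #include <riscv_vector.h>
>
>   /* Zvfbfmin: vfncvtbf16.f.f.w narrows f32 to bf16,
>      vfwcvtbf16.f.f.v widens bf16 to f32.  */
>   vbfloat16m1_t narrow_f32 (vfloat32m2_t v, size_t vl)
>   {
>     return __riscv_vfncvtbf16_f_f_w_bf16m1 (v, vl);
>   }
>
>   vfloat32m2_t widen_bf16 (vbfloat16m1_t v, size_t vl)
>   {
>     return __riscv_vfwcvtbf16_f_f_v_f32m2 (v, vl);
>   }
>
>   /* Zvfbfwma: vfwmaccbf16.vv computes acc += a * b, with the bf16
>      inputs widened to f32.  */
>   vfloat32m2_t fwmacc_bf16 (vfloat32m2_t acc, vbfloat16m1_t a,
>                             vbfloat16m1_t b, size_t vl)
>   {
>     return __riscv_vfwmaccbf16_vv_f32m2 (acc, a, b, vl);
>   }
>
> Built with something like -march=rv64gcv_zvfbfwma -mabi=lp64d -O2, each
> function should collapse to the corresponding single instruction plus
> the usual vsetvli.
>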
>  gcc/config/riscv/riscv.md            |  13 ++-
>  gcc/config/riscv/vector-bfloat16.md  | 135 +++++++++++++++++++++
>  gcc/config/riscv/vector-iterators.md | 169 ++++++++++++++++++++++++++-
>  gcc/config/riscv/vector.md           | 103 ++++++++++++++--
>  4 files changed, 405 insertions(+), 15 deletions(-)
>  create mode 100644 gcc/config/riscv/vector-bfloat16.md
>
> diff --git a/gcc/config/riscv/riscv.md b/gcc/config/riscv/riscv.md
> index 7a9454de430..31dcd5f2507 100644
> --- a/gcc/config/riscv/riscv.md
> +++ b/gcc/config/riscv/riscv.md
> @@ -200,6 +200,7 @@
>    RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
>    RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
>    RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
> +  RVVM8BF,RVVM4BF,RVVM2BF,RVVM1BF,RVVMF2BF,RVVMF4BF,
>    RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
>    RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
>    RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
> @@ -219,6 +220,11 @@
>    RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
>    RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
>    RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
> +  RVVM1x8BF,RVVMF2x8BF,RVVMF4x8BF,RVVM1x7BF,RVVMF2x7BF,
> +  RVVMF4x7BF,RVVM1x6BF,RVVMF2x6BF,RVVMF4x6BF,RVVM1x5BF,
> +  RVVMF2x5BF,RVVMF4x5BF,RVVM2x4BF,RVVM1x4BF,RVVMF2x4BF,
> +  RVVMF4x4BF,RVVM2x3BF,RVVM1x3BF,RVVMF2x3BF,RVVMF4x3BF,
> +  RVVM4x2BF,RVVM2x2BF,RVVM1x2BF,RVVMF2x2BF,RVVMF4x2BF,
>    RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
>    RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
>    RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
> @@ -462,6 +468,10 @@
>  ;; vsm4r        crypto vector SM4 Rounds instructions
>  ;; vsm3me       crypto vector SM3 Message Expansion instructions
>  ;; vsm3c        crypto vector SM3 Compression instructions
> +;; 18. Vector BF16 instructions
> +;; vfncvtbf16   vector narrowing single floating-point to brain floating-point instruction
> +;; vfwcvtbf16   vector widening brain floating-point to single floating-point instruction
> +;; vfwmaccbf16  vector BF16 widening multiply-accumulate
>  (define_attr "type"
>    "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
>     mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
> @@ -483,7 +493,7 @@
>    vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
>    vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
>    vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
> -   vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c"
> +   vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
>    (cond [(eq_attr "got" "load") (const_string "load")
>
>          ;; If a doubleword move uses these expensive instructions,
> @@ -4311,6 +4321,7 @@
>  (include "generic-ooo.md")
>  (include "vector.md")
>  (include "vector-crypto.md")
> +(include "vector-bfloat16.md")
>  (include "zicond.md")
>  (include "sfb.md")
>  (include "zc.md")
> diff --git a/gcc/config/riscv/vector-bfloat16.md b/gcc/config/riscv/vector-bfloat16.md
> new file mode 100644
> index 00000000000..562aa8ee5ed
> --- /dev/null
> +++ b/gcc/config/riscv/vector-bfloat16.md
> @@ -0,0 +1,135 @@
> +;; Machine description for RISC-V bfloat16 extensions.
> +;; Copyright (C) 2024 Free Software Foundation, Inc.
> +
> +;; This file is part of GCC.
> +
> +;; GCC is free software; you can redistribute it and/or modify
> +;; it under the terms of the GNU General Public License as published by
> +;; the Free Software Foundation; either version 3, or (at your option)
> +;; any later version.
> +
> +;; GCC is distributed in the hope that it will be useful,
> +;; but WITHOUT ANY WARRANTY; without even the implied warranty of
> +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> +;; GNU General Public License for more details.
> +
> +;; You should have received a copy of the GNU General Public License
> +;; along with GCC; see the file COPYING3.  If not see
> +;; <http://www.gnu.org/licenses/>.
> +
> +(define_mode_iterator VWEXTF_ZVFBF [
> +  (RVVM8SF  "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_FP_32")
> +  (RVVM4SF  "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_FP_32")
> +  (RVVM2SF  "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_FP_32")
> +  (RVVM1SF  "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_FP_32")
> +  (RVVMF2SF "TARGET_VECTOR_ELEN_BF_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
> +])
> +
> +(define_mode_attr V_FP32TOBF16_TRUNC [
> +  (RVVM8SF "RVVM4BF") (RVVM4SF "RVVM2BF") (RVVM2SF "RVVM1BF") (RVVM1SF "RVVMF2BF") (RVVMF2SF "RVVMF4BF")
> +])
> +
> +(define_mode_attr VF32_SUBEL [
> +   (RVVM8SF "BF") (RVVM4SF "BF") (RVVM2SF "BF") (RVVM1SF "BF") (RVVMF2SF "BF")])
> +
> +;; Zvfbfmin extension
> +
> +(define_insn "@pred_trunc<mode>_to_bf16"
> +  [(set (match_operand:<V_FP32TOBF16_TRUNC> 0 "register_operand"   "=vd, vd, vr, vr,  &vr,  &vr")
> +     (if_then_else:<V_FP32TOBF16_TRUNC>
> +       (unspec:<VM>
> +         [(match_operand:<VM> 1 "vector_mask_operand"              " vm, vm,Wc1,Wc1,vmWc1,vmWc1")
> +          (match_operand 4 "vector_length_operand"                 " rK, rK, rK, rK,   rK,   rK")
> +          (match_operand 5 "const_int_operand"                     "  i,  i,  i,  i,    i,    i")
> +          (match_operand 6 "const_int_operand"                     "  i,  i,  i,  i,    i,    i")
> +          (match_operand 7 "const_int_operand"                     "  i,  i,  i,  i,    i,    i")
> +          (match_operand 8 "const_int_operand"                     "  i,  i,  i,  i,    i,    i")
> +          (reg:SI VL_REGNUM)
> +          (reg:SI VTYPE_REGNUM)
> +          (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
> +       (float_truncate:<V_FP32TOBF16_TRUNC>
> +          (match_operand:VWEXTF_ZVFBF 3 "register_operand"          "  0,  0,  0,  0,   vr,   vr"))
> +       (match_operand:<V_FP32TOBF16_TRUNC> 2 "vector_merge_operand" " vu,  0, vu,  0,   vu,    0")))]
> +  "TARGET_ZVFBFMIN"
> +  "vfncvtbf16.f.f.w\t%0,%3%p1"
> +  [(set_attr "type" "vfncvtbf16")
> +   (set_attr "mode" "<V_FP32TOBF16_TRUNC>")
> +   (set (attr "frm_mode")
> +       (symbol_ref "riscv_vector::get_frm_mode (operands[8])"))])
> +
> +(define_insn "@pred_extend_bf16_to_<mode>"
> +  [(set (match_operand:VWEXTF_ZVFBF 0 "register_operand"          "=&vr, &vr")
> +    (if_then_else:VWEXTF_ZVFBF
> +      (unspec:<VM>
> +        [(match_operand:<VM> 1 "vector_mask_operand"             "vmWc1,vmWc1")
> +         (match_operand 4 "vector_length_operand"                "   rK,   rK")
> +         (match_operand 5 "const_int_operand"                    "    i,    i")
> +         (match_operand 6 "const_int_operand"                    "    i,    i")
> +         (match_operand 7 "const_int_operand"                    "    i,    i")
> +         (reg:SI VL_REGNUM)
> +         (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
> +      (float_extend:VWEXTF_ZVFBF
> +         (match_operand:<V_FP32TOBF16_TRUNC> 3 "register_operand" "   vr,   vr"))
> +      (match_operand:VWEXTF_ZVFBF 2 "vector_merge_operand"        "   vu,    0")))]
> +  "TARGET_ZVFBFMIN"
> +  "vfwcvtbf16.f.f.v\t%0,%3%p1"
> +  [(set_attr "type" "vfwcvtbf16")
> +   (set_attr "mode" "<V_FP32TOBF16_TRUNC>")])
> +
> +
> +(define_insn "@pred_widen_bf16_mul_<mode>"
> +  [(set (match_operand:VWEXTF_ZVFBF 0 "register_operand"            "=&vr")
> +    (if_then_else:VWEXTF_ZVFBF
> +      (unspec:<VM>
> +        [(match_operand:<VM> 1 "vector_mask_operand"                "vmWc1")
> +         (match_operand 5 "vector_length_operand"                   "   rK")
> +         (match_operand 6 "const_int_operand"                       "    i")
> +         (match_operand 7 "const_int_operand"                       "    i")
> +         (match_operand 8 "const_int_operand"                       "    i")
> +         (match_operand 9 "const_int_operand"                       "    i")
> +         (reg:SI VL_REGNUM)
> +         (reg:SI VTYPE_REGNUM)
> +         (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
> +      (plus:VWEXTF_ZVFBF
> +        (mult:VWEXTF_ZVFBF
> +          (float_extend:VWEXTF_ZVFBF
> +            (match_operand:<V_FP32TOBF16_TRUNC> 3 "register_operand" "   vr"))
> +          (float_extend:VWEXTF_ZVFBF
> +            (match_operand:<V_FP32TOBF16_TRUNC> 4 "register_operand" "   vr")))
> +        (match_operand:VWEXTF_ZVFBF 2 "register_operand"             "    0"))
> +      (match_dup 2)))]
> +      (match_dup 2)))]
> +  "TARGET_ZVFBFWMA"
> +  "vfwmaccbf16.vv\t%0,%3,%4%p1"
> +  [(set_attr "type" "vfwmaccbf16")
> +   (set_attr "mode" "<V_FP32TOBF16_TRUNC>")
> +   (set (attr "frm_mode")
> +       (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
> +
> +(define_insn "@pred_widen_bf16_mul_<mode>_scalar"
> +  [(set (match_operand:VWEXTF_ZVFBF 0 "register_operand"            "=&vr")
> +    (if_then_else:VWEXTF_ZVFBF
> +      (unspec:<VM>
> +        [(match_operand:<VM> 1 "vector_mask_operand"                "vmWc1")
> +         (match_operand 5 "vector_length_operand"                   "   rK")
> +         (match_operand 6 "const_int_operand"                       "    i")
> +         (match_operand 7 "const_int_operand"                       "    i")
> +         (match_operand 8 "const_int_operand"                       "    i")
> +         (match_operand 9 "const_int_operand"                       "    i")
> +         (reg:SI VL_REGNUM)
> +         (reg:SI VTYPE_REGNUM)
> +         (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
> +      (plus:VWEXTF_ZVFBF
> +        (mult:VWEXTF_ZVFBF
> +          (float_extend:VWEXTF_ZVFBF
> +            (vec_duplicate:<V_FP32TOBF16_TRUNC>
> +              (match_operand:<VF32_SUBEL> 3 "register_operand"       "    f")))
> +          (float_extend:VWEXTF_ZVFBF
> +            (match_operand:<V_FP32TOBF16_TRUNC> 4 "register_operand" "   vr")))
> +        (match_operand:VWEXTF_ZVFBF 2 "register_operand"             "    0"))
> +      (match_dup 2)))]
> +  "TARGET_ZVFBFWMA"
> +  "vfwmaccbf16.vf\t%0,%3,%4%p1"
> +  [(set_attr "type" "vfwmaccbf16")
> +   (set_attr "mode" "<V_FP32TOBF16_TRUNC>")
> +   (set (attr "frm_mode")
> +       (symbol_ref "riscv_vector::get_frm_mode (operands[9])"))])
> diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
> index 76c27035a73..098a1c23b90 100644
> --- a/gcc/config/riscv/vector-iterators.md
> +++ b/gcc/config/riscv/vector-iterators.md
> @@ -146,6 +146,15 @@
>    (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
>  ])
>
> +(define_mode_iterator VF_ZVFBF16 [
> +  (RVVM8BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM4BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
> +])
> +
>  (define_mode_iterator VF_ZVFHMIN [
>    (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
> @@ -281,6 +290,10 @@
>  (define_mode_iterator VEEWEXT2 [
>    RVVM8HI RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
>
> +  (RVVM8BF "TARGET_VECTOR_ELEN_BF_16") (RVVM4BF "TARGET_VECTOR_ELEN_BF_16") (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
> +
>    (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
> @@ -323,6 +336,10 @@
>
>    RVVM4HI RVVM2HI RVVM1HI RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
>
> +  (RVVM4BF "TARGET_VECTOR_ELEN_BF_16") (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
> +
>    (RVVM4HF "TARGET_VECTOR_ELEN_FP_16") (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
> @@ -346,6 +363,11 @@
>    (RVVMF2HI "TARGET_64BIT")
>    (RVVMF4HI "TARGET_MIN_VLEN > 32 && TARGET_64BIT")
>
> +  (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32 && TARGET_64BIT")
> +
>    (RVVM2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
>    (RVVM1HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
>    (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_64BIT")
> @@ -768,6 +790,7 @@
>    (RVVMF4HI "TARGET_MIN_VLEN > 32")
>    (RVVMF2SI "TARGET_MIN_VLEN > 32")
>    (RVVM1DI "TARGET_VECTOR_ELEN_64")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
>    (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
>    (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
>    (RVVM1DF "TARGET_VECTOR_ELEN_FP_64")
> @@ -778,6 +801,7 @@
>    RVVMF2HI
>    RVVM1SI
>    (RVVM2DI "TARGET_VECTOR_ELEN_64")
> +  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1SF "TARGET_VECTOR_ELEN_FP_32")
>    (RVVM2DF "TARGET_VECTOR_ELEN_FP_64")
> @@ -788,6 +812,7 @@
>    RVVM1HI
>    RVVM2SI
>    (RVVM4DI "TARGET_VECTOR_ELEN_64")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM1HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM2SF "TARGET_VECTOR_ELEN_FP_32")
>    (RVVM4DF "TARGET_VECTOR_ELEN_FP_64")
> @@ -798,6 +823,7 @@
>    RVVM2HI
>    RVVM4SI
>    (RVVM8DI "TARGET_VECTOR_ELEN_64")
> +  (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM4SF "TARGET_VECTOR_ELEN_FP_32")
>    (RVVM8DF "TARGET_VECTOR_ELEN_FP_64")
> @@ -807,6 +833,7 @@
>    RVVM2QI
>    RVVM4HI
>    RVVM8SI
> +  (RVVM4BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM4HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM8SF "TARGET_VECTOR_ELEN_FP_32")
>  ])
> @@ -814,6 +841,7 @@
>  (define_mode_iterator RATIO2 [
>    RVVM4QI
>    RVVM8HI
> +  (RVVM8BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM8HF "TARGET_VECTOR_ELEN_FP_16")
>  ])
>
> @@ -865,6 +893,9 @@
>
>    RVVM8HI RVVM4HI RVVM2HI RVVM1HI
>
> +  (RVVM8BF "TARGET_VECTOR_ELEN_BF_16") (RVVM4BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM2BF "TARGET_VECTOR_ELEN_BF_16") (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
> +
>    (RVVM8HF "TARGET_VECTOR_ELEN_FP_16") (RVVM4HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM2HF "TARGET_VECTOR_ELEN_FP_16") (RVVM1HF "TARGET_VECTOR_ELEN_FP_16")
>
> @@ -885,6 +916,8 @@
>
>    RVVMF2HI (RVVMF4HI "TARGET_MIN_VLEN > 32")
>
> +  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16") (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
> +
>    (RVVMF2HF "TARGET_VECTOR_ELEN_FP_16") (RVVMF4HF "TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32")
>
>    (RVVMF2SI "TARGET_MIN_VLEN > 32")
> @@ -1142,6 +1175,13 @@
>    (RVVM1x6DI "TARGET_VECTOR_ELEN_64")
>    (RVVM1x7DI "TARGET_VECTOR_ELEN_64")
>    (RVVM1x8DI "TARGET_VECTOR_ELEN_64")
> +  (RVVMF4x2BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x3BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x4BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x5BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x6BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x7BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4x8BF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_BF_16")
>    (RVVMF4x2HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF4x3HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF4x4HF "TARGET_MIN_VLEN > 32 && TARGET_VECTOR_ELEN_FP_16")
> @@ -1190,6 +1230,13 @@
>    (RVVM2x2DI "TARGET_VECTOR_ELEN_64")
>    (RVVM2x3DI "TARGET_VECTOR_ELEN_64")
>    (RVVM2x4DI "TARGET_VECTOR_ELEN_64")
> +  (RVVMF2x2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x3BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x4BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x5BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x6BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x7BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2x8BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVMF2x2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF2x3HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVMF2x4HF "TARGET_VECTOR_ELEN_FP_16")
> @@ -1228,6 +1275,13 @@
>    RVVM2x3SI
>    RVVM2x4SI
>    (RVVM4x2DI "TARGET_VECTOR_ELEN_64")
> +  (RVVM1x2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x3BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x4BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x5BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x6BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x7BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1x8BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM1x2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1x3HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM1x4HF "TARGET_VECTOR_ELEN_FP_16")
> @@ -1253,6 +1307,9 @@
>    RVVM2x3HI
>    RVVM2x4HI
>    RVVM4x2SI
> +  (RVVM2x2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM2x3BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM2x4BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM2x2HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM2x3HF "TARGET_VECTOR_ELEN_FP_16")
>    (RVVM2x4HF "TARGET_VECTOR_ELEN_FP_16")
> @@ -1264,6 +1321,7 @@
>    RVVM2x3QI
>    RVVM2x4QI
>    RVVM4x2HI
> +  (RVVM4x2BF "TARGET_VECTOR_ELEN_BF_16")
>    (RVVM4x2HF "TARGET_VECTOR_ELEN_FP_16")
>  ])
>
> @@ -1475,6 +1533,13 @@
>    (RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
>    (RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_64BIT")
>
> +  (RVVM8BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM4BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVM1BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF2BF "TARGET_VECTOR_ELEN_BF_16")
> +  (RVVMF4BF "TARGET_VECTOR_ELEN_BF_16 && TARGET_MIN_VLEN > 32")
> +
>    (RVVM8HF "TARGET_ZVFH") (RVVM4HF "TARGET_ZVFH") (RVVM2HF "TARGET_ZVFH")
>    (RVVM1HF "TARGET_ZVFH") (RVVMF2HF "TARGET_ZVFH")
>    (RVVMF4HF "TARGET_ZVFH && TARGET_MIN_VLEN > 32")
> @@ -1574,7 +1639,7 @@
>
>  (define_mode_iterator VLS [VLSI VLSF_ZVFHMIN])
>
> -(define_mode_iterator V [VI VF_ZVFHMIN])
> +(define_mode_iterator V [VI VF_ZVFBF16 VF_ZVFHMIN])
>
>  (define_mode_iterator V_VLS [V VLS])
>
> @@ -1582,7 +1647,7 @@
>
>  (define_mode_iterator V_VLSF [VF VLSF])
>
> -(define_mode_iterator V_VLSF_ZVFHMIN [VF_ZVFHMIN VLSF_ZVFHMIN])
> +(define_mode_iterator V_VLSF_ZVFHMIN [VF_ZVFBF16 VF_ZVFHMIN VLSF_ZVFHMIN])
>
>  (define_mode_iterator VT [V1T V2T V4T V8T V16T V32T])
>
> @@ -1624,6 +1689,8 @@
>
>    (RVVM8HI "RVVM8HI") (RVVM4HI "RVVM4HI") (RVVM2HI "RVVM2HI") (RVVM1HI "RVVM1HI") (RVVMF2HI "RVVMF2HI") (RVVMF4HI "RVVMF4HI")
>
> +  (RVVM8BF "RVVM8HI") (RVVM4BF "RVVM4HI") (RVVM2BF "RVVM2HI") (RVVM1BF "RVVM1HI") (RVVMF2BF "RVVMF2HI") (RVVMF4BF "RVVMF4HI")
> +
>    (RVVM8HF "RVVM8HI") (RVVM4HF "RVVM4HI") (RVVM2HF "RVVM2HI") (RVVM1HF "RVVM1HI") (RVVMF2HF "RVVMF2HI") (RVVMF4HF "RVVMF4HI")
>
>    (RVVM8SI "RVVM8SI") (RVVM4SI "RVVM4SI") (RVVM2SI "RVVM2SI") (RVVM1SI "RVVM1SI") (RVVMF2SI "RVVMF2SI")
> @@ -1815,6 +1882,8 @@
>
>    (RVVM8HI "RVVMF2BI") (RVVM4HI "RVVMF4BI") (RVVM2HI "RVVMF8BI") (RVVM1HI "RVVMF16BI") (RVVMF2HI "RVVMF32BI") (RVVMF4HI "RVVMF64BI")
>
> +  (RVVM8BF "RVVMF2BI") (RVVM4BF "RVVMF4BI") (RVVM2BF "RVVMF8BI") (RVVM1BF "RVVMF16BI") (RVVMF2BF "RVVMF32BI") (RVVMF4BF "RVVMF64BI")
> +
>    (RVVM8HF "RVVMF2BI") (RVVM4HF "RVVMF4BI") (RVVM2HF "RVVMF8BI") (RVVM1HF "RVVMF16BI") (RVVMF2HF "RVVMF32BI") (RVVMF4HF "RVVMF64BI")
>
>    (RVVM8SI "RVVMF4BI") (RVVM4SI "RVVMF8BI") (RVVM2SI "RVVMF16BI") (RVVM1SI "RVVMF32BI") (RVVMF2SI "RVVMF64BI")
> @@ -1841,6 +1910,14 @@
>    (RVVM2x3HI "RVVMF8BI") (RVVM1x3HI "RVVMF16BI") (RVVMF2x3HI "RVVMF32BI") (RVVMF4x3HI "RVVMF64BI")
>    (RVVM4x2HI "RVVMF4BI") (RVVM2x2HI "RVVMF8BI") (RVVM1x2HI "RVVMF16BI") (RVVMF2x2HI "RVVMF32BI") (RVVMF4x2HI "RVVMF64BI")
>
> +  (RVVM1x8BF "RVVMF16BI") (RVVMF2x8BF "RVVMF32BI") (RVVMF4x8BF "RVVMF64BI")
> +  (RVVM1x7BF "RVVMF16BI") (RVVMF2x7BF "RVVMF32BI") (RVVMF4x7BF "RVVMF64BI")
> +  (RVVM1x6BF "RVVMF16BI") (RVVMF2x6BF "RVVMF32BI") (RVVMF4x6BF "RVVMF64BI")
> +  (RVVM1x5BF "RVVMF16BI") (RVVMF2x5BF "RVVMF32BI") (RVVMF4x5BF "RVVMF64BI")
> +  (RVVM2x4BF "RVVMF8BI") (RVVM1x4BF "RVVMF16BI") (RVVMF2x4BF "RVVMF32BI") (RVVMF4x4BF "RVVMF64BI")
> +  (RVVM2x3BF "RVVMF8BI") (RVVM1x3BF "RVVMF16BI") (RVVMF2x3BF "RVVMF32BI") (RVVMF4x3BF "RVVMF64BI")
> +  (RVVM4x2BF "RVVMF4BI") (RVVM2x2BF "RVVMF8BI") (RVVM1x2BF "RVVMF16BI") (RVVMF2x2BF "RVVMF32BI") (RVVMF4x2BF "RVVMF64BI")
> +
>    (RVVM1x8HF "RVVMF16BI") (RVVMF2x8HF "RVVMF32BI") (RVVMF4x8HF "RVVMF64BI")
>    (RVVM1x7HF "RVVMF16BI") (RVVMF2x7HF "RVVMF32BI") (RVVMF4x7HF "RVVMF64BI")
>    (RVVM1x6HF "RVVMF16BI") (RVVMF2x6HF "RVVMF32BI") (RVVMF4x6HF "RVVMF64BI")
> @@ -1916,6 +1993,8 @@
>
>    (RVVM8HI "rvvmf2bi") (RVVM4HI "rvvmf4bi") (RVVM2HI "rvvmf8bi") (RVVM1HI "rvvmf16bi") (RVVMF2HI "rvvmf32bi") (RVVMF4HI "rvvmf64bi")
>
> +  (RVVM8BF "rvvmf2bi") (RVVM4BF "rvvmf4bi") (RVVM2BF "rvvmf8bi") (RVVM1BF "rvvmf16bi") (RVVMF2BF "rvvmf32bi") (RVVMF4BF "rvvmf64bi")
> +
>    (RVVM8HF "rvvmf2bi") (RVVM4HF "rvvmf4bi") (RVVM2HF "rvvmf8bi") (RVVM1HF "rvvmf16bi") (RVVMF2HF "rvvmf32bi") (RVVMF4HF "rvvmf64bi")
>
>    (RVVM8SI "rvvmf4bi") (RVVM4SI "rvvmf8bi") (RVVM2SI "rvvmf16bi") (RVVM1SI "rvvmf32bi") (RVVMF2SI "rvvmf64bi")
> @@ -1942,6 +2021,14 @@
>    (RVVM2x3HI "rvvmf8bi") (RVVM1x3HI "rvvmf16bi") (RVVMF2x3HI "rvvmf32bi") (RVVMF4x3HI "rvvmf64bi")
>    (RVVM4x2HI "rvvmf4bi") (RVVM2x2HI "rvvmf8bi") (RVVM1x2HI "rvvmf16bi") (RVVMF2x2HI "rvvmf32bi") (RVVMF4x2HI "rvvmf64bi")
>
> +  (RVVM1x8BF "rvvmf16bi") (RVVMF2x8BF "rvvmf32bi") (RVVMF4x8BF "rvvmf64bi")
> +  (RVVM1x7BF "rvvmf16bi") (RVVMF2x7BF "rvvmf32bi") (RVVMF4x7BF "rvvmf64bi")
> +  (RVVM1x6BF "rvvmf16bi") (RVVMF2x6BF "rvvmf32bi") (RVVMF4x6BF "rvvmf64bi")
> +  (RVVM1x5BF "rvvmf16bi") (RVVMF2x5BF "rvvmf32bi") (RVVMF4x5BF "rvvmf64bi")
> +  (RVVM2x4BF "rvvmf8bi") (RVVM1x4BF "rvvmf16bi") (RVVMF2x4BF "rvvmf32bi") (RVVMF4x4BF "rvvmf64bi")
> +  (RVVM2x3BF "rvvmf8bi") (RVVM1x3BF "rvvmf16bi") (RVVMF2x3BF "rvvmf32bi") (RVVMF4x3BF "rvvmf64bi")
> +  (RVVM4x2BF "rvvmf4bi") (RVVM2x2BF "rvvmf8bi") (RVVM1x2BF "rvvmf16bi") (RVVMF2x2BF "rvvmf32bi") (RVVMF4x2BF "rvvmf64bi")
> +
>    (RVVM1x8HF "rvvmf16bi") (RVVMF2x8HF "rvvmf32bi") (RVVMF4x8HF "rvvmf64bi")
>    (RVVM1x7HF "rvvmf16bi") (RVVMF2x7HF "rvvmf32bi") (RVVMF4x7HF "rvvmf64bi")
>    (RVVM1x6HF "rvvmf16bi") (RVVMF2x6HF "rvvmf32bi") (RVVMF4x6HF "rvvmf64bi")
> @@ -2017,6 +2104,8 @@
>
>    (RVVM8HI "HI") (RVVM4HI "HI") (RVVM2HI "HI") (RVVM1HI "HI") (RVVMF2HI "HI") (RVVMF4HI "HI")
>
> +  (RVVM8BF "BF") (RVVM4BF "BF") (RVVM2BF "BF") (RVVM1BF "BF") (RVVMF2BF "BF") (RVVMF4BF "BF")
> +
>    (RVVM8HF "HF") (RVVM4HF "HF") (RVVM2HF "HF") (RVVM1HF "HF") (RVVMF2HF "HF") (RVVMF4HF "HF")
>
>    (RVVM8SI "SI") (RVVM4SI "SI") (RVVM2SI "SI") (RVVM1SI "SI") (RVVMF2SI "SI")
> @@ -2114,6 +2203,8 @@
>
>    (RVVM8HI "hi") (RVVM4HI "hi") (RVVM2HI "hi") (RVVM1HI "hi") (RVVMF2HI "hi") (RVVMF4HI "hi")
>
> +  (RVVM8BF "bf") (RVVM4BF "bf") (RVVM2BF "bf") (RVVM1BF "bf") (RVVMF2BF "bf") (RVVMF4BF "bf")
> +
>    (RVVM8HF "hf") (RVVM4HF "hf") (RVVM2HF "hf") (RVVM1HF "hf") (RVVMF2HF "hf") (RVVMF4HF "hf")
>
>    (RVVM8SI "si") (RVVM4SI "si") (RVVM2SI "si") (RVVM1SI "si") (RVVMF2SI "si")
> @@ -2156,6 +2247,32 @@
>    (RVVM2x3HI "rvvm2hi") (RVVM1x3HI "rvvm1hi") (RVVMF2x3HI "rvvmf2hi") (RVVMF4x3HI "rvvmf4hi")
>    (RVVM4x2HI "rvvm4hi") (RVVM2x2HI "rvvm2hi") (RVVM1x2HI "rvvm1hi") (RVVMF2x2HI "rvvmf2hi") (RVVMF4x2HI "rvvmf4hi")
>
> +  (RVVM1x8BF "rvvm1bf")
> +  (RVVMF2x8BF "rvvmf2bf")
> +  (RVVMF4x8BF "rvvmf4bf")
> +  (RVVM1x7BF "rvvm1bf")
> +  (RVVMF2x7BF "rvvmf2bf")
> +  (RVVMF4x7BF "rvvmf4bf")
> +  (RVVM1x6BF "rvvm1bf")
> +  (RVVMF2x6BF "rvvmf2bf")
> +  (RVVMF4x6BF "rvvmf4bf")
> +  (RVVM1x5BF "rvvm1bf")
> +  (RVVMF2x5BF "rvvmf2bf")
> +  (RVVMF4x5BF "rvvmf4bf")
> +  (RVVM2x4BF "rvvm2bf")
> +  (RVVM1x4BF "rvvm1bf")
> +  (RVVMF2x4BF "rvvmf2bf")
> +  (RVVMF4x4BF "rvvmf4bf")
> +  (RVVM2x3BF "rvvm2bf")
> +  (RVVM1x3BF "rvvm1bf")
> +  (RVVMF2x3BF "rvvmf2bf")
> +  (RVVMF4x3BF "rvvmf4bf")
> +  (RVVM4x2BF "rvvm4bf")
> +  (RVVM2x2BF "rvvm2bf")
> +  (RVVM1x2BF "rvvm1bf")
> +  (RVVMF2x2BF "rvvmf2bf")
> +  (RVVMF4x2BF "rvvmf4bf")
> +
>    (RVVM1x8HF "rvvm1hf")
>    (RVVMF2x8HF "rvvmf2hf")
>    (RVVMF4x8HF "rvvmf4hf")
> @@ -2292,6 +2409,14 @@
>    (RVVM2x3HI "3") (RVVM1x3HI "3") (RVVMF2x3HI "3") (RVVMF4x3HI "3")
>    (RVVM4x2HI "2") (RVVM2x2HI "2") (RVVM1x2HI "2") (RVVMF2x2HI "2") (RVVMF4x2HI "2")
>
> +  (RVVM1x8BF "8") (RVVMF2x8BF "8") (RVVMF4x8BF "8")
> +  (RVVM1x7BF "7") (RVVMF2x7BF "7") (RVVMF4x7BF "7")
> +  (RVVM1x6BF "6") (RVVMF2x6BF "6") (RVVMF4x6BF "6")
> +  (RVVM1x5BF "5") (RVVMF2x5BF "5") (RVVMF4x5BF "5")
> +  (RVVM2x4BF "4") (RVVM1x4BF "4") (RVVMF2x4BF "4") (RVVMF4x4BF "4")
> +  (RVVM2x3BF "3") (RVVM1x3BF "3") (RVVMF2x3BF "3") (RVVMF4x3BF "3")
> +  (RVVM4x2BF "2") (RVVM2x2BF "2") (RVVM1x2BF "2") (RVVMF2x2BF "2") (RVVMF4x2BF "2")
> +
>    (RVVM1x8HF "8") (RVVMF2x8HF "8") (RVVMF4x8HF "8")
>    (RVVM1x7HF "7") (RVVMF2x7HF "7") (RVVMF4x7HF "7")
>    (RVVM1x6HF "6") (RVVMF2x6HF "6") (RVVMF4x6HF "6")
> @@ -2346,6 +2471,8 @@
>
>    (RVVM8HI "16") (RVVM4HI "16") (RVVM2HI "16") (RVVM1HI "16") (RVVMF2HI "16") (RVVMF4HI "16")
>
> +  (RVVM8BF "16") (RVVM4BF "16") (RVVM2BF "16") (RVVM1BF "16") (RVVMF2BF "16") (RVVMF4BF "16")
> +
>    (RVVM8HF "16") (RVVM4HF "16") (RVVM2HF "16") (RVVM1HF "16") (RVVMF2HF "16") (RVVMF4HF "16")
>
>    (RVVM8SI "32") (RVVM4SI "32") (RVVM2SI "32") (RVVM1SI "32") (RVVMF2SI "32")
> @@ -2372,6 +2499,14 @@
>    (RVVM2x3HI "16") (RVVM1x3HI "16") (RVVMF2x3HI "16") (RVVMF4x3HI "16")
>    (RVVM4x2HI "16") (RVVM2x2HI "16") (RVVM1x2HI "16") (RVVMF2x2HI "16") (RVVMF4x2HI "16")
>
> +  (RVVM1x8BF "16") (RVVMF2x8BF "16") (RVVMF4x8BF "16")
> +  (RVVM1x7BF "16") (RVVMF2x7BF "16") (RVVMF4x7BF "16")
> +  (RVVM1x6BF "16") (RVVMF2x6BF "16") (RVVMF4x6BF "16")
> +  (RVVM1x5BF "16") (RVVMF2x5BF "16") (RVVMF4x5BF "16")
> +  (RVVM2x4BF "16") (RVVM1x4BF "16") (RVVMF2x4BF "16") (RVVMF4x4BF "16")
> +  (RVVM2x3BF "16") (RVVM1x3BF "16") (RVVMF2x3BF "16") (RVVMF4x3BF "16")
> +  (RVVM4x2BF "16") (RVVM2x2BF "16") (RVVM1x2BF "16") (RVVMF2x2BF "16") (RVVMF4x2BF "16")
> +
>    (RVVM1x8HF "16") (RVVMF2x8HF "16") (RVVMF4x8HF "16")
>    (RVVM1x7HF "16") (RVVMF2x7HF "16") (RVVMF4x7HF "16")
>    (RVVM1x6HF "16") (RVVMF2x6HF "16") (RVVMF4x6HF "16")
> @@ -2438,6 +2573,8 @@
>  (define_mode_attr double_trunc_sew [
>    (RVVM8HI "8") (RVVM4HI "8") (RVVM2HI "8") (RVVM1HI "8") (RVVMF2HI "8") (RVVMF4HI "8")
>
> +  (RVVM8BF "8") (RVVM4BF "8") (RVVM2BF "8") (RVVM1BF "8") (RVVMF2BF "8") (RVVMF4BF "8")
> +
>    (RVVM8HF "8") (RVVM4HF "8") (RVVM2HF "8") (RVVM1HF "8") (RVVMF2HF "8") (RVVMF4HF "8")
>
>    (RVVM8SI "16") (RVVM4SI "16") (RVVM2SI "16") (RVVM1SI "16") (RVVMF2SI "16")
> @@ -2470,6 +2607,8 @@
>
>    (RVVM4HI "32") (RVVM2HI "32") (RVVM1HI "32") (RVVMF2HI "32") (RVVMF4HI "32")
>
> +  (RVVM4BF "32") (RVVM2BF "32") (RVVM1BF "32") (RVVMF2BF "32") (RVVMF4BF "32")
> +
>    (RVVM4HF "32") (RVVM2HF "32") (RVVM1HF "32") (RVVMF2HF "32") (RVVMF4HF "32")
>
>    (RVVM4SI "64") (RVVM2SI "64") (RVVM1SI "64") (RVVMF2SI "64")
> @@ -2804,6 +2943,8 @@
>  (define_mode_attr VINDEX_DOUBLE_TRUNC [
>    (RVVM8HI "RVVM4QI") (RVVM4HI "RVVM2QI") (RVVM2HI "RVVM1QI") (RVVM1HI "RVVMF2QI") (RVVMF2HI "RVVMF4QI") (RVVMF4HI "RVVMF8QI")
>
> +  (RVVM8BF "RVVM4QI") (RVVM4BF "RVVM2QI") (RVVM2BF "RVVM1QI") (RVVM1BF "RVVMF2QI") (RVVMF2BF "RVVMF4QI") (RVVMF4BF "RVVMF8QI")
> +
>    (RVVM8HF "RVVM4QI") (RVVM4HF "RVVM2QI") (RVVM2HF "RVVM1QI") (RVVM1HF "RVVMF2QI") (RVVMF2HF "RVVMF4QI") (RVVMF4HF "RVVMF8QI")
>
>    (RVVM8SI "RVVM4HI") (RVVM4SI "RVVM2HI") (RVVM2SI "RVVM1HI") (RVVM1SI "RVVMF2HI") (RVVMF2SI "RVVMF4HI")
> @@ -2836,6 +2977,8 @@
>
>    (RVVM4HI "RVVM8SI") (RVVM2HI "RVVM4SI") (RVVM1HI "RVVM2SI") (RVVMF2HI "RVVM1SI") (RVVMF4HI "RVVMF2SI")
>
> +  (RVVM4BF "RVVM8SI") (RVVM2BF "RVVM4SI") (RVVM1BF "RVVM2SI") (RVVMF2BF "RVVM1SI") (RVVMF4BF "RVVMF2SI")
> +
>    (RVVM4HF "RVVM8SI") (RVVM2HF "RVVM4SI") (RVVM1HF "RVVM2SI") (RVVMF2HF "RVVM1SI") (RVVMF4HF "RVVMF2SI")
>
>    (RVVM4SI "RVVM8DI") (RVVM2SI "RVVM4DI") (RVVM1SI "RVVM2DI") (RVVMF2SI "RVVM1DI")
> @@ -2848,6 +2991,8 @@
>
>    (RVVM2HI "RVVM8DI") (RVVM1HI "RVVM4DI") (RVVMF2HI "RVVM2DI") (RVVMF4HI "RVVM1DI")
>
> +  (RVVM2BF "RVVM8DI") (RVVM1BF "RVVM4DI") (RVVMF2BF "RVVM2DI") (RVVMF4BF "RVVM1DI")
> +
>    (RVVM2HF "RVVM8DI") (RVVM1HF "RVVM4DI") (RVVMF2HF "RVVM2DI") (RVVMF4HF "RVVM1DI")
>  ])
>
> @@ -3352,6 +3497,10 @@
>    (RVVM2HI "vector_eew16_stride_operand") (RVVM1HI "vector_eew16_stride_operand")
>    (RVVMF2HI "vector_eew16_stride_operand") (RVVMF4HI "vector_eew16_stride_operand")
>
> +  (RVVM8BF "vector_eew16_stride_operand") (RVVM4BF "vector_eew16_stride_operand")
> +  (RVVM2BF "vector_eew16_stride_operand") (RVVM1BF "vector_eew16_stride_operand")
> +  (RVVMF2BF "vector_eew16_stride_operand") (RVVMF4BF "vector_eew16_stride_operand")
> +
>    (RVVM8HF "vector_eew16_stride_operand") (RVVM4HF "vector_eew16_stride_operand")
>    (RVVM2HF "vector_eew16_stride_operand") (RVVM1HF "vector_eew16_stride_operand")
>    (RVVMF2HF "vector_eew16_stride_operand") (RVVMF4HF "vector_eew16_stride_operand")
> @@ -3381,6 +3530,10 @@
>    (RVVM2HI "rJ,rJ,rJ,c02,c02,c02") (RVVM1HI "rJ,rJ,rJ,c02,c02,c02")
>    (RVVMF2HI "rJ,rJ,rJ,c02,c02,c02") (RVVMF4HI "rJ,rJ,rJ,c02,c02,c02")
>
> +  (RVVM8BF "rJ,rJ,rJ,c02,c02,c02") (RVVM4BF "rJ,rJ,rJ,c02,c02,c02")
> +  (RVVM2BF "rJ,rJ,rJ,c02,c02,c02") (RVVM1BF "rJ,rJ,rJ,c02,c02,c02")
> +  (RVVMF2BF "rJ,rJ,rJ,c02,c02,c02") (RVVMF4BF "rJ,rJ,rJ,c02,c02,c02")
> +
>    (RVVM8HF "rJ,rJ,rJ,c02,c02,c02") (RVVM4HF "rJ,rJ,rJ,c02,c02,c02")
>    (RVVM2HF "rJ,rJ,rJ,c02,c02,c02") (RVVM1HF "rJ,rJ,rJ,c02,c02,c02")
>    (RVVMF2HF "rJ,rJ,rJ,c02,c02,c02") (RVVMF4HF "rJ,rJ,rJ,c02,c02,c02")
> @@ -3410,6 +3563,10 @@
>    (RVVM2HI "rJ,c02") (RVVM1HI "rJ,c02")
>    (RVVMF2HI "rJ,c02") (RVVMF4HI "rJ,c02")
>
> +  (RVVM8BF "rJ,c02") (RVVM4BF "rJ,c02")
> +  (RVVM2BF "rJ,c02") (RVVM1BF "rJ,c02")
> +  (RVVMF2BF "rJ,c02") (RVVMF4BF "rJ,c02")
> +
>    (RVVM8HF "rJ,c02") (RVVM4HF "rJ,c02")
>    (RVVM2HF "rJ,c02") (RVVM1HF "rJ,c02")
>    (RVVMF2HF "rJ,c02") (RVVMF4HF "rJ,c02")
> @@ -3438,6 +3595,10 @@
>    (RVVM2HI "immediate_operand") (RVVM1HI "immediate_operand")
>    (RVVMF2HI "immediate_operand") (RVVMF4HI "immediate_operand")
>
> +  (RVVM8BF "const_1_operand") (RVVM4BF "vector_gs_extension_operand")
> +  (RVVM2BF "immediate_operand") (RVVM1BF "immediate_operand")
> +  (RVVMF2BF "immediate_operand") (RVVMF4BF "immediate_operand")
> +
>    (RVVM8HF "const_1_operand") (RVVM4HF "vector_gs_extension_operand")
>    (RVVM2HF "immediate_operand") (RVVM1HF "immediate_operand")
>    (RVVMF2HF "immediate_operand") (RVVMF4HF "immediate_operand")
> @@ -3464,6 +3625,10 @@
>    (RVVM2HI "const_1_or_2_operand") (RVVM1HI "const_1_or_2_operand")
>    (RVVMF2HI "const_1_or_2_operand") (RVVMF4HI "const_1_or_2_operand")
>
> +  (RVVM8BF "const_1_operand") (RVVM4BF "vector_gs_scale_operand_16_rv32")
> +  (RVVM2BF "const_1_or_2_operand") (RVVM1BF "const_1_or_2_operand")
> +  (RVVMF2BF "const_1_or_2_operand") (RVVMF4BF "const_1_or_2_operand")
> +
>    (RVVM8HF "const_1_operand") (RVVM4HF "vector_gs_scale_operand_16_rv32")
>    (RVVM2HF "const_1_or_2_operand") (RVVM1HF "const_1_or_2_operand")
>    (RVVMF2HF "const_1_or_2_operand") (RVVMF4HF "const_1_or_2_operand")
> diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
> index fbcdf96f038..9a139faf745 100644
> --- a/gcc/config/riscv/vector.md
> +++ b/gcc/config/riscv/vector.md
> @@ -54,7 +54,8 @@
>                           vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
>                           vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\
>                           vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
> -                         vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c")
> +                         vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\
> +                         vfncvtbf16,vfwcvtbf16,vfwmaccbf16")
>          (const_string "true")]
>         (const_string "false")))
>
> @@ -78,7 +79,8 @@
>                           vgather,vcompress,vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,\
>                           vssegtux,vssegtox,vlsegdff,vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,\
>                           vror,vwsll,vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
> -                         vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c")
> +                         vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,\
> +                         vfncvtbf16,vfwcvtbf16,vfwmaccbf16")
>          (const_string "true")]
>         (const_string "false")))
>
> @@ -119,6 +121,14 @@
>                           RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,\
>                           RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,\
>                           RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,\
> +                         RVVM8BF,RVVM4BF,RVVM2BF,RVVM1BF,RVVMF2BF,RVVMF4BF,\
> +                         RVVM1x8BF,RVVMF2x8BF,RVVMF4x8BF,\
> +                         RVVM1x7BF,RVVMF2x7BF,RVVMF4x7BF,\
> +                         RVVM1x6BF,RVVMF2x6BF,RVVMF4x6BF,\
> +                         RVVM1x5BF,RVVMF2x5BF,RVVMF4x5BF,\
> +                         RVVM2x4BF,RVVM1x4BF,RVVMF2x4BF,RVVMF4x4BF,\
> +                         RVVM2x3BF,RVVM1x3BF,RVVMF2x3BF,RVVMF4x3BF,\
> +                         RVVM4x2BF,RVVM2x2BF,RVVM1x2BF,RVVMF2x2BF,RVVMF4x2BF,\
>                           RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,\
>                           RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,\
>                           RVVM1x7HF,RVVMF2x7HF,RVVMF4x7HF,\
> @@ -180,6 +190,12 @@
>          (eq_attr "mode" "RVVM1HI") (symbol_ref "riscv_vector::LMUL_1")
>          (eq_attr "mode" "RVVMF2HI") (symbol_ref "riscv_vector::LMUL_F2")
>          (eq_attr "mode" "RVVMF4HI") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM8BF") (symbol_ref "riscv_vector::LMUL_8")
> +        (eq_attr "mode" "RVVM4BF") (symbol_ref "riscv_vector::LMUL_4")
> +        (eq_attr "mode" "RVVM2BF") (symbol_ref "riscv_vector::LMUL_2")
> +        (eq_attr "mode" "RVVM1BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4BF") (symbol_ref "riscv_vector::LMUL_F4")
>          (eq_attr "mode" "RVVM8HF") (symbol_ref "riscv_vector::LMUL_8")
>          (eq_attr "mode" "RVVM4HF") (symbol_ref "riscv_vector::LMUL_4")
>          (eq_attr "mode" "RVVM2HF") (symbol_ref "riscv_vector::LMUL_2")
> @@ -261,6 +277,31 @@
>          (eq_attr "mode" "RVVM1x2HI") (symbol_ref "riscv_vector::LMUL_1")
>          (eq_attr "mode" "RVVMF2x2HI") (symbol_ref "riscv_vector::LMUL_F2")
>          (eq_attr "mode" "RVVMF4x2HI") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM1x8BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x8BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x8BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM1x7BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x7BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x7BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM1x6BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x6BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x6BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM1x5BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x5BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x5BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM2x4BF") (symbol_ref "riscv_vector::LMUL_2")
> +        (eq_attr "mode" "RVVM1x4BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x4BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x4BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM2x3BF") (symbol_ref "riscv_vector::LMUL_2")
> +        (eq_attr "mode" "RVVM1x3BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x3BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x3BF") (symbol_ref "riscv_vector::LMUL_F4")
> +        (eq_attr "mode" "RVVM4x2BF") (symbol_ref "riscv_vector::LMUL_4")
> +        (eq_attr "mode" "RVVM2x2BF") (symbol_ref "riscv_vector::LMUL_2")
> +        (eq_attr "mode" "RVVM1x2BF") (symbol_ref "riscv_vector::LMUL_1")
> +        (eq_attr "mode" "RVVMF2x2BF") (symbol_ref "riscv_vector::LMUL_F2")
> +        (eq_attr "mode" "RVVMF4x2BF") (symbol_ref "riscv_vector::LMUL_F4")
>          (eq_attr "mode" "RVVM1x8HF") (symbol_ref "riscv_vector::LMUL_1")
>          (eq_attr "mode" "RVVMF2x8HF") (symbol_ref "riscv_vector::LMUL_F2")
>          (eq_attr "mode" "RVVMF4x8HF") (symbol_ref "riscv_vector::LMUL_F4")
> @@ -446,7 +487,7 @@
>                           vandn,vbrev,vbrev8,vrev8,vcpop,vclz,vctz,vrol,vror,vwsll,\
>                           vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
>                           vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,\
> -                         vsm3me,vsm3c")
> +                         vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16")
>            (const_int INVALID_ATTRIBUTE)
>         (and (eq_attr "type" "vlde,vste,vlsegde,vssegte,vlsegds,vssegts,\
>                                vlsegdff,vssegtux,vlsegdox,vlsegdux")
> @@ -465,6 +506,12 @@
>          (eq_attr "mode" "RVVM1HI") (const_int 16)
>          (eq_attr "mode" "RVVMF2HI") (const_int 32)
>          (eq_attr "mode" "RVVMF4HI") (const_int 64)
> +        (eq_attr "mode" "RVVM8BF") (const_int 2)
> +        (eq_attr "mode" "RVVM4BF") (const_int 4)
> +        (eq_attr "mode" "RVVM2BF") (const_int 8)
> +        (eq_attr "mode" "RVVM1BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4BF") (const_int 64)
>          (eq_attr "mode" "RVVM8HF") (const_int 2)
>          (eq_attr "mode" "RVVM4HF") (const_int 4)
>          (eq_attr "mode" "RVVM2HF") (const_int 8)
> @@ -546,6 +593,31 @@
>          (eq_attr "mode" "RVVM1x2HI") (const_int 16)
>          (eq_attr "mode" "RVVMF2x2HI") (const_int 32)
>          (eq_attr "mode" "RVVMF4x2HI") (const_int 64)
> +        (eq_attr "mode" "RVVM1x8BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x8BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x8BF") (const_int 64)
> +        (eq_attr "mode" "RVVM1x7BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x7BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x7BF") (const_int 64)
> +        (eq_attr "mode" "RVVM1x6BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x6BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x6BF") (const_int 64)
> +        (eq_attr "mode" "RVVM1x5BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x5BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x5BF") (const_int 64)
> +        (eq_attr "mode" "RVVM2x4BF") (const_int 8)
> +        (eq_attr "mode" "RVVM1x4BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x4BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x4BF") (const_int 64)
> +        (eq_attr "mode" "RVVM2x3BF") (const_int 8)
> +        (eq_attr "mode" "RVVM1x3BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x3BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x3BF") (const_int 64)
> +        (eq_attr "mode" "RVVM4x2BF") (const_int 4)
> +        (eq_attr "mode" "RVVM2x2BF") (const_int 8)
> +        (eq_attr "mode" "RVVM1x2BF") (const_int 16)
> +        (eq_attr "mode" "RVVMF2x2BF") (const_int 32)
> +        (eq_attr "mode" "RVVMF4x2BF") (const_int 64)
>          (eq_attr "mode" "RVVM1x8HF") (const_int 16)
>          (eq_attr "mode" "RVVMF2x8HF") (const_int 32)
>          (eq_attr "mode" "RVVMF4x8HF") (const_int 64)
> @@ -723,7 +795,8 @@
>                                vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\
>                                vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
>                                vgather,vldff,viwmuladd,vfwmuladd,vlsegde,vlsegds,vlsegdux,vlsegdox,vlsegdff,\
> -                              vandn,vbrev,vbrev8,vrev8,vrol,vror,vwsll,vclmul,vclmulh")
> +                              vandn,vbrev,vbrev8,vrev8,vrol,vror,vwsll,vclmul,vclmulh,\
> +                               vfncvtbf16,vfwcvtbf16,vfwmaccbf16")
>                (const_int 2)
>
>               (eq_attr "type" "vimerge,vfmerge,vcompress,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,\
> @@ -767,7 +840,8 @@
>                           vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,\
>                           vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,vcompress,\
>                           vlsegde,vssegts,vssegtux,vssegtox,vlsegdff,vbrev,vbrev8,vrev8,\
> -                         vghsh,vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c")
> +                         vghsh,vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c,\
> +                         vfncvtbf16,vfwcvtbf16")
>            (const_int 4)
>
>          ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
> @@ -783,7 +857,7 @@
>                            vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
>                            vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\
>                            vgather,viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\
> -                          vror,vwsll,vclmul,vclmulh")
> +                          vror,vwsll,vclmul,vclmulh,vfwmaccbf16")
>            (const_int 5)
>
>          (eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd")
> @@ -800,7 +874,8 @@
>                           vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,vfwcvtftof,\
>                           vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,vimovxv,vfmovfv,\
>                           vcompress,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8,vghsh,\
> -                         vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c")
> +                         vaeskf1,vaeskf2,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm3me,vsm3c,\
> +                         vfncvtbf16,vfwcvtbf16")
>            (symbol_ref "riscv_vector::get_ta(operands[5])")
>
>          ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
> @@ -816,7 +891,8 @@
>                           vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\
>                           vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\
>                           vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
> -                         vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll,vclmul,vclmulh")
> +                         vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vwsll,vclmul,vclmulh,\
> +                         vfwmaccbf16")
>            (symbol_ref "riscv_vector::get_ta(operands[6])")
>
>          (eq_attr "type" "vimuladd,vfmuladd")
> @@ -830,7 +906,8 @@
>  (define_attr "ma" ""
>    (cond [(eq_attr "type"
>                           vfwcvtitof,vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,\
> -                         vfncvtftof,vfclass,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8")
> +                         vfncvtftof,vfclass,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8,\
>  vfncvtftof,vfclass,vldff,vlsegde,vlsegdff,vbrev,vbrev8,vrev8,\
> +                         vfncvtbf16,vfwcvtbf16")
>            (symbol_ref "riscv_vector::get_ma(operands[6])")
>
>          ;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
> @@ -846,7 +923,7 @@
>                           vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\
>                           vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\
>                           viwmuladd,vfwmuladd,vlsegds,vlsegdux,vlsegdox,vandn,vrol,\
> -                          vror,vwsll,vclmul,vclmulh")
> +                          vror,vwsll,vclmul,vclmulh,vfwmaccbf16")
>            (symbol_ref "riscv_vector::get_ma(operands[7])")
>
>          (eq_attr "type" "vimuladd,vfmuladd")
> @@ -862,7 +939,8 @@
>                           vfsqrt,vfrecp,vfmerge,vfcvtitof,vfcvtftoi,vfwcvtitof,\
>                           vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,\
>                           vfclass,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\
> -                         vimovxv,vfmovfv,vlsegde,vlsegdff,vmiota,vbrev,vbrev8,vrev8")
> +                         vimovxv,vfmovfv,vlsegde,vlsegdff,vmiota,vbrev,vbrev8,vrev8,\
> +                         vfncvtbf16,vfwcvtbf16")
>            (const_int 7)
>          (eq_attr "type" "vldm,vstm,vmalu,vmalu,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaesz,\
>                            vsm4r")
> @@ -880,7 +958,8 @@
>                           vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\
>                           vfsgnj,vfcmp,vslideup,vslidedown,vislide1up,\
>                           vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd,\
> -                         vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vclmul,vclmulh,vwsll")
> +                         vlsegds,vlsegdux,vlsegdox,vandn,vrol,vror,vclmul,vclmulh,vwsll,\
> +                         vfwmaccbf16")
>            (const_int 8)
>          (eq_attr "type" "vstux,vstox,vssegts,vssegtux,vssegtox")
>            (const_int 5)
> --
> 2.17.1
>
>
