[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-18 Thread Amy Kwan via Phabricator via cfe-commits
This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG37e7673c21af: [PowerPC] Implement Move to VSR Mask builtins 
in LLVM/Clang (authored by amyk).

Changed prior to commit:
  https://reviews.llvm.org/D82725?vs=292634&id=292921#toc

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

Files:
  clang/include/clang/Basic/BuiltinsPPC.def
  clang/lib/Headers/altivec.h
  clang/test/CodeGen/builtins-ppc-p10vector.c
  llvm/include/llvm/IR/IntrinsicsPowerPC.td
  llvm/lib/Target/PowerPC/PPCInstrPrefix.td
  llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll

Index: llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
===
--- llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
+++ llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
@@ -165,3 +165,109 @@
   %cnt = tail call i64 @llvm.ppc.altivec.vcntmbd(<2 x i64> %a, i32 0)
   ret i64 %cnt
 }
+
+declare <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64)
+declare <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64)
+declare <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64)
+declare <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64)
+declare <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64)
+
+define <16 x i8>  @test_mtvsrbm(i64 %a) {
+; CHECK-LABEL: test_mtvsrbm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 %a)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi() {
+; CHECK-LABEL: test_mtvsrbmi:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 1
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 1)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi2() {
+; CHECK-LABEL: test_mtvsrbmi2:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 255
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 255)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi3() {
+; CHECK-LABEL: test_mtvsrbmi3:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 65535
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65535)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi4() {
+; CHECK-LABEL: test_mtvsrbmi4:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 0
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65536)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi5() {
+; CHECK-LABEL: test_mtvsrbmi5:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 10
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65546)
+  ret <16 x i8> %mv
+}
+
+define <8 x i16> @test_mtvsrhm(i64 %a) {
+; CHECK-LABEL: test_mtvsrhm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrhm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64 %a)
+  ret <8 x i16> %mv
+}
+
+define <4 x i32> @test_mtvsrwm(i64 %a) {
+; CHECK-LABEL: test_mtvsrwm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrwm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64 %a)
+  ret <4 x i32> %mv
+}
+
+define <2 x i64> @test_mtvsrdm(i64 %a) {
+; CHECK-LABEL: test_mtvsrdm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrdm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64 %a)
+  ret <2 x i64> %mv
+}
+
+define <1 x i128> @test_mtvsrqm(i64 %a) {
+; CHECK-LABEL: test_mtvsrqm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrqm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64 %a)
+  ret <1 x i128> %mv
+}
Index: llvm/lib/Target/PowerPC/PPCInstrPrefix.td
===
--- llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -1054,22 +1054,28 @@
v1i128:$vB))]>;
   def MTVSRBM : VXForm_RD5_XO5_RS5<1602, 16, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrbm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v16i8:$vD,
+ (int_ppc_altivec_mtvsrbm i64:$rB))]>;
   def MTVSRHM : VXForm_RD5_XO5_RS5<1602, 17, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrhm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v8i16:$vD,
+ (int_ppc_altivec_mtvsrhm i64:$rB))]>;
   def MTVSRWM : VXForm_RD5_XO5_RS5<1602, 18, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrwm $vD, $rB",

[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-17 Thread Qing Shan Zhang via Phabricator via cfe-commits
steven.zhang accepted this revision.
steven.zhang added a comment.
This revision is now accepted and ready to land.

LGTM.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-17 Thread Amy Kwan via Phabricator via cfe-commits
amyk updated this revision to Diff 292634.
amyk added a comment.

- Rebased patch.
- Updated patch to remove unnecessary immediate handling.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

Files:
  clang/include/clang/Basic/BuiltinsPPC.def
  clang/lib/Headers/altivec.h
  clang/test/CodeGen/builtins-ppc-p10vector.c
  llvm/include/llvm/IR/IntrinsicsPowerPC.td
  llvm/lib/Target/PowerPC/PPCInstrPrefix.td
  llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll

Index: llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
===
--- llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
+++ llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
@@ -120,3 +120,109 @@
   %exp = tail call <1 x i128> @llvm.ppc.altivec.vexpandqm(<1 x i128> %a)
   ret <1 x i128> %exp
 }
+
+declare <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64)
+declare <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64)
+declare <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64)
+declare <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64)
+declare <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64)
+
+define <16 x i8>  @test_mtvsrbm(i64 %a) {
+; CHECK-LABEL: test_mtvsrbm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 %a)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi() {
+; CHECK-LABEL: test_mtvsrbmi:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 1
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 1)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi2() {
+; CHECK-LABEL: test_mtvsrbmi2:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 255
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 255)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi3() {
+; CHECK-LABEL: test_mtvsrbmi3:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 65535
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65535)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi4() {
+; CHECK-LABEL: test_mtvsrbmi4:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 0
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65536)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi5() {
+; CHECK-LABEL: test_mtvsrbmi5:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 10
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65546)
+  ret <16 x i8> %mv
+}
+
+define <8 x i16> @test_mtvsrhm(i64 %a) {
+; CHECK-LABEL: test_mtvsrhm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrhm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64 %a)
+  ret <8 x i16> %mv
+}
+
+define <4 x i32> @test_mtvsrwm(i64 %a) {
+; CHECK-LABEL: test_mtvsrwm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrwm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64 %a)
+  ret <4 x i32> %mv
+}
+
+define <2 x i64> @test_mtvsrdm(i64 %a) {
+; CHECK-LABEL: test_mtvsrdm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrdm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64 %a)
+  ret <2 x i64> %mv
+}
+
+define <1 x i128> @test_mtvsrqm(i64 %a) {
+; CHECK-LABEL: test_mtvsrqm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrqm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64 %a)
+  ret <1 x i128> %mv
+}
Index: llvm/lib/Target/PowerPC/PPCInstrPrefix.td
===
--- llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -1027,22 +1027,28 @@
v1i128:$vB))]>;
   def MTVSRBM : VXForm_RD5_XO5_RS5<1602, 16, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrbm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v16i8:$vD,
+ (int_ppc_altivec_mtvsrbm i64:$rB))]>;
   def MTVSRHM : VXForm_RD5_XO5_RS5<1602, 17, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrhm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v8i16:$vD,
+ (int_ppc_altivec_mtvsrhm i64:$rB))]>;
   def MTVSRWM : VXForm_RD5_XO5_RS5<1602, 18, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrwm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v4i32:$vD,
+ (int_ppc_altivec_mtvsrwm

[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-17 Thread Amy Kwan via Phabricator via cfe-commits
amyk added inline comments.



Comment at: llvm/lib/Target/PowerPC/PPCISelLowering.cpp:10054
+
+  case Intrinsic::ppc_altivec_mtvsrbm: {
+// The llvm.ppc.altivec.mtvsrbm intrinsic can correspond to two different

steven.zhang wrote:
> Can we handle this inside the .td ? i.e. change the definition of the instr 
> as:
> ```
>   def MTVSRBMI : DXForm<4, 10, (outs vrrc:$vD), (ins u8imm64:$D),
> "mtvsrbmi $vD, $D", IIC_VecGeneral,
> [(set v16i8:$vD,
>   (int_ppc_altivec_mtvsrbm imm:$D))]>;
> ```
> And add the missing u8imm64 as what we did for u16imm64 ?
I actually made a mistake when I was implementing this patch. The immediate 
should fit within 16 bits, not 8 bits. Sorry about that. I will update the 
patch to remove this handling, as it is unnecessary and incorrect.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-07 Thread Qing Shan Zhang via Phabricator via cfe-commits
steven.zhang added inline comments.



Comment at: llvm/lib/Target/PowerPC/PPCISelLowering.cpp:10054
+
+  case Intrinsic::ppc_altivec_mtvsrbm: {
+// The llvm.ppc.altivec.mtvsrbm intrinsic can correspond to two different

Can we handle this inside the .td ? i.e. change the definition of the instr as:
```
  def MTVSRBMI : DXForm<4, 10, (outs vrrc:$vD), (ins u8imm64:$D),
"mtvsrbmi $vD, $D", IIC_VecGeneral,
[(set v16i8:$vD,
  (int_ppc_altivec_mtvsrbm imm:$D))]>;
```
And add the missing u8imm64 as what we did for u16imm64 ?


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-09-03 Thread Amy Kwan via Phabricator via cfe-commits
amyk added a comment.

Ping.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-08-24 Thread Amy Kwan via Phabricator via cfe-commits
amyk added a comment.

Ping.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

___
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D82725: [PowerPC] Implement Move to VSR Mask builtins in LLVM/Clang

2020-08-04 Thread Amy Kwan via Phabricator via cfe-commits
amyk updated this revision to Diff 283113.
amyk added a comment.

Rebased patch, and removed MC tests from the original patch.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82725/new/

https://reviews.llvm.org/D82725

Files:
  clang/include/clang/Basic/BuiltinsPPC.def
  clang/lib/Headers/altivec.h
  clang/test/CodeGen/builtins-ppc-p10vector.c
  llvm/include/llvm/IR/IntrinsicsPowerPC.td
  llvm/lib/Target/PowerPC/PPCISelLowering.cpp
  llvm/lib/Target/PowerPC/PPCInstrPrefix.td
  llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll

Index: llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
===
--- llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
+++ llvm/test/CodeGen/PowerPC/p10-vector-mask-ops.ll
@@ -64,3 +64,99 @@
   %ext = tail call i32 @llvm.ppc.altivec.vextractqm(<1 x i128> %a)
   ret i32 %ext
 }
+
+declare <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64)
+declare <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64)
+declare <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64)
+declare <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64)
+declare <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64)
+
+define <16 x i8>  @test_mtvsrbm(i64 %a) {
+; CHECK-LABEL: test_mtvsrbm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 %a)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi() {
+; CHECK-LABEL: test_mtvsrbmi:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 1
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 1)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi2() {
+; CHECK-LABEL: test_mtvsrbmi2:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 255
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 255)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi3() {
+; CHECK-LABEL: test_mtvsrbmi3:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 0
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 256)
+  ret <16 x i8> %mv
+}
+
+define <16 x i8>  @test_mtvsrbmi4() {
+; CHECK-LABEL: test_mtvsrbmi4:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrbmi v2, 10
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 266)
+  ret <16 x i8> %mv
+}
+
+define <8 x i16> @test_mtvsrhm(i64 %a) {
+; CHECK-LABEL: test_mtvsrhm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrhm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64 %a)
+  ret <8 x i16> %mv
+}
+
+define <4 x i32> @test_mtvsrwm(i64 %a) {
+; CHECK-LABEL: test_mtvsrwm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrwm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64 %a)
+  ret <4 x i32> %mv
+}
+
+define <2 x i64> @test_mtvsrdm(i64 %a) {
+; CHECK-LABEL: test_mtvsrdm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrdm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64 %a)
+  ret <2 x i64> %mv
+}
+
+define <1 x i128> @test_mtvsrqm(i64 %a) {
+; CHECK-LABEL: test_mtvsrqm:
+; CHECK:   # %bb.0: # %entry
+; CHECK-NEXT:mtvsrqm v2, r3
+; CHECK-NEXT:blr
+entry:
+  %mv = tail call <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64 %a)
+  ret <1 x i128> %mv
+}
Index: llvm/lib/Target/PowerPC/PPCInstrPrefix.td
===
--- llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -892,22 +892,28 @@
  []>;
   def MTVSRBM : VXForm_RD5_XO5_RS5<1602, 16, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrbm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v16i8:$vD,
+ (int_ppc_altivec_mtvsrbm i64:$rB))]>;
   def MTVSRHM : VXForm_RD5_XO5_RS5<1602, 17, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrhm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v8i16:$vD,
+ (int_ppc_altivec_mtvsrhm i64:$rB))]>;
   def MTVSRWM : VXForm_RD5_XO5_RS5<1602, 18, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrwm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v4i32:$vD,
+ (int_ppc_altivec_mtvsrwm i64:$rB))]>;
   def MTVSRDM : VXForm_RD5_XO5_RS5<1602, 19, (outs vrrc:$vD), (ins g8rc:$rB),
"mtvsrdm $vD, $rB", IIC_VecGeneral,
-   []>;
+   [(set v2i64:$vD,
+