Author: leonardchan
Date: Fri Jul 26 14:19:37 2019
New Revision: 367157

URL: http://llvm.org/viewvc/llvm-project?rev=367157&view=rev
Log:
[NewPM] Run avx*-builtins.c tests under the new pass manager only

This patch changes the following tests to run under the new pass manager only:

```
Clang :: CodeGen/avx512-reduceMinMaxIntrin.c (1 of 4)
Clang :: CodeGen/avx512vl-builtins.c (2 of 4)
Clang :: CodeGen/avx512vlbw-builtins.c (3 of 4)
Clang :: CodeGen/avx512f-builtins.c (4 of 4)
```

The new pass manager emits extra bitcasts that the old checks did not cover. For
avx512-reduceMinMaxIntrin.c, the differences are mostly the allocas appearing in a
different order. The other tests needed updates for extra bitcasts and for
differently ordered loads and stores, but the generated logic is still the same.
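
For example, the masked-abs checks in avx512vl-builtins.c now have to match a
bitcast round trip between the <4 x i32> select result and the <2 x i64> return
type before the final mask select; the two middle CHECK lines below are the ones
added by this patch (excerpted from test_mm_mask_abs_epi32):

```
  // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> [[SUB]]
  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[SEL]] to <2 x i64>
  // CHECK: [[SEL:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
  // CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[SEL]], <4 x i32> %{{.*}}
```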

Differential revision: https://reviews.llvm.org/D65110

Modified:
    cfe/trunk/test/CodeGen/avx512-reduceMinMaxIntrin.c
    cfe/trunk/test/CodeGen/avx512f-builtins.c
    cfe/trunk/test/CodeGen/avx512vl-builtins.c
    cfe/trunk/test/CodeGen/avx512vlbw-builtins.c

Modified: cfe/trunk/test/CodeGen/avx512-reduceMinMaxIntrin.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512-reduceMinMaxIntrin.c?rev=367157&r1=367156&r2=367157&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512-reduceMinMaxIntrin.c (original)
+++ cfe/trunk/test/CodeGen/avx512-reduceMinMaxIntrin.c Fri Jul 26 14:19:37 2019
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -ffreestanding %s -O0 -triple=x86_64-apple-darwin -target-cpu skylake-avx512 -emit-llvm -o - -Wall -Werror | FileCheck %s
 
 #include <immintrin.h>
 
@@ -27,10 +27,10 @@
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__T1_I]], align 
64
 // CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__T1_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I_I]], align 
64
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
-// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
 // CHECK-NEXT:    [[TMP7:%.*]] = icmp sgt <8 x i64> [[TMP5]], [[TMP6]]
 // CHECK-NEXT:    [[TMP8:%.*]] = select <8 x i1> [[TMP7]], <8 x i64> [[TMP5]], 
<8 x i64> [[TMP6]]
 // CHECK-NEXT:    store <8 x i64> [[TMP8]], <8 x i64>* [[__T2_I]], align 64
@@ -40,10 +40,10 @@
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__T3_I]], align 
64
 // CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__T2_I]], align 
64
 // CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__T3_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I8_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
 // CHECK-NEXT:    [[TMP15:%.*]] = icmp sgt <8 x i64> [[TMP13]], [[TMP14]]
 // CHECK-NEXT:    [[TMP16:%.*]] = select <8 x i1> [[TMP15]], <8 x i64> 
[[TMP13]], <8 x i64> [[TMP14]]
 // CHECK-NEXT:    store <8 x i64> [[TMP16]], <8 x i64>* [[__T4_I]], align 64
@@ -53,10 +53,10 @@
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__T5_I]], align 
64
 // CHECK-NEXT:    [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__T4_I]], align 
64
 // CHECK-NEXT:    [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__T5_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
-// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
-// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP23:%.*]] = icmp sgt <8 x i64> [[TMP21]], [[TMP22]]
 // CHECK-NEXT:    [[TMP24:%.*]] = select <8 x i1> [[TMP23]], <8 x i64> 
[[TMP21]], <8 x i64> [[TMP22]]
 // CHECK-NEXT:    store <8 x i64> [[TMP24]], <8 x i64>* [[__T6_I]], align 64
@@ -92,10 +92,10 @@ long long test_mm512_reduce_max_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__T1_I]], align 
64
 // CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__T1_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I_I]], align 
64
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
-// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
 // CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt <8 x i64> [[TMP5]], [[TMP6]]
 // CHECK-NEXT:    [[TMP8:%.*]] = select <8 x i1> [[TMP7]], <8 x i64> [[TMP5]], 
<8 x i64> [[TMP6]]
 // CHECK-NEXT:    store <8 x i64> [[TMP8]], <8 x i64>* [[__T2_I]], align 64
@@ -105,10 +105,10 @@ long long test_mm512_reduce_max_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__T3_I]], align 
64
 // CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__T2_I]], align 
64
 // CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__T3_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I8_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
 // CHECK-NEXT:    [[TMP15:%.*]] = icmp ugt <8 x i64> [[TMP13]], [[TMP14]]
 // CHECK-NEXT:    [[TMP16:%.*]] = select <8 x i1> [[TMP15]], <8 x i64> 
[[TMP13]], <8 x i64> [[TMP14]]
 // CHECK-NEXT:    store <8 x i64> [[TMP16]], <8 x i64>* [[__T4_I]], align 64
@@ -118,10 +118,10 @@ long long test_mm512_reduce_max_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__T5_I]], align 
64
 // CHECK-NEXT:    [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__T4_I]], align 
64
 // CHECK-NEXT:    [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__T5_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
-// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
-// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP23:%.*]] = icmp ugt <8 x i64> [[TMP21]], [[TMP22]]
 // CHECK-NEXT:    [[TMP24:%.*]] = select <8 x i1> [[TMP23]], <8 x i64> 
[[TMP21]], <8 x i64> [[TMP22]]
 // CHECK-NEXT:    store <8 x i64> [[TMP24]], <8 x i64>* [[__T6_I]], align 64
@@ -134,12 +134,12 @@ unsigned long long test_mm512_reduce_max
 
 // CHECK-LABEL: define double @test_mm512_reduce_max_pd(<8 x double> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x double>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I8_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I9_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x double>, align 32
@@ -225,10 +225,10 @@ double test_mm512_reduce_max_pd(__m512d
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__T1_I]], align 
64
 // CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__T1_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I_I]], align 
64
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
-// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
 // CHECK-NEXT:    [[TMP7:%.*]] = icmp slt <8 x i64> [[TMP5]], [[TMP6]]
 // CHECK-NEXT:    [[TMP8:%.*]] = select <8 x i1> [[TMP7]], <8 x i64> [[TMP5]], 
<8 x i64> [[TMP6]]
 // CHECK-NEXT:    store <8 x i64> [[TMP8]], <8 x i64>* [[__T2_I]], align 64
@@ -238,10 +238,10 @@ double test_mm512_reduce_max_pd(__m512d
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__T3_I]], align 
64
 // CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__T2_I]], align 
64
 // CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__T3_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I8_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
 // CHECK-NEXT:    [[TMP15:%.*]] = icmp slt <8 x i64> [[TMP13]], [[TMP14]]
 // CHECK-NEXT:    [[TMP16:%.*]] = select <8 x i1> [[TMP15]], <8 x i64> 
[[TMP13]], <8 x i64> [[TMP14]]
 // CHECK-NEXT:    store <8 x i64> [[TMP16]], <8 x i64>* [[__T4_I]], align 64
@@ -251,10 +251,10 @@ double test_mm512_reduce_max_pd(__m512d
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__T5_I]], align 
64
 // CHECK-NEXT:    [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__T4_I]], align 
64
 // CHECK-NEXT:    [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__T5_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
-// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
-// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP23:%.*]] = icmp slt <8 x i64> [[TMP21]], [[TMP22]]
 // CHECK-NEXT:    [[TMP24:%.*]] = select <8 x i1> [[TMP23]], <8 x i64> 
[[TMP21]], <8 x i64> [[TMP22]]
 // CHECK-NEXT:    store <8 x i64> [[TMP24]], <8 x i64>* [[__T6_I]], align 64
@@ -290,10 +290,10 @@ long long test_mm512_reduce_min_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__T1_I]], align 
64
 // CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[__T1_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I_I]], align 
64
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
-// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP4]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
 // CHECK-NEXT:    [[TMP7:%.*]] = icmp ult <8 x i64> [[TMP5]], [[TMP6]]
 // CHECK-NEXT:    [[TMP8:%.*]] = select <8 x i1> [[TMP7]], <8 x i64> [[TMP5]], 
<8 x i64> [[TMP6]]
 // CHECK-NEXT:    store <8 x i64> [[TMP8]], <8 x i64>* [[__T2_I]], align 64
@@ -303,10 +303,10 @@ long long test_mm512_reduce_min_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE1_I]], <8 x i64>* [[__T3_I]], align 
64
 // CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__T2_I]], align 
64
 // CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__T3_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I8_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
 // CHECK-NEXT:    [[TMP15:%.*]] = icmp ult <8 x i64> [[TMP13]], [[TMP14]]
 // CHECK-NEXT:    [[TMP16:%.*]] = select <8 x i1> [[TMP15]], <8 x i64> 
[[TMP13]], <8 x i64> [[TMP14]]
 // CHECK-NEXT:    store <8 x i64> [[TMP16]], <8 x i64>* [[__T4_I]], align 64
@@ -316,10 +316,10 @@ long long test_mm512_reduce_min_epi64(__
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE3_I]], <8 x i64>* [[__T5_I]], align 
64
 // CHECK-NEXT:    [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__T4_I]], align 
64
 // CHECK-NEXT:    [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__T5_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I5_I]], 
align 64
-// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I6_I]], 
align 64
-// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I5_I]], align 64
-// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I6_I]], align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP23:%.*]] = icmp ult <8 x i64> [[TMP21]], [[TMP22]]
 // CHECK-NEXT:    [[TMP24:%.*]] = select <8 x i1> [[TMP23]], <8 x i64> 
[[TMP21]], <8 x i64> [[TMP22]]
 // CHECK-NEXT:    store <8 x i64> [[TMP24]], <8 x i64>* [[__T6_I]], align 64
@@ -332,12 +332,12 @@ unsigned long long test_mm512_reduce_min
 
 // CHECK-LABEL: define double @test_mm512_reduce_min_pd(<8 x double> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x double>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I8_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I9_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x double>, align 32
@@ -400,6 +400,8 @@ double test_mm512_reduce_min_pd(__m512d
 
 // CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epi64(i8 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
@@ -409,8 +411,6 @@ double test_mm512_reduce_min_pd(__m512d
 // CHECK-NEXT:    [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x i64>, align 64
@@ -505,15 +505,15 @@ long long test_mm512_mask_reduce_max_epi
 
 // CHECK-LABEL: define i64 @test_mm512_mask_reduce_max_epu64(i8 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I9_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I7_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK-NEXT:    [[__B_ADDR_I10_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I6_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
+// CHECK-NEXT:    [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x i64>, align 64
@@ -533,9 +533,9 @@ long long test_mm512_mask_reduce_max_epi
 // CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[__M_ADDR_I]], align 1
 // CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    store i8 [[TMP2]], i8* [[__U_ADDR_I_I]], align 1
-// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I_I]], align 
64
+// CHECK-NEXT:    store <8 x i64> [[TMP3]], <8 x i64>* [[__A_ADDR_I9_I]], 
align 64
 // CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[__U_ADDR_I_I]], align 1
-// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I9_I]], 
align 64
 // CHECK-NEXT:    store <8 x i64> zeroinitializer, <8 x i64>* 
[[DOTCOMPOUNDLITERAL_I_I_I]], align 64
 // CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i64>, <8 x i64>* 
[[DOTCOMPOUNDLITERAL_I_I_I]], align 64
 // CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
@@ -547,9 +547,9 @@ long long test_mm512_mask_reduce_max_epi
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE_I]], <8 x i64>* [[__T1_I]], align 
64
 // CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* [[__V_ADDR_I]], 
align 64
 // CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* [[__T1_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I9_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP11]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
 // CHECK-NEXT:    store <8 x i64> [[TMP12]], <8 x i64>* [[__B_ADDR_I10_I]], 
align 64
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I9_I]], align 64
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
 // CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I10_I]], align 64
 // CHECK-NEXT:    [[TMP15:%.*]] = icmp ugt <8 x i64> [[TMP13]], [[TMP14]]
 // CHECK-NEXT:    [[TMP16:%.*]] = select <8 x i1> [[TMP15]], <8 x i64> 
[[TMP13]], <8 x i64> [[TMP14]]
@@ -560,9 +560,9 @@ long long test_mm512_mask_reduce_max_epi
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE2_I]], <8 x i64>* [[__T3_I]], align 
64
 // CHECK-NEXT:    [[TMP19:%.*]] = load <8 x i64>, <8 x i64>* [[__T2_I]], align 
64
 // CHECK-NEXT:    [[TMP20:%.*]] = load <8 x i64>, <8 x i64>* [[__T3_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I7_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP19]], <8 x i64>* [[__A_ADDR_I6_I]], 
align 64
 // CHECK-NEXT:    store <8 x i64> [[TMP20]], <8 x i64>* [[__B_ADDR_I8_I]], 
align 64
-// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I7_I]], align 64
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I6_I]], align 64
 // CHECK-NEXT:    [[TMP22:%.*]] = load <8 x i64>, <8 x i64>* 
[[__B_ADDR_I8_I]], align 64
 // CHECK-NEXT:    [[TMP23:%.*]] = icmp ugt <8 x i64> [[TMP21]], [[TMP22]]
 // CHECK-NEXT:    [[TMP24:%.*]] = select <8 x i1> [[TMP23]], <8 x i64> 
[[TMP21]], <8 x i64> [[TMP22]]
@@ -573,9 +573,9 @@ long long test_mm512_mask_reduce_max_epi
 // CHECK-NEXT:    store <8 x i64> [[SHUFFLE4_I]], <8 x i64>* [[__T5_I]], align 
64
 // CHECK-NEXT:    [[TMP27:%.*]] = load <8 x i64>, <8 x i64>* [[__T4_I]], align 
64
 // CHECK-NEXT:    [[TMP28:%.*]] = load <8 x i64>, <8 x i64>* [[__T5_I]], align 
64
-// CHECK-NEXT:    store <8 x i64> [[TMP27]], <8 x i64>* [[__A_ADDR_I6_I]], 
align 64
+// CHECK-NEXT:    store <8 x i64> [[TMP27]], <8 x i64>* [[__A_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    store <8 x i64> [[TMP28]], <8 x i64>* [[__B_ADDR_I_I]], 
align 64
-// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* 
[[__A_ADDR_I6_I]], align 64
+// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i64>, <8 x i64>* [[__A_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP30:%.*]] = load <8 x i64>, <8 x i64>* [[__B_ADDR_I_I]], 
align 64
 // CHECK-NEXT:    [[TMP31:%.*]] = icmp ugt <8 x i64> [[TMP29]], [[TMP30]]
 // CHECK-NEXT:    [[TMP32:%.*]] = select <8 x i1> [[TMP31]], <8 x i64> 
[[TMP29]], <8 x i64> [[TMP30]]
@@ -589,17 +589,17 @@ unsigned long test_mm512_mask_reduce_max
 
 // CHECK-LABEL: define double @test_mm512_mask_reduce_max_pd(i8 zeroext %__M, 
<8 x double> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 
64
 // CHECK-NEXT:    [[__W2_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
-// CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x double>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
-// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca double, align 8
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 
64
+// CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x double>, align 32
@@ -697,6 +697,8 @@ double test_mm512_mask_reduce_max_pd(__m
 
 // CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epi64(i8 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
@@ -706,8 +708,6 @@ double test_mm512_mask_reduce_max_pd(__m
 // CHECK-NEXT:    [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x i64>, align 64
@@ -802,6 +802,8 @@ long long test_mm512_mask_reduce_min_epi
 
 // CHECK-LABEL: define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I11_I:%.*]] = alloca <8 x i64>, align 64
@@ -811,8 +813,6 @@ long long test_mm512_mask_reduce_min_epi
 // CHECK-NEXT:    [[__B_ADDR_I8_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__D_ADDR_I_I:%.*]] = alloca i64, align 8
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x i64>, align 64
@@ -907,17 +907,17 @@ long long test_mm512_mask_reduce_min_epu
 
 // CHECK-LABEL: define double @test_mm512_mask_reduce_min_pd(i8 zeroext %__M, 
<8 x double> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 
64
 // CHECK-NEXT:    [[__W2_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x double>, align 64
-// CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x double>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <2 x double>, align 16
-// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca double, align 8
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <8 x double>, align 
64
+// CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x double>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x double>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i8, align 1
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x double>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x double>, align 32
@@ -1015,14 +1015,14 @@ double test_mm512_mask_reduce_min_pd(__m
 
 // CHECK-LABEL: define i32 @test_mm512_reduce_max_epi32(<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I10_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I11_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x i64>, align 32
@@ -1111,9 +1111,10 @@ double test_mm512_mask_reduce_min_pd(__m
 // CHECK-NEXT:    [[TMP48:%.*]] = icmp sgt <4 x i32> [[TMP45]], [[TMP47]]
 // CHECK-NEXT:    [[TMP49:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> 
[[TMP45]], <4 x i32> [[TMP47]]
 // CHECK-NEXT:    [[TMP50:%.*]] = bitcast <4 x i32> [[TMP49]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP49]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP51:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP51]], i32 0
+// CHECK-NEXT:    [[TMP51:%.*]] = bitcast <2 x i64> [[TMP50]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP51]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP52:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP52]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 int test_mm512_reduce_max_epi32(__m512i __W){
   return _mm512_reduce_max_epi32(__W);
@@ -1121,14 +1122,14 @@ int test_mm512_reduce_max_epi32(__m512i
 
 // CHECK-LABEL: define i32 @test_mm512_reduce_max_epu32(<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I10_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I11_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x i64>, align 32
@@ -1217,9 +1218,10 @@ int test_mm512_reduce_max_epi32(__m512i
 // CHECK-NEXT:    [[TMP48:%.*]] = icmp ugt <4 x i32> [[TMP45]], [[TMP47]]
 // CHECK-NEXT:    [[TMP49:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> 
[[TMP45]], <4 x i32> [[TMP47]]
 // CHECK-NEXT:    [[TMP50:%.*]] = bitcast <4 x i32> [[TMP49]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP49]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP51:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP51]], i32 0
+// CHECK-NEXT:    [[TMP51:%.*]] = bitcast <2 x i64> [[TMP50]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP51]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP52:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP52]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 unsigned int test_mm512_reduce_max_epu32(__m512i __W){
   return _mm512_reduce_max_epu32(__W); 
@@ -1227,14 +1229,14 @@ unsigned int test_mm512_reduce_max_epu32
 
 // CHECK-LABEL: define float @test_mm512_reduce_max_ps(<16 x float> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <8 x float>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <8 x float>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <8 x float>, align 32
@@ -1315,14 +1317,14 @@ float test_mm512_reduce_max_ps(__m512 __
 
 // CHECK-LABEL: define i32 @test_mm512_reduce_min_epi32(<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I10_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I11_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x i64>, align 32
@@ -1411,9 +1413,10 @@ float test_mm512_reduce_max_ps(__m512 __
 // CHECK-NEXT:    [[TMP48:%.*]] = icmp slt <4 x i32> [[TMP45]], [[TMP47]]
 // CHECK-NEXT:    [[TMP49:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> 
[[TMP45]], <4 x i32> [[TMP47]]
 // CHECK-NEXT:    [[TMP50:%.*]] = bitcast <4 x i32> [[TMP49]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP49]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP51:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP51]], i32 0
+// CHECK-NEXT:    [[TMP51:%.*]] = bitcast <2 x i64> [[TMP50]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP51]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP52:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP52]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 int test_mm512_reduce_min_epi32(__m512i __W){
   return _mm512_reduce_min_epi32(__W);
@@ -1421,14 +1424,14 @@ int test_mm512_reduce_min_epi32(__m512i
 
 // CHECK-LABEL: define i32 @test_mm512_reduce_min_epu32(<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I10_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I11_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <4 x i64>, align 32
@@ -1517,9 +1520,10 @@ int test_mm512_reduce_min_epi32(__m512i
 // CHECK-NEXT:    [[TMP48:%.*]] = icmp ult <4 x i32> [[TMP45]], [[TMP47]]
 // CHECK-NEXT:    [[TMP49:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> 
[[TMP45]], <4 x i32> [[TMP47]]
 // CHECK-NEXT:    [[TMP50:%.*]] = bitcast <4 x i32> [[TMP49]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP49]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP51:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP51]], i32 0
+// CHECK-NEXT:    [[TMP51:%.*]] = bitcast <2 x i64> [[TMP50]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP51]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP52:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP52]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 unsigned int test_mm512_reduce_min_epu32(__m512i __W){
   return _mm512_reduce_min_epu32(__W); 
@@ -1527,14 +1531,14 @@ unsigned int test_mm512_reduce_min_epu32
 
 // CHECK-LABEL: define float @test_mm512_reduce_min_ps(<16 x float> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <8 x float>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I10_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I11_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
+// CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <8 x float>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__T2_I:%.*]] = alloca <8 x float>, align 32
@@ -1615,19 +1619,19 @@ float test_mm512_reduce_min_ps(__m512 __
 
 // CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epi32(i16 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I14_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I15_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
-// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
@@ -1771,9 +1775,10 @@ float test_mm512_reduce_min_ps(__m512 __
 // CHECK-NEXT:    [[TMP77:%.*]] = icmp sgt <4 x i32> [[TMP74]], [[TMP76]]
 // CHECK-NEXT:    [[TMP78:%.*]] = select <4 x i1> [[TMP77]], <4 x i32> 
[[TMP74]], <4 x i32> [[TMP76]]
 // CHECK-NEXT:    [[TMP79:%.*]] = bitcast <4 x i32> [[TMP78]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP78]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP80:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP80]], i32 0
+// CHECK-NEXT:    [[TMP80:%.*]] = bitcast <2 x i64> [[TMP79]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP80]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP81:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP81]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 int test_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __W){
   return _mm512_mask_reduce_max_epi32(__M, __W); 
@@ -1781,17 +1786,17 @@ int test_mm512_mask_reduce_max_epi32(__m
 
 // CHECK-LABEL: define i32 @test_mm512_mask_reduce_max_epu32(i16 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__V1_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I14_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I11_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
-// CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
+// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
@@ -1899,9 +1904,10 @@ int test_mm512_mask_reduce_max_epi32(__m
 // CHECK-NEXT:    [[TMP59:%.*]] = icmp ugt <4 x i32> [[TMP56]], [[TMP58]]
 // CHECK-NEXT:    [[TMP60:%.*]] = select <4 x i1> [[TMP59]], <4 x i32> 
[[TMP56]], <4 x i32> [[TMP58]]
 // CHECK-NEXT:    [[TMP61:%.*]] = bitcast <4 x i32> [[TMP60]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP60]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP62:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP62]], i32 0
+// CHECK-NEXT:    [[TMP62:%.*]] = bitcast <2 x i64> [[TMP61]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP62]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP63:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP63]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 unsigned int test_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __W){
   return _mm512_mask_reduce_max_epu32(__M, __W); 
@@ -1909,19 +1915,19 @@ unsigned int test_mm512_mask_reduce_max_
 
 // CHECK-LABEL: define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, 
<16 x float> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 
64
 // CHECK-NEXT:    [[__W2_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
-// CHECK-NEXT:    [[__A_ADDR_I16_I:%.*]] = alloca <8 x float>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I17_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca float, align 4
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 
64
+// CHECK-NEXT:    [[__A_ADDR_I16_I:%.*]] = alloca <8 x float>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I17_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x float>, align 32
@@ -2053,19 +2059,19 @@ float test_mm512_mask_reduce_max_ps(__mm
 
 // CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epi32(i16 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I14_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I15_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
-// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
@@ -2209,9 +2215,10 @@ float test_mm512_mask_reduce_max_ps(__mm
 // CHECK-NEXT:    [[TMP77:%.*]] = icmp slt <4 x i32> [[TMP74]], [[TMP76]]
 // CHECK-NEXT:    [[TMP78:%.*]] = select <4 x i1> [[TMP77]], <4 x i32> 
[[TMP74]], <4 x i32> [[TMP76]]
 // CHECK-NEXT:    [[TMP79:%.*]] = bitcast <4 x i32> [[TMP78]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP78]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP80:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP80]], i32 0
+// CHECK-NEXT:    [[TMP80:%.*]] = bitcast <2 x i64> [[TMP79]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP80]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP81:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP81]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 int test_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __W){
   return _mm512_mask_reduce_min_epi32(__M, __W); 
@@ -2219,19 +2226,19 @@ int test_mm512_mask_reduce_min_epi32(__m
 
 // CHECK-LABEL: define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, 
<8 x i64> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
 // CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <8 x i64>, align 64
-// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__V1_ADDR_I14_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I15_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I12_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I13_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V1_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
 // CHECK-NEXT:    [[__V2_ADDR_I_I:%.*]] = alloca <2 x i64>, align 16
-// CHECK-NEXT:    [[__S_ADDR_I_I:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x i64>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <8 x i64>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <4 x i64>, align 32
@@ -2375,9 +2382,10 @@ int test_mm512_mask_reduce_min_epi32(__m
 // CHECK-NEXT:    [[TMP77:%.*]] = icmp ult <4 x i32> [[TMP74]], [[TMP76]]
 // CHECK-NEXT:    [[TMP78:%.*]] = select <4 x i1> [[TMP77]], <4 x i32> 
[[TMP74]], <4 x i32> [[TMP76]]
 // CHECK-NEXT:    [[TMP79:%.*]] = bitcast <4 x i32> [[TMP78]] to <2 x i64>
-// CHECK-NEXT:    store <4 x i32> [[TMP78]], <4 x i32>* [[__T10_I]], align 16
-// CHECK-NEXT:    [[TMP80:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
-// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP80]], i32 0
+// CHECK-NEXT:    [[TMP80:%.*]] = bitcast <2 x i64> [[TMP79]] to <4 x i32>
+// CHECK-NEXT:    store <4 x i32> [[TMP80]], <4 x i32>* [[__T10_I]], align 16
+// CHECK-NEXT:    [[TMP81:%.*]] = load <4 x i32>, <4 x i32>* [[__T10_I]], 
align 16
+// CHECK-NEXT:    [[VECEXT_I:%.*]] = extractelement <4 x i32> [[TMP81]], i32 0
 // CHECK-NEXT:    ret i32 [[VECEXT_I]]
 unsigned int test_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __W){
   return _mm512_mask_reduce_min_epu32(__M, __W); 
@@ -2385,19 +2393,19 @@ unsigned int test_mm512_mask_reduce_min_
 
 // CHECK-LABEL: define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, 
<16 x float> %__W) #0 {
 // CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 
64
 // CHECK-NEXT:    [[__W2_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__U_ADDR_I_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__A_ADDR_I_I:%.*]] = alloca <16 x float>, align 64
-// CHECK-NEXT:    [[__A_ADDR_I16_I:%.*]] = alloca <8 x float>, align 32
-// CHECK-NEXT:    [[__B_ADDR_I17_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__A_ADDR_I14_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I15_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A_ADDR_I12_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I13_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__A2_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
 // CHECK-NEXT:    [[__B_ADDR_I_I:%.*]] = alloca <4 x float>, align 16
-// CHECK-NEXT:    [[__W_ADDR_I_I:%.*]] = alloca float, align 4
-// CHECK-NEXT:    [[DOTCOMPOUNDLITERAL_I_I:%.*]] = alloca <16 x float>, align 
64
+// CHECK-NEXT:    [[__A_ADDR_I16_I:%.*]] = alloca <8 x float>, align 32
+// CHECK-NEXT:    [[__B_ADDR_I17_I:%.*]] = alloca <8 x float>, align 32
 // CHECK-NEXT:    [[__M_ADDR_I:%.*]] = alloca i16, align 2
 // CHECK-NEXT:    [[__V_ADDR_I:%.*]] = alloca <16 x float>, align 64
 // CHECK-NEXT:    [[__T1_I:%.*]] = alloca <8 x float>, align 32

Modified: cfe/trunk/test/CodeGen/avx512f-builtins.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512f-builtins.c?rev=367157&r1=367156&r2=367157&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c Fri Jul 26 14:19:37 2019
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck %s
-// RUN: %clang_cc1 -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck %s
 
 #include <immintrin.h>
 
@@ -10480,20 +10480,24 @@ __m512i test_mm512_maskz_abs_epi64 (__mm
 
 __m512i test_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
 {
-  // CHECK-LABEL: @test_mm512_mask_abs_epi32 
+  // CHECK-LABEL: @test_mm512_mask_abs_epi32
   // CHECK: [[SUB:%.*]] = sub <16 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i32> [[A]], <16 x 
i32> [[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <16 x i32> [[SEL]] to <8 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <8 x i64> [[TMP]] to <16 x i32>
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[SEL]], <16 x i32> %{{.*}}
   return _mm512_mask_abs_epi32 (__W,__U,__A);
 }
 
 __m512i test_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
 {
-  // CHECK-LABEL: @test_mm512_maskz_abs_epi32 
+  // CHECK-LABEL: @test_mm512_maskz_abs_epi32
   // CHECK: [[SUB:%.*]] = sub <16 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i32> [[A]], <16 x 
i32> [[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <16 x i32> [[SEL]] to <8 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <8 x i64> [[TMP]] to <16 x i32>
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[SEL]], <16 x i32> %{{.*}}
   return _mm512_maskz_abs_epi32 (__U,__A);
 }
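
For context: the CHECK lines in the avx512f hunks above match a bitcast of the <16 x i32> select result to <8 x i64> (the storage type of __m512i) and back, before the final masked select. Below is a minimal, hypothetical sketch of that FileCheck idiom; the function name and the pared-down RUN line are illustrative only, not part of the test file.

```
// Illustrative sketch only; "demo_mask_abs_epi32" is not in the test file.
// RUN: %clang_cc1 -fexperimental-new-pass-manager -ffreestanding %s \
// RUN:   -triple=x86_64-apple-darwin -target-feature +avx512f \
// RUN:   -emit-llvm -o - | FileCheck %s
#include <immintrin.h>

__m512i demo_mask_abs_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
  // CHECK: [[SUB:%.*]] = sub <16 x i32> zeroinitializer, [[A:%.*]]
  // CHECK: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], zeroinitializer
  // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i32> [[A]], <16 x i32> [[SUB]]
  // The <16 x i32> result round-trips through <8 x i64>, the storage type of
  // __m512i, before the final masked select:
  // CHECK: [[TMP:%.*]] = bitcast <16 x i32> [[SEL]] to <8 x i64>
  // CHECK: [[SEL2:%.*]] = bitcast <8 x i64> [[TMP]] to <16 x i32>
  // CHECK: select <16 x i1> %{{.*}}, <16 x i32> [[SEL2]], <16 x i32> %{{.*}}
  return _mm512_mask_abs_epi32(__W, __U, __A);
}
```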

Modified: cfe/trunk/test/CodeGen/avx512vl-builtins.c
URL: 
http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vl-builtins.c?rev=367157&r1=367156&r2=367157&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vl-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vl-builtins.c Fri Jul 26 14:19:37 2019
@@ -1,5 +1,4 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin 
-target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall 
-Werror | FileCheck %s
-
+// RUN: %clang_cc1 -ffreestanding %s -fexperimental-new-pass-manager 
-triple=x86_64-apple-darwin -target-feature +avx512f -target-feature +avx512vl 
-emit-llvm -o - -Wall -Werror | FileCheck %s
 
 #include <immintrin.h>
 
@@ -4589,6 +4588,8 @@ __m128i test_mm_mask_abs_epi32(__m128i _
   // CHECK: [[SUB:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <4 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[SEL]] to <2 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[SEL]], <4 x i32> %{{.*}}
   return _mm_mask_abs_epi32(__W,__U,__A); 
 }
@@ -4597,6 +4598,8 @@ __m128i test_mm_maskz_abs_epi32(__mmask8
   // CHECK: [[SUB:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <4 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[A]], <4 x i32> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[SEL]] to <2 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> [[SEL]], <4 x i32> %{{.*}}
   return _mm_maskz_abs_epi32(__U,__A); 
 }
@@ -4605,6 +4608,8 @@ __m256i test_mm256_mask_abs_epi32(__m256
   // CHECK: [[SUB:%.*]] = sub <8 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[A]], <8 x i32> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[SEL]] to <4 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[SEL]], <8 x i32> %{{.*}}
   return _mm256_mask_abs_epi32(__W,__U,__A); 
 }
@@ -4613,6 +4618,8 @@ __m256i test_mm256_maskz_abs_epi32(__mma
   // CHECK: [[SUB:%.*]] = sub <8 x i32> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[A]], <8 x i32> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[SEL]] to <4 x i64>
+  // CHECK: [[SEL:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> [[SEL]], <8 x i32> %{{.*}}
   return _mm256_maskz_abs_epi32(__U,__A); 
 }
@@ -4668,6 +4675,8 @@ __m128i test_mm_maskz_max_epi32(__mmask8
   // CHECK-LABEL: @test_mm_maskz_max_epi32
   // CHECK:       [[CMP:%.*]] = icmp sgt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_maskz_max_epi32(__M,__A,__B); 
 }
@@ -4675,6 +4684,8 @@ __m128i test_mm_mask_max_epi32(__m128i _
   // CHECK-LABEL: @test_mm_mask_max_epi32
   // CHECK:       [[CMP:%.*]] = icmp sgt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_mask_max_epi32(__W,__M,__A,__B); 
 }
@@ -4682,6 +4693,8 @@ __m256i test_mm256_maskz_max_epi32(__mma
   // CHECK-LABEL: @test_mm256_maskz_max_epi32
   // CHECK:       [[CMP:%.*]] = icmp sgt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_maskz_max_epi32(__M,__A,__B); 
 }
@@ -4689,6 +4702,8 @@ __m256i test_mm256_mask_max_epi32(__m256
   // CHECK-LABEL: @test_mm256_mask_max_epi32
   // CHECK:       [[CMP:%.*]] = icmp sgt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_mask_max_epi32(__W,__M,__A,__B); 
 }
@@ -4736,6 +4751,8 @@ __m128i test_mm_maskz_max_epu32(__mmask8
   // CHECK-LABEL: @test_mm_maskz_max_epu32
   // CHECK:       [[CMP:%.*]] = icmp ugt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_maskz_max_epu32(__M,__A,__B); 
 }
@@ -4743,6 +4760,8 @@ __m128i test_mm_mask_max_epu32(__m128i _
   // CHECK-LABEL: @test_mm_mask_max_epu32
   // CHECK:       [[CMP:%.*]] = icmp ugt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_mask_max_epu32(__W,__M,__A,__B); 
 }
@@ -4750,6 +4769,8 @@ __m256i test_mm256_maskz_max_epu32(__mma
   // CHECK-LABEL: @test_mm256_maskz_max_epu32
   // CHECK:       [[CMP:%.*]] = icmp ugt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_maskz_max_epu32(__M,__A,__B); 
 }
@@ -4757,6 +4778,8 @@ __m256i test_mm256_mask_max_epu32(__m256
   // CHECK-LABEL: @test_mm256_mask_max_epu32
   // CHECK:       [[CMP:%.*]] = icmp ugt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_mask_max_epu32(__W,__M,__A,__B); 
 }
@@ -4804,6 +4827,8 @@ __m128i test_mm_maskz_min_epi32(__mmask8
   // CHECK-LABEL: @test_mm_maskz_min_epi32
   // CHECK:       [[CMP:%.*]] = icmp slt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_maskz_min_epi32(__M,__A,__B); 
 }
@@ -4811,6 +4836,8 @@ __m128i test_mm_mask_min_epi32(__m128i _
   // CHECK-LABEL: @test_mm_mask_min_epi32
   // CHECK:       [[CMP:%.*]] = icmp slt <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_mask_min_epi32(__W,__M,__A,__B); 
 }
@@ -4818,6 +4845,8 @@ __m256i test_mm256_maskz_min_epi32(__mma
   // CHECK-LABEL: @test_mm256_maskz_min_epi32
   // CHECK:       [[CMP:%.*]] = icmp slt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_maskz_min_epi32(__M,__A,__B); 
 }
@@ -4825,6 +4854,8 @@ __m256i test_mm256_mask_min_epi32(__m256
   // CHECK-LABEL: @test_mm256_mask_min_epi32
   // CHECK:       [[CMP:%.*]] = icmp slt <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_mask_min_epi32(__W,__M,__A,__B); 
 }
@@ -4872,6 +4903,8 @@ __m128i test_mm_maskz_min_epu32(__mmask8
   // CHECK-LABEL: @test_mm_maskz_min_epu32
   // CHECK:       [[CMP:%.*]] = icmp ult <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_maskz_min_epu32(__M,__A,__B); 
 }
@@ -4879,6 +4912,8 @@ __m128i test_mm_mask_min_epu32(__m128i _
   // CHECK-LABEL: @test_mm_mask_min_epu32
   // CHECK:       [[CMP:%.*]] = icmp ult <4 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[X]], <4 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <2 x i64> [[TMP]] to <4 x i32>
   // CHECK:       select <4 x i1> {{.*}}, <4 x i32> [[RES]], <4 x i32> {{.*}}
   return _mm_mask_min_epu32(__W,__M,__A,__B); 
 }
@@ -4886,6 +4921,8 @@ __m256i test_mm256_maskz_min_epu32(__mma
   // CHECK-LABEL: @test_mm256_maskz_min_epu32
   // CHECK:       [[CMP:%.*]] = icmp ult <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_maskz_min_epu32(__M,__A,__B); 
 }
@@ -4893,6 +4930,8 @@ __m256i test_mm256_mask_min_epu32(__m256
   // CHECK-LABEL: @test_mm256_mask_min_epu32
   // CHECK:       [[CMP:%.*]] = icmp ult <8 x i32> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i32> [[X]], <8 x 
i32> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast <8 x i32> [[RES]] to <4 x i64>
+  // CHECK: [[RES:%.*]] = bitcast <4 x i64> [[TMP]] to <8 x i32>
   // CHECK:       select <8 x i1> {{.*}}, <8 x i32> [[RES]], <8 x i32> {{.*}}
   return _mm256_mask_min_epu32(__W,__M,__A,__B); 
 }

Modified: cfe/trunk/test/CodeGen/avx512vlbw-builtins.c
URL: 
http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/avx512vlbw-builtins.c?rev=367157&r1=367156&r2=367157&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/avx512vlbw-builtins.c (original)
+++ cfe/trunk/test/CodeGen/avx512vlbw-builtins.c Fri Jul 26 14:19:37 2019
@@ -1,6 +1,5 @@
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin 
-target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall 
-Werror | FileCheck %s
-// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin 
-target-feature +avx512bw -target-feature +avx512vl -fno-signed-char -emit-llvm 
-o - -Wall -Werror | FileCheck %s
-
+// RUN: %clang_cc1 -ffreestanding %s -fexperimental-new-pass-manager 
-triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl 
-emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -fexperimental-new-pass-manager 
-triple=x86_64-apple-darwin -target-feature +avx512bw -target-feature +avx512vl 
-fno-signed-char -emit-llvm -o - -Wall -Werror | FileCheck %s
 
 #include <immintrin.h>
 
@@ -901,6 +900,8 @@ __m128i test_mm_mask_abs_epi8(__m128i __
   // CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[A]], <16 x i8> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[SEL]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[SEL]], <16 x i8> %{{.*}}
   return _mm_mask_abs_epi8(__W,__U,__A); 
 }
@@ -910,6 +911,8 @@ __m128i test_mm_maskz_abs_epi8(__mmask16
   // CHECK: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i8> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[A]], <16 x i8> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[SEL]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> [[SEL]], <16 x i8> %{{.*}}
   return _mm_maskz_abs_epi8(__U,__A); 
 }
@@ -919,6 +922,8 @@ __m256i test_mm256_mask_abs_epi8(__m256i
   // CHECK: [[SUB:%.*]] = sub <32 x i8> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[A]], <32 x i8> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[SEL]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[SEL]], <32 x i8> %{{.*}}
   return _mm256_mask_abs_epi8(__W,__U,__A); 
 }
@@ -928,6 +933,8 @@ __m256i test_mm256_maskz_abs_epi8(__mmas
   // CHECK: [[SUB:%.*]] = sub <32 x i8> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[A]], <32 x i8> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[SEL]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> [[SEL]], <32 x i8> %{{.*}}
   return _mm256_maskz_abs_epi8(__U,__A); 
 }
@@ -937,6 +944,8 @@ __m128i test_mm_mask_abs_epi16(__m128i _
   // CHECK: [[SUB:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <8 x i16> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[A]], <8 x i16> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[SEL]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[SEL]], <8 x i16> %{{.*}}
   return _mm_mask_abs_epi16(__W,__U,__A); 
 }
@@ -946,6 +955,8 @@ __m128i test_mm_maskz_abs_epi16(__mmask8
   // CHECK: [[SUB:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <8 x i16> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[A]], <8 x i16> 
[[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[SEL]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <8 x i1> %{{.*}}, <8 x i16> [[SEL]], <8 x i16> %{{.*}}
   return _mm_maskz_abs_epi16(__U,__A); 
 }
@@ -955,6 +966,8 @@ __m256i test_mm256_mask_abs_epi16(__m256
   // CHECK: [[SUB:%.*]] = sub <16 x i16> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[A]], <16 x 
i16> [[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[SEL]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[SEL]], <16 x i16> %{{.*}}
   return _mm256_mask_abs_epi16(__W,__U,__A); 
 }
@@ -964,6 +977,8 @@ __m256i test_mm256_maskz_abs_epi16(__mma
   // CHECK: [[SUB:%.*]] = sub <16 x i16> zeroinitializer, [[A:%.*]]
   // CHECK: [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], zeroinitializer
   // CHECK: [[SEL:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[A]], <16 x 
i16> [[SUB]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[SEL]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[SEL:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK: select <16 x i1> %{{.*}}, <16 x i16> [[SEL]], <16 x i16> %{{.*}}
   return _mm256_maskz_abs_epi16(__U,__A); 
 }
@@ -1229,6 +1244,8 @@ __m128i test_mm_maskz_max_epi8(__mmask16
   // CHECK-LABEL: @test_mm_maskz_max_epi8
   // CHECK:       [[CMP:%.*]] = icmp sgt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_maskz_max_epi8(__M,__A,__B); 
 }
@@ -1236,6 +1253,8 @@ __m128i test_mm_mask_max_epi8(__m128i __
   // CHECK-LABEL: @test_mm_mask_max_epi8
   // CHECK:       [[CMP:%.*]] = icmp sgt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_mask_max_epi8(__W,__M,__A,__B); 
 }
@@ -1243,6 +1262,8 @@ __m256i test_mm256_maskz_max_epi8(__mmas
   // CHECK-LABEL: @test_mm256_maskz_max_epi8
   // CHECK:       [[CMP:%.*]] = icmp sgt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_maskz_max_epi8(__M,__A,__B); 
 }
@@ -1250,6 +1271,8 @@ __m256i test_mm256_mask_max_epi8(__m256i
   // CHECK-LABEL: @test_mm256_mask_max_epi8
   // CHECK:       [[CMP:%.*]] = icmp sgt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_mask_max_epi8(__W,__M,__A,__B); 
 }
@@ -1257,6 +1280,8 @@ __m128i test_mm_maskz_max_epi16(__mmask8
   // CHECK-LABEL: @test_mm_maskz_max_epi16
   // CHECK:       [[CMP:%.*]] = icmp sgt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_maskz_max_epi16(__M,__A,__B); 
 }
@@ -1264,6 +1289,8 @@ __m128i test_mm_mask_max_epi16(__m128i _
   // CHECK-LABEL: @test_mm_mask_max_epi16
   // CHECK:       [[CMP:%.*]] = icmp sgt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_mask_max_epi16(__W,__M,__A,__B); 
 }
@@ -1271,6 +1298,8 @@ __m256i test_mm256_maskz_max_epi16(__mma
   // CHECK-LABEL: @test_mm256_maskz_max_epi16
   // CHECK:       [[CMP:%.*]] = icmp sgt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_maskz_max_epi16(__M,__A,__B); 
 }
@@ -1278,6 +1307,8 @@ __m256i test_mm256_mask_max_epi16(__m256
   // CHECK-LABEL: @test_mm256_mask_max_epi16
   // CHECK:       [[CMP:%.*]] = icmp sgt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_mask_max_epi16(__W,__M,__A,__B); 
 }
@@ -1285,6 +1316,8 @@ __m128i test_mm_maskz_max_epu8(__mmask16
   // CHECK-LABEL: @test_mm_maskz_max_epu8
   // CHECK:       [[CMP:%.*]] = icmp ugt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_maskz_max_epu8(__M,__A,__B); 
 }
@@ -1292,6 +1325,8 @@ __m128i test_mm_mask_max_epu8(__m128i __
   // CHECK-LABEL: @test_mm_mask_max_epu8
   // CHECK:       [[CMP:%.*]] = icmp ugt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_mask_max_epu8(__W,__M,__A,__B); 
 }
@@ -1299,6 +1334,8 @@ __m256i test_mm256_maskz_max_epu8(__mmas
   // CHECK-LABEL: @test_mm256_maskz_max_epu8
   // CHECK:       [[CMP:%.*]] = icmp ugt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_maskz_max_epu8(__M,__A,__B); 
 }
@@ -1306,6 +1343,8 @@ __m256i test_mm256_mask_max_epu8(__m256i
   // CHECK-LABEL: @test_mm256_mask_max_epu8
   // CHECK:       [[CMP:%.*]] = icmp ugt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_mask_max_epu8(__W,__M,__A,__B); 
 }
@@ -1313,6 +1352,8 @@ __m128i test_mm_maskz_max_epu16(__mmask8
   // CHECK-LABEL: @test_mm_maskz_max_epu16
   // CHECK:       [[CMP:%.*]] = icmp ugt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_maskz_max_epu16(__M,__A,__B); 
 }
@@ -1320,6 +1361,8 @@ __m128i test_mm_mask_max_epu16(__m128i _
   // CHECK-LABEL: @test_mm_mask_max_epu16
   // CHECK:       [[CMP:%.*]] = icmp ugt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_mask_max_epu16(__W,__M,__A,__B); 
 }
@@ -1327,6 +1370,8 @@ __m256i test_mm256_maskz_max_epu16(__mma
   // CHECK-LABEL: @test_mm256_maskz_max_epu16
   // CHECK:       [[CMP:%.*]] = icmp ugt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_maskz_max_epu16(__M,__A,__B); 
 }
@@ -1334,6 +1379,8 @@ __m256i test_mm256_mask_max_epu16(__m256
   // CHECK-LABEL: @test_mm256_mask_max_epu16
   // CHECK:       [[CMP:%.*]] = icmp ugt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_mask_max_epu16(__W,__M,__A,__B); 
 }
@@ -1341,6 +1388,8 @@ __m128i test_mm_maskz_min_epi8(__mmask16
   // CHECK-LABEL: @test_mm_maskz_min_epi8
   // CHECK:       [[CMP:%.*]] = icmp slt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_maskz_min_epi8(__M,__A,__B); 
 }
@@ -1348,6 +1397,8 @@ __m128i test_mm_mask_min_epi8(__m128i __
   // CHECK-LABEL: @test_mm_mask_min_epi8
   // CHECK:       [[CMP:%.*]] = icmp slt <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_mask_min_epi8(__W,__M,__A,__B); 
 }
@@ -1355,6 +1406,8 @@ __m256i test_mm256_maskz_min_epi8(__mmas
   // CHECK-LABEL: @test_mm256_maskz_min_epi8
   // CHECK:       [[CMP:%.*]] = icmp slt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_maskz_min_epi8(__M,__A,__B); 
 }
@@ -1362,6 +1415,8 @@ __m256i test_mm256_mask_min_epi8(__m256i
   // CHECK-LABEL: @test_mm256_mask_min_epi8
   // CHECK:       [[CMP:%.*]] = icmp slt <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_mask_min_epi8(__W,__M,__A,__B); 
 }
@@ -1369,6 +1424,8 @@ __m128i test_mm_maskz_min_epi16(__mmask8
   // CHECK-LABEL: @test_mm_maskz_min_epi16
   // CHECK:       [[CMP:%.*]] = icmp slt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_maskz_min_epi16(__M,__A,__B); 
 }
@@ -1376,6 +1433,8 @@ __m128i test_mm_mask_min_epi16(__m128i _
   // CHECK-LABEL: @test_mm_mask_min_epi16
   // CHECK:       [[CMP:%.*]] = icmp slt <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_mask_min_epi16(__W,__M,__A,__B); 
 }
@@ -1383,6 +1442,8 @@ __m256i test_mm256_maskz_min_epi16(__mma
   // CHECK-LABEL: @test_mm256_maskz_min_epi16
   // CHECK:       [[CMP:%.*]] = icmp slt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_maskz_min_epi16(__M,__A,__B); 
 }
@@ -1390,6 +1451,8 @@ __m256i test_mm256_mask_min_epi16(__m256
   // CHECK-LABEL: @test_mm256_mask_min_epi16
   // CHECK:       [[CMP:%.*]] = icmp slt <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_mask_min_epi16(__W,__M,__A,__B); 
 }
@@ -1397,6 +1460,8 @@ __m128i test_mm_maskz_min_epu8(__mmask16
   // CHECK-LABEL: @test_mm_maskz_min_epu8
   // CHECK:       [[CMP:%.*]] = icmp ult <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_maskz_min_epu8(__M,__A,__B); 
 }
@@ -1404,6 +1469,8 @@ __m128i test_mm_mask_min_epu8(__m128i __
   // CHECK-LABEL: @test_mm_mask_min_epu8
   // CHECK:       [[CMP:%.*]] = icmp ult <16 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i8> [[X]], <16 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i8> [[RES]], <16 x i8> {{.*}}
   return _mm_mask_min_epu8(__W,__M,__A,__B); 
 }
@@ -1411,6 +1478,8 @@ __m256i test_mm256_maskz_min_epu8(__mmas
   // CHECK-LABEL: @test_mm256_maskz_min_epu8
   // CHECK:       [[CMP:%.*]] = icmp ult <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_maskz_min_epu8(__M,__A,__B); 
 }
@@ -1418,6 +1487,8 @@ __m256i test_mm256_mask_min_epu8(__m256i
   // CHECK-LABEL: @test_mm256_mask_min_epu8
   // CHECK:       [[CMP:%.*]] = icmp ult <32 x i8> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <32 x i1> [[CMP]], <32 x i8> [[X]], <32 
x i8> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<32 x i8>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <32 x i1> {{.*}}, <32 x i8> [[RES]], <32 x i8> {{.*}}
   return _mm256_mask_min_epu8(__W,__M,__A,__B); 
 }
@@ -1425,6 +1496,8 @@ __m128i test_mm_maskz_min_epu16(__mmask8
   // CHECK-LABEL: @test_mm_maskz_min_epu16
   // CHECK:       [[CMP:%.*]] = icmp ult <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_maskz_min_epu16(__M,__A,__B); 
 }
@@ -1432,6 +1505,8 @@ __m128i test_mm_mask_min_epu16(__m128i _
   // CHECK-LABEL: @test_mm_mask_min_epu16
   // CHECK:       [[CMP:%.*]] = icmp ult <8 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <8 x i1> [[CMP]], <8 x i16> [[X]], <8 x 
i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<8 x i16>]] [[RES]] to [[DSTTY:<2 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <8 x i1> {{.*}}, <8 x i16> [[RES]], <8 x i16> {{.*}}
   return _mm_mask_min_epu16(__W,__M,__A,__B); 
 }
@@ -1439,6 +1514,8 @@ __m256i test_mm256_maskz_min_epu16(__mma
   // CHECK-LABEL: @test_mm256_maskz_min_epu16
   // CHECK:       [[CMP:%.*]] = icmp ult <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_maskz_min_epu16(__M,__A,__B); 
 }
@@ -1446,6 +1523,8 @@ __m256i test_mm256_mask_min_epu16(__m256
   // CHECK-LABEL: @test_mm256_mask_min_epu16
   // CHECK:       [[CMP:%.*]] = icmp ult <16 x i16> [[X:%.*]], [[Y:%.*]]
   // CHECK-NEXT:  [[RES:%.*]] = select <16 x i1> [[CMP]], <16 x i16> [[X]], 
<16 x i16> [[Y]]
+  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i16>]] [[RES]] to [[DSTTY:<4 x 
i64>]]
+  // CHECK: [[RES:%.*]] = bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
   // CHECK:       select <16 x i1> {{.*}}, <16 x i16> [[RES]], <16 x i16> 
{{.*}}
   return _mm256_mask_min_epu16(__W,__M,__A,__B); 
 }
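
For context on the [[SRCTY:...]] / [[DSTTY:...]] form used in the avx512vlbw-builtins.c hunks above: [[NAME:pattern]] both matches the pattern and binds the matched text to a FileCheck variable, so the reverse bitcast can be checked without repeating the vector types. A small, hypothetical sketch of the idiom follows; the function name and the pared-down RUN line are illustrative, not from the test.

```
// Illustrative sketch only; "demo_mask_abs_epi8" is not in the test file.
// RUN: %clang_cc1 -fexperimental-new-pass-manager -ffreestanding %s \
// RUN:   -triple=x86_64-apple-darwin -target-feature +avx512bw \
// RUN:   -target-feature +avx512vl -emit-llvm -o - | FileCheck %s
#include <immintrin.h>

__m128i demo_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
  // CHECK: [[SEL:%.*]] = select <16 x i1> {{.*}}, <16 x i8> {{.*}}, <16 x i8> {{.*}}
  // [[SRCTY:<16 x i8>]] and [[DSTTY:<2 x i64>]] capture the matched types, so
  // the second line requires the exact inverse cast via [[DSTTY]]/[[SRCTY]]:
  // CHECK: [[TMP:%.*]] = bitcast [[SRCTY:<16 x i8>]] [[SEL]] to [[DSTTY:<2 x i64>]]
  // CHECK: bitcast [[DSTTY]] [[TMP]] to [[SRCTY]]
  return _mm_mask_abs_epi8(__W, __U, __A);
}
```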

