Author: Eli Friedman
Date: 2020-04-13T13:43:14-07:00
New Revision: 89e0662dee5fa541f284e6be0af9e36e7f39f947
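The patch below teaches IRBuilder to attach an explicit alignment, computed from the module's DataLayout, whenever CreateAlloca/CreateLoad/CreateStore are called without one. As a rough illustration only (not part of this commit; the driver, function name, and data layout string are hypothetical), a caller that never mentions alignment now gets IR with explicit "align" attributes:

// Hypothetical standalone driver, assuming a typical x86-64 data layout.
// After this change, the alloca/load/store built below carry "align 4"
// (the preferred/ABI alignment of i32) even though no alignment is passed.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  // Illustrative layout string; any target data layout works the same way.
  M.setDataLayout("e-m:e-i64:64-f80:128-n8:16:32:64-S128");

  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);

  // No alignment is specified here; IRBuilder now fills it in from the
  // DataLayout instead of leaving the instructions with no alignment.
  AllocaInst *P = B.CreateAlloca(B.getInt32Ty());
  Value *V = B.CreateLoad(B.getInt32Ty(), P);
  B.CreateStore(V, P);
  B.CreateRetVoid();

  M.print(outs(), nullptr); // prints e.g. "%1 = alloca i32, align 4"
  return 0;
}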
URL: https://github.com/llvm/llvm-project/commit/89e0662dee5fa541f284e6be0af9e36e7f39f947
DIFF: https://github.com/llvm/llvm-project/commit/89e0662dee5fa541f284e6be0af9e36e7f39f947.diff

LOG: Make IRBuilder automatically set alignment on load/store/alloca.

This is equivalent in terms of LLVM IR semantics, but we want to
transition away from using MaybeAlign to represent the alignment of
these instructions.

Differential Revision: https://reviews.llvm.org/D77984

Added: 


Modified: 
    clang/test/CodeGen/arm_neon_intrinsics.c
    llvm/include/llvm/IR/IRBuilder.h
    llvm/test/Bindings/llvm-c/atomics.ll
    llvm/test/Bindings/llvm-c/echo.ll
    llvm/test/Bindings/llvm-c/memops.ll
    llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
    llvm/test/Instrumentation/AddressSanitizer/debug-info-alloca.ll
    llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
    llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
    llvm/test/Transforms/ArgumentPromotion/dbg.ll
    llvm/test/Transforms/SROA/alignment.ll
    llvm/test/Transforms/SROA/basictest.ll
    llvm/test/Transforms/SROA/preserve-nonnull.ll
    llvm/tools/llvm-c-test/echo.cpp
    polly/test/Isl/CodeGen/invariant_load_alias_metadata.ll
    polly/test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
    polly/test/Isl/CodeGen/partial_write_array.ll
    polly/test/Isl/CodeGen/partial_write_impossible_restriction.ll

Removed: 


################################################################################
diff --git a/clang/test/CodeGen/arm_neon_intrinsics.c b/clang/test/CodeGen/arm_neon_intrinsics.c
index bff9f6a2e30f..18d59ffe0ae7 100644
--- a/clang/test/CodeGen/arm_neon_intrinsics.c
+++ b/clang/test/CodeGen/arm_neon_intrinsics.c
@@ -20227,10 +20227,10 @@ poly8x8_t test_vtbx4_p8(poly8x8_t a, poly8x8x4_t b, uint8x8_t c) {
// CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>*
// CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
-// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], !alias.scope !3
+// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !3
// CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1
// CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
-// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], !alias.scope !3
+// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !3
// CHECK: ret void
int8x8x2_t test_vtrn_s8(int8x8_t a, int8x8_t b) {
  return vtrn_s8(a, b);
@@ -20242,10 +20242,10 @@ int8x8x2_t test_vtrn_s8(int8x8_t a, int8x8_t b) {
// CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>*
// CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
-// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], !alias.scope !6
+// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !6
// CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1
// CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
-// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], !alias.scope !6
+// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !6
// CHECK: ret void
int16x4x2_t test_vtrn_s16(int16x4_t a, int16x4_t b) {
  return
vtrn_s16(a, b); @@ -20257,10 +20257,10 @@ int16x4x2_t test_vtrn_s16(int16x4_t a, int16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], !alias.scope !9 +// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !9 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], !alias.scope !9 +// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !9 // CHECK: ret void int32x2x2_t test_vtrn_s32(int32x2_t a, int32x2_t b) { return vtrn_s32(a, b); @@ -20270,10 +20270,10 @@ int32x2x2_t test_vtrn_s32(int32x2_t a, int32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], !alias.scope !12 +// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !12 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], !alias.scope !12 +// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !12 // CHECK: ret void uint8x8x2_t test_vtrn_u8(uint8x8_t a, uint8x8_t b) { return vtrn_u8(a, b); @@ -20285,10 +20285,10 @@ uint8x8x2_t test_vtrn_u8(uint8x8_t a, uint8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], !alias.scope !15 +// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !15 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], !alias.scope !15 +// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !15 // CHECK: ret void uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) { return vtrn_u16(a, b); @@ -20300,10 +20300,10 @@ uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], !alias.scope !18 +// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !18 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], !alias.scope !18 +// CHECK: 
store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !18 // CHECK: ret void uint32x2x2_t test_vtrn_u32(uint32x2_t a, uint32x2_t b) { return vtrn_u32(a, b); @@ -20315,10 +20315,10 @@ uint32x2x2_t test_vtrn_u32(uint32x2_t a, uint32x2_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x float> [[VTRN_I]], <2 x float>* [[TMP3]], !alias.scope !21 +// CHECK: store <2 x float> [[VTRN_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !21 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x float> [[VTRN1_I]], <2 x float>* [[TMP4]], !alias.scope !21 +// CHECK: store <2 x float> [[VTRN1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !21 // CHECK: ret void float32x2x2_t test_vtrn_f32(float32x2_t a, float32x2_t b) { return vtrn_f32(a, b); @@ -20328,10 +20328,10 @@ float32x2x2_t test_vtrn_f32(float32x2_t a, float32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], !alias.scope !24 +// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !24 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], !alias.scope !24 +// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !24 // CHECK: ret void poly8x8x2_t test_vtrn_p8(poly8x8_t a, poly8x8_t b) { return vtrn_p8(a, b); @@ -20343,10 +20343,10 @@ poly8x8x2_t test_vtrn_p8(poly8x8_t a, poly8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], !alias.scope !27 +// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !27 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], !alias.scope !27 +// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !27 // CHECK: ret void poly16x4x2_t test_vtrn_p16(poly16x4_t a, poly16x4_t b) { return vtrn_p16(a, b); @@ -20356,10 +20356,10 @@ poly16x4x2_t test_vtrn_p16(poly16x4_t a, poly16x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], !alias.scope !30 +// CHECK: store <16 
x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !30 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !alias.scope !30 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !30 // CHECK: ret void int8x16x2_t test_vtrnq_s8(int8x16_t a, int8x16_t b) { return vtrnq_s8(a, b); @@ -20371,10 +20371,10 @@ int8x16x2_t test_vtrnq_s8(int8x16_t a, int8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], !alias.scope !33 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !33 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !alias.scope !33 +// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !33 // CHECK: ret void int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) { return vtrnq_s16(a, b); @@ -20386,10 +20386,10 @@ int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], !alias.scope !36 +// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !36 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], !alias.scope !36 +// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !36 // CHECK: ret void int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) { return vtrnq_s32(a, b); @@ -20399,10 +20399,10 @@ int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], !alias.scope !39 +// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !39 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !alias.scope !39 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], 
align 4, !alias.scope !39 // CHECK: ret void uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) { return vtrnq_u8(a, b); @@ -20414,10 +20414,10 @@ uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], !alias.scope !42 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !42 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !alias.scope !42 +// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !42 // CHECK: ret void uint16x8x2_t test_vtrnq_u16(uint16x8_t a, uint16x8_t b) { return vtrnq_u16(a, b); @@ -20429,10 +20429,10 @@ uint16x8x2_t test_vtrnq_u16(uint16x8_t a, uint16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], !alias.scope !45 +// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !45 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], !alias.scope !45 +// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !45 // CHECK: ret void uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { return vtrnq_u32(a, b); @@ -20444,10 +20444,10 @@ uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6> -// CHECK: store <4 x float> [[VTRN_I]], <4 x float>* [[TMP3]], !alias.scope !48 +// CHECK: store <4 x float> [[VTRN_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !48 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7> -// CHECK: store <4 x float> [[VTRN1_I]], <4 x float>* [[TMP4]], !alias.scope !48 +// CHECK: store <4 x float> [[VTRN1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !48 // CHECK: ret void float32x4x2_t test_vtrnq_f32(float32x4_t a, float32x4_t b) { return vtrnq_f32(a, b); @@ -20457,10 +20457,10 @@ float32x4x2_t test_vtrnq_f32(float32x4_t a, float32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], !alias.scope !51 +// CHECK: store <16 x 
i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !51 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], !alias.scope !51 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !51 // CHECK: ret void poly8x16x2_t test_vtrnq_p8(poly8x16_t a, poly8x16_t b) { return vtrnq_p8(a, b); @@ -20472,10 +20472,10 @@ poly8x16x2_t test_vtrnq_p8(poly8x16_t a, poly8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], !alias.scope !54 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !54 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], !alias.scope !54 +// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !54 // CHECK: ret void poly16x8x2_t test_vtrnq_p16(poly16x8_t a, poly16x8_t b) { return vtrnq_p16(a, b); @@ -20649,10 +20649,10 @@ uint16x8_t test_vtstq_p16(poly16x8_t a, poly16x8_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], !alias.scope !57 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !57 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], !alias.scope !57 +// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !57 // CHECK: ret void int8x8x2_t test_vuzp_s8(int8x8_t a, int8x8_t b) { return vuzp_s8(a, b); @@ -20664,10 +20664,10 @@ int8x8x2_t test_vuzp_s8(int8x8_t a, int8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], !alias.scope !60 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !60 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], !alias.scope !60 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !60 // CHECK: ret void int16x4x2_t test_vuzp_s16(int16x4_t a, int16x4_t b) { return vuzp_s16(a, b); @@ -20679,10 +20679,10 @@ 
int16x4x2_t test_vuzp_s16(int16x4_t a, int16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], !alias.scope !63 +// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !63 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], !alias.scope !63 +// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !63 // CHECK: ret void int32x2x2_t test_vuzp_s32(int32x2_t a, int32x2_t b) { return vuzp_s32(a, b); @@ -20692,10 +20692,10 @@ int32x2x2_t test_vuzp_s32(int32x2_t a, int32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], !alias.scope !66 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !66 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], !alias.scope !66 +// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !66 // CHECK: ret void uint8x8x2_t test_vuzp_u8(uint8x8_t a, uint8x8_t b) { return vuzp_u8(a, b); @@ -20707,10 +20707,10 @@ uint8x8x2_t test_vuzp_u8(uint8x8_t a, uint8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], !alias.scope !69 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !69 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], !alias.scope !69 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !69 // CHECK: ret void uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) { return vuzp_u16(a, b); @@ -20722,10 +20722,10 @@ uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], !alias.scope !72 +// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !72 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], !alias.scope !72 +// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* 
[[TMP4]], align 4, !alias.scope !72 // CHECK: ret void uint32x2x2_t test_vuzp_u32(uint32x2_t a, uint32x2_t b) { return vuzp_u32(a, b); @@ -20737,10 +20737,10 @@ uint32x2x2_t test_vuzp_u32(uint32x2_t a, uint32x2_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x float> [[VUZP_I]], <2 x float>* [[TMP3]], !alias.scope !75 +// CHECK: store <2 x float> [[VUZP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !75 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x float> [[VUZP1_I]], <2 x float>* [[TMP4]], !alias.scope !75 +// CHECK: store <2 x float> [[VUZP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !75 // CHECK: ret void float32x2x2_t test_vuzp_f32(float32x2_t a, float32x2_t b) { return vuzp_f32(a, b); @@ -20750,10 +20750,10 @@ float32x2x2_t test_vuzp_f32(float32x2_t a, float32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], !alias.scope !78 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !78 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], !alias.scope !78 +// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !78 // CHECK: ret void poly8x8x2_t test_vuzp_p8(poly8x8_t a, poly8x8_t b) { return vuzp_p8(a, b); @@ -20765,10 +20765,10 @@ poly8x8x2_t test_vuzp_p8(poly8x8_t a, poly8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], !alias.scope !81 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !81 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], !alias.scope !81 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !81 // CHECK: ret void poly16x4x2_t test_vuzp_p16(poly16x4_t a, poly16x4_t b) { return vuzp_p16(a, b); @@ -20778,10 +20778,10 @@ poly16x4x2_t test_vuzp_p16(poly16x4_t a, poly16x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], !alias.scope !84 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], 
align 4, !alias.scope !84 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !alias.scope !84 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !84 // CHECK: ret void int8x16x2_t test_vuzpq_s8(int8x16_t a, int8x16_t b) { return vuzpq_s8(a, b); @@ -20793,10 +20793,10 @@ int8x16x2_t test_vuzpq_s8(int8x16_t a, int8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], !alias.scope !87 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !87 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !alias.scope !87 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !87 // CHECK: ret void int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) { return vuzpq_s16(a, b); @@ -20808,10 +20808,10 @@ int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], !alias.scope !90 +// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !90 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], !alias.scope !90 +// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !90 // CHECK: ret void int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) { return vuzpq_s32(a, b); @@ -20821,10 +20821,10 @@ int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], !alias.scope !93 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !93 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !alias.scope !93 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !93 // CHECK: ret 
void uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) { return vuzpq_u8(a, b); @@ -20836,10 +20836,10 @@ uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], !alias.scope !96 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !96 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !alias.scope !96 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !96 // CHECK: ret void uint16x8x2_t test_vuzpq_u16(uint16x8_t a, uint16x8_t b) { return vuzpq_u16(a, b); @@ -20851,10 +20851,10 @@ uint16x8x2_t test_vuzpq_u16(uint16x8_t a, uint16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], !alias.scope !99 +// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !99 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], !alias.scope !99 +// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !99 // CHECK: ret void uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) { return vuzpq_u32(a, b); @@ -20866,10 +20866,10 @@ uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6> -// CHECK: store <4 x float> [[VUZP_I]], <4 x float>* [[TMP3]], !alias.scope !102 +// CHECK: store <4 x float> [[VUZP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !102 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7> -// CHECK: store <4 x float> [[VUZP1_I]], <4 x float>* [[TMP4]], !alias.scope !102 +// CHECK: store <4 x float> [[VUZP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !102 // CHECK: ret void float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) { return vuzpq_f32(a, b); @@ -20879,10 +20879,10 @@ float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], !alias.scope !105 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* 
[[TMP1]], align 4, !alias.scope !105 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], !alias.scope !105 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !105 // CHECK: ret void poly8x16x2_t test_vuzpq_p8(poly8x16_t a, poly8x16_t b) { return vuzpq_p8(a, b); @@ -20894,10 +20894,10 @@ poly8x16x2_t test_vuzpq_p8(poly8x16_t a, poly8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], !alias.scope !108 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !108 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], !alias.scope !108 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !108 // CHECK: ret void poly16x8x2_t test_vuzpq_p16(poly16x8_t a, poly16x8_t b) { return vuzpq_p16(a, b); @@ -20907,10 +20907,10 @@ poly16x8x2_t test_vuzpq_p16(poly16x8_t a, poly16x8_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], !alias.scope !111 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !111 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], !alias.scope !111 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !111 // CHECK: ret void int8x8x2_t test_vzip_s8(int8x8_t a, int8x8_t b) { return vzip_s8(a, b); @@ -20922,10 +20922,10 @@ int8x8x2_t test_vzip_s8(int8x8_t a, int8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], !alias.scope !114 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !114 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], !alias.scope !114 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !114 // CHECK: ret void int16x4x2_t test_vzip_s16(int16x4_t a, int16x4_t b) { return vzip_s16(a, b); @@ -20937,10 +20937,10 @@ int16x4x2_t 
test_vzip_s16(int16x4_t a, int16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], !alias.scope !117 +// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !117 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], !alias.scope !117 +// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !117 // CHECK: ret void int32x2x2_t test_vzip_s32(int32x2_t a, int32x2_t b) { return vzip_s32(a, b); @@ -20950,10 +20950,10 @@ int32x2x2_t test_vzip_s32(int32x2_t a, int32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], !alias.scope !120 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !120 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], !alias.scope !120 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !120 // CHECK: ret void uint8x8x2_t test_vzip_u8(uint8x8_t a, uint8x8_t b) { return vzip_u8(a, b); @@ -20965,10 +20965,10 @@ uint8x8x2_t test_vzip_u8(uint8x8_t a, uint8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], !alias.scope !123 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !123 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], !alias.scope !123 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !123 // CHECK: ret void uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) { return vzip_u16(a, b); @@ -20980,10 +20980,10 @@ uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], !alias.scope !126 +// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !126 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], !alias.scope !126 +// CHECK: store <2 x i32> [[VZIP1_I]], <2 x 
i32>* [[TMP4]], align 4, !alias.scope !126 // CHECK: ret void uint32x2x2_t test_vzip_u32(uint32x2_t a, uint32x2_t b) { return vzip_u32(a, b); @@ -20995,10 +20995,10 @@ uint32x2x2_t test_vzip_u32(uint32x2_t a, uint32x2_t b) { // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2> -// CHECK: store <2 x float> [[VZIP_I]], <2 x float>* [[TMP3]], !alias.scope !129 +// CHECK: store <2 x float> [[VZIP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !129 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3> -// CHECK: store <2 x float> [[VZIP1_I]], <2 x float>* [[TMP4]], !alias.scope !129 +// CHECK: store <2 x float> [[VZIP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !129 // CHECK: ret void float32x2x2_t test_vzip_f32(float32x2_t a, float32x2_t b) { return vzip_f32(a, b); @@ -21008,10 +21008,10 @@ float32x2x2_t test_vzip_f32(float32x2_t a, float32x2_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], !alias.scope !132 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !132 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], !alias.scope !132 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !132 // CHECK: ret void poly8x8x2_t test_vzip_p8(poly8x8_t a, poly8x8_t b) { return vzip_p8(a, b); @@ -21023,10 +21023,10 @@ poly8x8x2_t test_vzip_p8(poly8x8_t a, poly8x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], !alias.scope !135 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !135 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], !alias.scope !135 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !135 // CHECK: ret void poly16x4x2_t test_vzip_p16(poly16x4_t a, poly16x4_t b) { return vzip_p16(a, b); @@ -21036,10 +21036,10 @@ poly16x4x2_t test_vzip_p16(poly16x4_t a, poly16x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], !alias.scope !138 +// CHECK: store <16 x i8> [[VZIP_I]], <16 
x i8>* [[TMP1]], align 4, !alias.scope !138 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !alias.scope !138 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !138 // CHECK: ret void int8x16x2_t test_vzipq_s8(int8x16_t a, int8x16_t b) { return vzipq_s8(a, b); @@ -21051,10 +21051,10 @@ int8x16x2_t test_vzipq_s8(int8x16_t a, int8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], !alias.scope !141 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !141 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !alias.scope !141 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !141 // CHECK: ret void int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) { return vzipq_s16(a, b); @@ -21066,10 +21066,10 @@ int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], !alias.scope !144 +// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !144 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], !alias.scope !144 +// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !144 // CHECK: ret void int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) { return vzipq_s32(a, b); @@ -21079,10 +21079,10 @@ int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], !alias.scope !147 +// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !147 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !alias.scope !147 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, 
!alias.scope !147 // CHECK: ret void uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) { return vzipq_u8(a, b); @@ -21094,10 +21094,10 @@ uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], !alias.scope !150 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !150 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !alias.scope !150 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !150 // CHECK: ret void uint16x8x2_t test_vzipq_u16(uint16x8_t a, uint16x8_t b) { return vzipq_u16(a, b); @@ -21109,10 +21109,10 @@ uint16x8x2_t test_vzipq_u16(uint16x8_t a, uint16x8_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], !alias.scope !153 +// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !153 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], !alias.scope !153 +// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !153 // CHECK: ret void uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) { return vzipq_u32(a, b); @@ -21124,10 +21124,10 @@ uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) { // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5> -// CHECK: store <4 x float> [[VZIP_I]], <4 x float>* [[TMP3]], !alias.scope !156 +// CHECK: store <4 x float> [[VZIP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !156 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7> -// CHECK: store <4 x float> [[VZIP1_I]], <4 x float>* [[TMP4]], !alias.scope !156 +// CHECK: store <4 x float> [[VZIP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !156 // CHECK: ret void float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) { return vzipq_f32(a, b); @@ -21137,10 +21137,10 @@ float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) { // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], !alias.scope !159 +// CHECK: store <16 
x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !159 // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], !alias.scope !159 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !159 // CHECK: ret void poly8x16x2_t test_vzipq_p8(poly8x16_t a, poly8x16_t b) { return vzipq_p8(a, b); @@ -21152,10 +21152,10 @@ poly8x16x2_t test_vzipq_p8(poly8x16_t a, poly8x16_t b) { // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], !alias.scope !162 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !162 // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15> -// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], !alias.scope !162 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !162 // CHECK: ret void poly16x8x2_t test_vzipq_p16(poly16x8_t a, poly16x8_t b) { return vzipq_p16(a, b); diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h index 75a7980b9e76..d4485ee23ab4 100644 --- a/llvm/include/llvm/IR/IRBuilder.h +++ b/llvm/include/llvm/IR/IRBuilder.h @@ -1568,28 +1568,32 @@ class IRBuilderBase { AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize = nullptr, const Twine &Name = "") { - return Insert(new AllocaInst(Ty, AddrSpace, ArraySize), Name); + const DataLayout &DL = BB->getModule()->getDataLayout(); + Align AllocaAlign = DL.getPrefTypeAlign(Ty); + return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); } AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr, const Twine &Name = "") { - const DataLayout &DL = BB->getParent()->getParent()->getDataLayout(); - return Insert(new AllocaInst(Ty, DL.getAllocaAddrSpace(), ArraySize), Name); + const DataLayout &DL = BB->getModule()->getDataLayout(); + Align AllocaAlign = DL.getPrefTypeAlign(Ty); + unsigned AddrSpace = DL.getAllocaAddrSpace(); + return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); } /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of /// converting the string to 'bool' for the isVolatile parameter. 
LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) { - return Insert(new LoadInst(Ty, Ptr), Name); + return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); } LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") { - return Insert(new LoadInst(Ty, Ptr), Name); + return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); } LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile, const Twine &Name = "") { - return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name); + return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name); } // Deprecated [opaque pointer types] @@ -1609,7 +1613,7 @@ class IRBuilderBase { } StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) { - return Insert(new StoreInst(Val, Ptr, isVolatile)); + return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile); } LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, @@ -1620,9 +1624,7 @@ class IRBuilderBase { } LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name) { - LoadInst *LI = CreateLoad(Ty, Ptr, Name); - LI->setAlignment(Align); - return LI; + return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); } LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, @@ -1633,9 +1635,7 @@ class IRBuilderBase { } LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const Twine &Name = "") { - LoadInst *LI = CreateLoad(Ty, Ptr, Name); - LI->setAlignment(Align); - return LI; + return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); } LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, @@ -1647,9 +1647,11 @@ class IRBuilderBase { } LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, bool isVolatile, const Twine &Name = "") { - LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name); - LI->setAlignment(Align); - return LI; + if (!Align) { + const DataLayout &DL = BB->getModule()->getDataLayout(); + Align = DL.getABITypeAlign(Ty); + } + return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name); } // Deprecated [opaque pointer types] @@ -1703,9 +1705,11 @@ class IRBuilderBase { } StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile = false) { - StoreInst *SI = CreateStore(Val, Ptr, isVolatile); - SI->setAlignment(Align); - return SI; + if (!Align) { + const DataLayout &DL = BB->getModule()->getDataLayout(); + Align = DL.getABITypeAlign(Val->getType()); + } + return Insert(new StoreInst(Val, Ptr, isVolatile, Align)); } FenceInst *CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID = SyncScope::System, diff --git a/llvm/test/Bindings/llvm-c/atomics.ll b/llvm/test/Bindings/llvm-c/atomics.ll index 4fe62b07d488..7e4f205986e7 100644 --- a/llvm/test/Bindings/llvm-c/atomics.ll +++ b/llvm/test/Bindings/llvm-c/atomics.ll @@ -3,7 +3,7 @@ ; RUN: diff -w %t.orig %t.echo define i32 @main() { - %1 = alloca i32 + %1 = alloca i32, align 4 %2 = cmpxchg i32* %1, i32 2, i32 3 seq_cst acquire %3 = extractvalue { i32, i1 } %2, 0 ret i32 %3 diff --git a/llvm/test/Bindings/llvm-c/echo.ll b/llvm/test/Bindings/llvm-c/echo.ll index a244a65d91f9..510798592b9d 100644 --- a/llvm/test/Bindings/llvm-c/echo.ll +++ b/llvm/test/Bindings/llvm-c/echo.ll @@ -48,18 +48,18 @@ declare void @decl() ; TODO: label and metadata types define void @types() { - %1 = alloca half - %2 = alloca float - %3 = alloca double - %4 = alloca x86_fp80 - %5 = alloca fp128 - %6 = alloca ppc_fp128 - %7 = alloca i7 - %8 = alloca void (i1)* - 
diff --git a/llvm/test/Bindings/llvm-c/atomics.ll b/llvm/test/Bindings/llvm-c/atomics.ll
index 4fe62b07d488..7e4f205986e7 100644
--- a/llvm/test/Bindings/llvm-c/atomics.ll
+++ b/llvm/test/Bindings/llvm-c/atomics.ll
@@ -3,7 +3,7 @@
; RUN: diff -w %t.orig %t.echo
define i32 @main() {
- %1 = alloca i32
+ %1 = alloca i32, align 4
%2 = cmpxchg i32* %1, i32 2, i32 3 seq_cst acquire
%3 = extractvalue { i32, i1 } %2, 0
ret i32 %3
diff --git a/llvm/test/Bindings/llvm-c/echo.ll b/llvm/test/Bindings/llvm-c/echo.ll
index a244a65d91f9..510798592b9d 100644
--- a/llvm/test/Bindings/llvm-c/echo.ll
+++ b/llvm/test/Bindings/llvm-c/echo.ll
@@ -48,18 +48,18 @@ declare void @decl()
; TODO: label and metadata types
define void @types() {
- %1 = alloca half
- %2 = alloca float
- %3 = alloca double
- %4 = alloca x86_fp80
- %5 = alloca fp128
- %6 = alloca ppc_fp128
- %7 = alloca i7
- %8 = alloca void (i1)*
- %9 = alloca [3 x i22]
- %10 = alloca i328 addrspace(5)*
- %11 = alloca <5 x i23*>
- %12 = alloca x86_mmx
+ %1 = alloca half, align 2
+ %2 = alloca float, align 4
+ %3 = alloca double, align 8
+ %4 = alloca x86_fp80, align 16
+ %5 = alloca fp128, align 16
+ %6 = alloca ppc_fp128, align 16
+ %7 = alloca i7, align 1
+ %8 = alloca void (i1)*, align 8
+ %9 = alloca [3 x i22], align 4
+ %10 = alloca i328 addrspace(5)*, align 8
+ %11 = alloca <5 x i23*>, align 64
+ %12 = alloca x86_mmx, align 8
ret void
}
@@ -203,7 +203,7 @@ declare void @llvm.lifetime.end.p0i8(i64, i8*)
define void @test_intrinsics() {
entry:
%sp = call i8* @llvm.stacksave()
- %x = alloca i32
+ %x = alloca i32, align 4
%0 = bitcast i32* %x to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
diff --git a/llvm/test/Bindings/llvm-c/memops.ll b/llvm/test/Bindings/llvm-c/memops.ll
index cc41b7bf724e..4254043ff26d 100644
--- a/llvm/test/Bindings/llvm-c/memops.ll
+++ b/llvm/test/Bindings/llvm-c/memops.ll
@@ -5,9 +5,9 @@
%S = type { i32, i32 }
define i32 @method(%S* %this, i32 %arg.a, i32 %arg.b) {
- %a = alloca i32
+ %a = alloca i32, align 4
store i32 %arg.a, i32* %a, align 4
- %b = alloca i32
+ %b = alloca i32, align 4
store i32 %arg.b, i32* %b
%1 = load i32, i32* %a, align 4
%2 = load i32, i32* %b, align 4
@@ -22,7 +22,7 @@ define i32 @method(%S* %this, i32 %arg.a, i32 %arg.b) {
}
define i32 @main() {
- %s = alloca %S
+ %s = alloca %S, align 4
store %S zeroinitializer, %S* %s
%1 = getelementptr inbounds %S, %S* %s, i32 0, i32 0
%2 = getelementptr inbounds %S, %S* %s, i32 0, i32 1
diff --git a/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll b/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
index 1d6388bc1d21..92df3402edf6 100644
--- a/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/widen_extending_scalar_loads.ll
@@ -190,7 +190,7 @@ define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
}
; OPT-LABEL: @constant_load_i16_align4_range(
-; OPT: load i32, i32 addrspace(4)* %1, !range !0
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !0
define amdgpu_kernel void @constant_load_i16_align4_range(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !0
%ext = sext i16 %ld to i32
@@ -199,7 +199,7 @@ define amdgpu_kernel void @constant_load_i16_align4_range(i32 addrspace(1)* %out
}
; OPT-LABEL: @constant_load_i16_align4_range_max(
-; OPT: load i32, i32 addrspace(4)* %1, !range !0
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !0
define amdgpu_kernel void @constant_load_i16_align4_range_max(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !1
%ext = sext i16 %ld to i32
@@ -208,7 +208,7 @@ define amdgpu_kernel void @constant_load_i16_align4_range_max(i32 addrspace(1)*
}
; OPT-LABEL: @constant_load_i16_align4_complex_range(
-; OPT: load i32, i32 addrspace(4)* %1, !range !1
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !1
define amdgpu_kernel void @constant_load_i16_align4_complex_range(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !2
%ext = sext i16 %ld to i32
@@ -217,7 +217,7 @@ define amdgpu_kernel void @constant_load_i16_align4_complex_range(i32 addrspace(
}
; OPT-LABEL: @constant_load_i16_align4_range_from_0(
-; OPT: load i32, i32 addrspace(4)* %1{{$}}
+; OPT: load i32, i32 addrspace(4)* %1, align 4{{$}}
define amdgpu_kernel void @constant_load_i16_align4_range_from_0(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !3
%ext = sext i16 %ld to i32
@@ -226,7 +226,7 @@ define amdgpu_kernel void @constant_load_i16_align4_range_from_0(i32 addrspace(1
}
; OPT-LABEL: @constant_load_i16_align4_range_from_neg(
-; OPT: load i32, i32 addrspace(4)* %1, !range !2
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !2
define amdgpu_kernel void @constant_load_i16_align4_range_from_neg(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !4
%ext = sext i16 %ld to i32
@@ -235,7 +235,7 @@ define amdgpu_kernel void @constant_load_i16_align4_range_from_neg(i32 addrspace
}
; OPT-LABEL: @constant_load_i16_align4_range_from_neg_to_0(
-; OPT: load i32, i32 addrspace(4)* %1, !range !2
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !range !2
define amdgpu_kernel void @constant_load_i16_align4_range_from_neg_to_0(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !range !5
%ext = sext i16 %ld to i32
@@ -244,7 +244,7 @@ define amdgpu_kernel void @constant_load_i16_align4_range_from_neg_to_0(i32 addr
}
; OPT-LABEL: @constant_load_i16_align4_invariant
-; OPT: load i32, i32 addrspace(4)* %1, !invariant.load !3
+; OPT: load i32, i32 addrspace(4)* %1, align 4, !invariant.load !3
define amdgpu_kernel void @constant_load_i16_align4_invariant(i32 addrspace(1)* %out, i16 addrspace(4)* %in) #0 {
%ld = load i16, i16 addrspace(4)* %in, align 4, !invariant.load !6
%ext = sext i16 %ld to i32
diff --git a/llvm/test/Instrumentation/AddressSanitizer/debug-info-alloca.ll b/llvm/test/Instrumentation/AddressSanitizer/debug-info-alloca.ll
index ba148e8d6e7a..48cda7d7f48c 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/debug-info-alloca.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/debug-info-alloca.ll
@@ -17,7 +17,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local i32 @main(i32 %argc, i8** %argv) #0 !dbg !15 {
entry:
; No suffix like !dbg !123
-; CHECK: %asan_local_stack_base = alloca i64{{$}}
+; CHECK: %asan_local_stack_base = alloca i64, align 8{{$}}
; CHECK: %3 = call i64 @__asan_stack_malloc_0(i64 64){{$}}
%argc.addr = alloca i32, align 4
%argv.addr = alloca i8**, align 8
diff --git a/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll b/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
index bdcf0208d2cc..b3269b879988 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll
@@ -7,9 +7,9 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo() {
entry:
; CHECK: section "__sancov_cntrs", comdat($foo), align 1
-; CHECK: %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize
+; CHECK: %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
; CHECK: %1 = add i8 %0, 1
-; CHECK: store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize
+; CHECK: store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
ret void
}
; CHECK: call void @__sanitizer_cov_8bit_counters_init(i8* bitcast (i8** @__start___sancov_cntrs to i8*), i8* bitcast (i8** @__stop___sancov_cntrs to i8*))
diff --git a/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll b/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
index b7f8e83894cb..4819420e04e2 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/inline-bool-flag.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo() {
entry:
; CHECK: @__sancov_gen_ = private global [1 x i1] zeroinitializer, section "__sancov_bool_flag", comdat($foo), align 1, !associated !0
-; CHECK: store i1 true, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), !nosanitize !1
+; CHECK: store i1 true, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize !1
ret void
}
; CHECK: call void @__sanitizer_cov_bool_flag_init(i1* bitcast (i1** @__start___sancov_bool_flag to i1*), i1* bitcast (i1** @__stop___sancov_bool_flag to i1*))
diff --git a/llvm/test/Transforms/ArgumentPromotion/dbg.ll b/llvm/test/Transforms/ArgumentPromotion/dbg.ll
index b26f5ce04b33..e576ec099c61 100644
--- a/llvm/test/Transforms/ArgumentPromotion/dbg.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/dbg.ll
@@ -38,9 +38,9 @@ define void @caller(i32** %Y, %struct.pair* %P) {
; CHECK-NEXT: [[Y_VAL_VAL:%.*]] = load i32, i32* [[Y_VAL]], align 8, !dbg !4
; CHECK-NEXT: call void @test(i32 [[Y_VAL_VAL]]), !dbg !4
; CHECK-NEXT: [[P_0:%.*]] = getelementptr [[STRUCT_PAIR:%.*]], %struct.pair* [[P]], i32 0, i32 0, !dbg !5
-; CHECK-NEXT: [[P_0_VAL:%.*]] = load i32, i32* [[P_0]], !dbg !5
+; CHECK-NEXT: [[P_0_VAL:%.*]] = load i32, i32* [[P_0]], align 4, !dbg !5
; CHECK-NEXT: [[P_1:%.*]] = getelementptr [[STRUCT_PAIR]], %struct.pair* [[P]], i32 0, i32 1, !dbg !5
-; CHECK-NEXT: [[P_1_VAL:%.*]] = load i32, i32* [[P_1]], !dbg !5
+; CHECK-NEXT: [[P_1_VAL:%.*]] = load i32, i32* [[P_1]], align 4, !dbg !5
; CHECK-NEXT: call void @test_byval(i32 [[P_0_VAL]], i32 [[P_1_VAL]]), !dbg !5
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SROA/alignment.ll b/llvm/test/Transforms/SROA/alignment.ll
index 674f45fb1665..f411ed06578d 100644
--- a/llvm/test/Transforms/SROA/alignment.ll
+++ b/llvm/test/Transforms/SROA/alignment.ll
@@ -130,11 +130,14 @@ entry:
}
define void @test6() {
-; Test that we promote alignment when the underlying alloca switches to one
-; that innately provides it.
+; We should set the alignment on all load and store operations; make sure
+; we choose an appropriate alignment.
; CHECK-LABEL: @test6(
-; CHECK: alloca double
-; CHECK: alloca double
+; CHECK: alloca double{{$}}
+; CHECK: alloca double{{$}}
+; CHECK: store{{.*}}, align 8
+; CHECK: load{{.*}}, align 8
+; CHECK: store{{.*}}, align 8
; CHECK-NOT: align
; CHECK: ret void
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 498d2c869a3b..6e8d4d3ae9b3 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -195,7 +195,7 @@ entry:
%overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
store i8 1, i8* %overlap.1.i8, !tbaa !3
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
-; CHECK-NEXT: store i8 1, i8* %[[gep]], !tbaa [[TAG_3:!.*]]
+; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_3:!.*]]
store i16 1, i16* %overlap.1.i16, !tbaa !5
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
; CHECK-NEXT: store i16 1, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_5:!.*]]
@@ -256,7 +256,7 @@ entry:
%overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
store i8 1, i8* %overlap2.1.0.i8, !tbaa !27
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
-; CHECK-NEXT: store i8 1, i8* %[[gep]], !tbaa [[TAG_27:!.*]]
+; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_27:!.*]]
store i16 1, i16* %overlap2.1.0.i16, !tbaa !29
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
; CHECK-NEXT: store i16 1, i16* %[[bitcast]], {{.*}}, !tbaa [[TAG_29:!.*]]
@@ -286,7 +286,7 @@ entry:
; CHECK-NEXT: store i32 1, i32* %[[bitcast]], {{.*}}, !tbaa [[TAG_39:!.*]]
store i8 1, i8* %overlap2.2.1.i8, !tbaa !41
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
-; CHECK-NEXT: store i8 1, i8* %[[gep]], !tbaa [[TAG_41:!.*]]
+; CHECK-NEXT: store i8 1, i8* %[[gep]], align 1, !tbaa [[TAG_41:!.*]]
store i16 1, i16* %overlap2.2.1.i16, !tbaa !43
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
diff --git a/llvm/test/Transforms/SROA/preserve-nonnull.ll b/llvm/test/Transforms/SROA/preserve-nonnull.ll
index d6f084fd7490..284a6154cc17 100644
--- a/llvm/test/Transforms/SROA/preserve-nonnull.ll
+++ b/llvm/test/Transforms/SROA/preserve-nonnull.ll
@@ -12,7 +12,7 @@ define i8* @propagate_nonnull(i32* %v) {
; CHECK-NEXT: %[[A:.*]] = alloca i8*
; CHECK-NEXT: %[[V_CAST:.*]] = bitcast i32* %v to i8*
; CHECK-NEXT: store i8* %[[V_CAST]], i8** %[[A]]
-; CHECK-NEXT: %[[LOAD:.*]] = load volatile i8*, i8** %[[A]], !nonnull !0
+; CHECK-NEXT: %[[LOAD:.*]] = load volatile i8*, i8** %[[A]], align 8, !nonnull !0
; CHECK-NEXT: ret i8* %[[LOAD]]
entry:
%a = alloca [2 x i8*]
diff --git a/llvm/tools/llvm-c-test/echo.cpp b/llvm/tools/llvm-c-test/echo.cpp
index 71b101596ee5..710b5b053460 100644
--- a/llvm/tools/llvm-c-test/echo.cpp
+++ b/llvm/tools/llvm-c-test/echo.cpp
@@ -577,6 +577,7 @@ struct FunCloner {
case LLVMAlloca: {
LLVMTypeRef Ty = CloneType(LLVMGetAllocatedType(Src));
Dst = LLVMBuildAlloca(Builder, Ty, Name);
+ LLVMSetAlignment(Dst, LLVMGetAlignment(Src));
break;
}
case LLVMLoad: {
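The same applies to C API users such as the llvm-c-test cloner above: LLVMBuildAlloca now inherits the DataLayout's preferred alignment, so code that must reproduce or force a particular alignment should set it explicitly afterwards. A minimal sketch using only existing LLVM-C calls (the helper name is made up):

// Hypothetical example, not part of this patch.
#include "llvm-c/Core.h"

static LLVMValueRef buildAllocaWithAlign(LLVMBuilderRef Builder, LLVMTypeRef Ty,
                                         unsigned AlignInBytes, const char *Name) {
  // The builder now assigns a default (preferred) alignment to the alloca...
  LLVMValueRef A = LLVMBuildAlloca(Builder, Ty, Name);
  // ...which the caller can still override afterwards.
  LLVMSetAlignment(A, AlignInBytes);
  return A;
}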
diff --git a/polly/test/Isl/CodeGen/invariant_load_alias_metadata.ll b/polly/test/Isl/CodeGen/invariant_load_alias_metadata.ll
index a21d9932bdd2..4fde2dc6443f 100644
--- a/polly/test/Isl/CodeGen/invariant_load_alias_metadata.ll
+++ b/polly/test/Isl/CodeGen/invariant_load_alias_metadata.ll
@@ -4,7 +4,7 @@
; This test case checks whether Polly generates alias metadata in case of
; the ublas gemm kernel and polly-invariant-load-hoisting.
;
-; CHECK: store float 4.200000e+01, float* %polly.access.A.load, !alias.scope !3, !noalias !4
+; CHECK: store float 4.200000e+01, float* %polly.access.A.load, align 4, !alias.scope !3, !noalias !4
;
; CHECK: !0 = distinct !{!0, !1, !"polly.alias.scope.MemRef_A"}
; CHECK-NEXT: !1 = distinct !{!1, !"polly.alias.scope.domain"}
diff --git a/polly/test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll b/polly/test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
index 645b65c6cdbb..7cec20cbf6c3 100644
--- a/polly/test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
+++ b/polly/test/Isl/CodeGen/non-affine-phi-node-expansion-2.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: polly.stmt.bb3: ; preds = %polly.stmt.bb3.entry
-; CHECK: %tmp6_p_scalar_ = load double, double* %arg1{{[0-9]*}}, !alias.scope !0, !noalias !2
+; CHECK: %tmp6_p_scalar_ = load double, double* %arg1{{[0-9]*}}, align 8, !alias.scope !0, !noalias !2
; CHECK: %p_tmp7 = fadd double 1.000000e+00, %tmp6_p_scalar_
; CHECK: %p_tmp8 = fcmp olt double 1.400000e+01, %p_tmp7
; CHECK: br i1 %p_tmp8, label %polly.stmt.bb9, label %polly.stmt.bb10
diff --git a/polly/test/Isl/CodeGen/partial_write_array.ll b/polly/test/Isl/CodeGen/partial_write_array.ll
index 09fe054d1ff1..15517aa80198 100644
--- a/polly/test/Isl/CodeGen/partial_write_array.ll
+++ b/polly/test/Isl/CodeGen/partial_write_array.ll
@@ -38,7 +38,7 @@ return:
; CHECK: polly.stmt.body.Stmt_body_Write0.partial:
; CHECK-NEXT: %polly.access.A = getelementptr double, double* %A, i64 0
-; CHECK-NEXT: store double 4.200000e+01, double* %polly.access.A, !alias.scope !0, !noalias !2
+; CHECK-NEXT: store double 4.200000e+01, double* %polly.access.A, align 8, !alias.scope !0, !noalias !2
; CHECK-NEXT: br label %polly.stmt.body.cont
; CHECK: polly.stmt.body.cont:
diff --git a/polly/test/Isl/CodeGen/partial_write_impossible_restriction.ll b/polly/test/Isl/CodeGen/partial_write_impossible_restriction.ll
index c8aac54ab2ec..e4c2ce20b82b 100644
--- a/polly/test/Isl/CodeGen/partial_write_impossible_restriction.ll
+++ b/polly/test/Isl/CodeGen/partial_write_impossible_restriction.ll
@@ -50,10 +50,10 @@ if.then.i.i1141.loopexit: ; preds = %cond.end
; CHECK-LABEL: polly.stmt.cond.false:
; CHECK: %polly.access..pn2 = getelementptr i32, i32* %.pn, i64 %polly.indvar
-; CHECK: store i32 %cond.in.sroa.speculate.load.cond.false_p_scalar_, i32* %polly.access..pn2, !alias.scope !0, !noalias !2
+; CHECK: store i32 %cond.in.sroa.speculate.load.cond.false_p_scalar_, i32* %polly.access..pn2, align 4, !alias.scope !0, !noalias !2
; CHECK: br label %polly.merge
; CHECK-LABEL: polly.stmt.cond.false11:
; CHECK: %polly.access..pn14 = getelementptr i32, i32* %.pn, i64 0
-; CHECK: store i32 %cond.in.sroa.speculate.load.cond.false_p_scalar_13, i32* %polly.access..pn14, !alias.scope !0, !noalias !2
+; CHECK: store i32 %cond.in.sroa.speculate.load.cond.false_p_scalar_13, i32* %polly.access..pn14, align 4, !alias.scope !0, !noalias !2
; CHECK: br label %polly.stmt.cond.end15

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits