Author: abataev
Date: Mon Apr 15 13:38:10 2019
New Revision: 358445

URL: http://llvm.org/viewvc/llvm-project?rev=358445&view=rev
Log:
[OPENMP][NVPTX]Run parallel regions with num_threads clauses in SPMD
mode.

After the previous patch, which improved the handling of the number of
threads in parallel regions, parallel regions with num_threads clauses can
now be executed in SPMD mode.
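
For illustration, the kind of construct this affects (a minimal sketch modeled
on the targetBar() pattern in the updated nvptx_target_codegen.cpp test; the
body is illustrative, not the exact test code):

  void targetBar(int *Ptr1, int *Ptr2) {
  #pragma omp target map(Ptr1[:0], Ptr2)
  #pragma omp parallel num_threads(2)
    *Ptr1 = 1; // with this change, emitted as an SPMD kernel (_exec_mode = 0)
  }

Previously such a region was forced into non-SPMD (Generic) mode solely
because of the num_threads clause; an 'if' clause with a non-constant or
false condition still disables SPMD mode.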

Modified:
    cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
    cfe/trunk/test/OpenMP/nvptx_target_codegen.cpp
    cfe/trunk/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp

Modified: cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp?rev=358445&r1=358444&r2=358445&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp Mon Apr 15 13:38:10 2019
@@ -715,12 +715,9 @@ getDataSharingMode(CodeGenModule &CGM) {
 }
 
 /// Check if the parallel directive has an 'if' clause with non-constant or
-/// false condition. Also, check if the number of threads is strictly specified
-/// and run those directives in non-SPMD mode.
-static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
-                                          const OMPExecutableDirective &D) {
-  if (D.hasClausesOfKind<OMPNumThreadsClause>())
-    return true;
+/// false condition.
+static bool hasParallelIfClause(ASTContext &Ctx,
+                                const OMPExecutableDirective &D) {
   for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
     OpenMPDirectiveKind NameModifier = C->getNameModifier();
     if (NameModifier != OMPD_parallel && NameModifier != OMPD_unknown)
@@ -747,7 +744,7 @@ static bool hasNestedSPMDDirective(ASTCo
     switch (D.getDirectiveKind()) {
     case OMPD_target:
       if (isOpenMPParallelDirective(DKind) &&
-          !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+          !hasParallelIfClause(Ctx, *NestedDir))
         return true;
       if (DKind == OMPD_teams) {
         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
@@ -759,14 +756,14 @@ static bool hasNestedSPMDDirective(ASTCo
                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
           DKind = NND->getDirectiveKind();
           if (isOpenMPParallelDirective(DKind) &&
-              !hasParallelIfNumThreadsClause(Ctx, *NND))
+              !hasParallelIfClause(Ctx, *NND))
             return true;
         }
       }
       return false;
     case OMPD_target_teams:
       return isOpenMPParallelDirective(DKind) &&
-             !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
+             !hasParallelIfClause(Ctx, *NestedDir);
     case OMPD_target_simd:
     case OMPD_target_parallel:
     case OMPD_target_parallel_for:
@@ -840,7 +837,7 @@ static bool supportsSPMDExecutionMode(AS
   case OMPD_target_parallel_for_simd:
   case OMPD_target_teams_distribute_parallel_for:
   case OMPD_target_teams_distribute_parallel_for_simd:
-    return !hasParallelIfNumThreadsClause(Ctx, D);
+    return !hasParallelIfClause(Ctx, D);
   case OMPD_target_simd:
   case OMPD_target_teams_distribute:
   case OMPD_target_teams_distribute_simd:

Modified: cfe/trunk/test/OpenMP/nvptx_target_codegen.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/nvptx_target_codegen.cpp?rev=358445&r1=358444&r2=358445&view=diff
==============================================================================
--- cfe/trunk/test/OpenMP/nvptx_target_codegen.cpp (original)
+++ cfe/trunk/test/OpenMP/nvptx_target_codegen.cpp Mon Apr 15 13:38:10 2019
@@ -5,21 +5,20 @@
 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
 // RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
 // expected-no-diagnostics
+
 #ifndef HEADER
 #define HEADER
 
 // Check that the execution mode of all 7 target regions is set to Generic Mode.
 // CHECK-DAG: [[NONSPMD:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds
 // CHECK-DAG: [[UNKNOWN:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds
-// CHECK-DAG: {{@__omp_offloading_.+l59}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l137}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l214}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l324}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l362}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l380}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l345}}_exec_mode = weak constant i8 1
-// CHECK-DAG: [[MAP_TY:%.+]] = type { [128 x i8] }
-// CHECK-DAG: [[GLOB_TY:%.+]] = type { i32* }
+// CHECK-DAG: {{@__omp_offloading_.+l45}}_exec_mode = weak constant i8 0
+// CHECK-DAG: {{@__omp_offloading_.+l123}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l200}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l310}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l348}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l366}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l331}}_exec_mode = weak constant i8 1
 
 __thread int id;
 
@@ -32,29 +31,16 @@ struct TT{
   tx &operator[](int i) { return X; }
 };
 
-// CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l59(i32* [[PTR1:%.+]], i32** dereferenceable{{.*}} [[PTR2_REF:%.+]])
+// CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l45(i32* [[PTR1:%.+]], i32** dereferenceable{{.*}} [[PTR2_REF:%.+]])
 // CHECK: store i32* [[PTR1]], i32** [[PTR1_ADDR:%.+]],
 // CHECK: store i32** [[PTR2_REF]], i32*** [[PTR2_REF_PTR:%.+]],
 // CHECK: [[PTR2_REF:%.+]] = load i32**, i32*** [[PTR2_REF_PTR]],
-// CHECK: call void @__kmpc_kernel_init(
-// CHECK: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MAP_TY]], [[MAP_TY]] addrspace(3)* @{{.+}}, i32 0, i32 0, i32 0) to i8*), i{{64|32}} %{{.+}}, i16 %{{.+}}, i8** addrspacecast (i8* addrspace(3)* [[BUF_PTR:@.+]] to i8**))
-// CHECK: [[BUF:%.+]] = load i8*, i8* addrspace(3)* [[BUF_PTR]],
-// CHECK: [[BUF_OFFS:%.+]] = getelementptr inbounds i8, i8* [[BUF]], i{{[0-9]+}} 0
-// CHECK: [[BUF:%.+]] = bitcast i8* [[BUF_OFFS]] to [[GLOB_TY]]*
-// CHECK: [[PTR1:%.+]] = load i32*, i32** [[PTR1_ADDR]],
-// CHECK: [[PTR1_GLOB_REF:%.+]] = getelementptr inbounds [[GLOB_TY]], [[GLOB_TY]]* [[BUF]], i32 0, i32 0
-// CHECK: store i32* [[PTR1]], i32** [[PTR1_GLOB_REF]],
-// CHECK: call void @__kmpc_begin_sharing_variables(i8*** [[ARG_PTRS_REF:%.+]], i{{64|32}} 2)
-// CHECK: [[ARG_PTRS:%.+]] = load i8**, i8*** [[ARG_PTRS_REF]],
-// CHECK: [[ARG_PTR1:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 0
-// CHECK: [[BC:%.+]] = bitcast i32** [[PTR1_GLOB_REF]] to i8*
-// CHECK: store i8* [[BC]], i8** [[ARG_PTR1]],
-// CHECK: [[ARG_PTR2:%.+]] = getelementptr inbounds i8*, i8** [[ARG_PTRS]], i{{[0-9]+}} 1
-// CHECK: [[BC:%.+]] = bitcast i32** [[PTR2_REF]] to i8*
-// CHECK: store i8* [[BC]], i8** [[ARG_PTR2]],
-// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
-// CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
-// CHECK: call void @__kmpc_end_sharing_variables()
+// CHECK: call void @__kmpc_spmd_kernel_init(
+// CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+// CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+// CHECK: call void @{{.+}}(i32* [[THREADID]], i32* %{{.+}}, i32** [[PTR1_ADDR]], i32** [[PTR2_REF]])
+// CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
 void targetBar(int *Ptr1, int *Ptr2) {
 #pragma omp target map(Ptr1[:0], Ptr2)
 #pragma omp parallel num_threads(2)
@@ -70,7 +56,7 @@ int foo(int n) {
   double cn[5][n];
   TT<long long, char> d;
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l137}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l123}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -101,7 +87,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l137]]()
+  // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l123]]()
   // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
   // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
   // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
@@ -143,7 +129,7 @@ int foo(int n) {
   {
   }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l214}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l200}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -174,7 +160,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l214]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
+  // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l200]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]], i[[SZ:32|64]] [[ID:%[a-zA-Z_]+]])
   // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
   // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
   // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
@@ -217,7 +203,7 @@ int foo(int n) {
     id = aa;
   }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l324}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l310}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -248,7 +234,7 @@ int foo(int n) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l324]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l310]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK:    [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK:    [[LOCAL_B:%.+]] = alloca [10 x float]*
@@ -409,7 +395,7 @@ int baz(int f, double &a) {
   return f;
 }
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+362}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+348}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -440,7 +426,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l362]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l348]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]
@@ -495,7 +481,7 @@ int baz(int f, double &a) {
 
 
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l380}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l366}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -529,7 +515,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l380]](
+  // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l366]](
   // Create local storage for each capture.
   // CHECK:       [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
   // CHECK:       [[LOCAL_B:%.+]] = alloca i[[SZ]]
@@ -648,7 +634,7 @@ int baz(int f, double &a) {
   // CHECK: ret i32 [[RES]]
 
 
-  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l345}}_worker()
+  // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l331}}_worker()
   // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
   // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
   // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
@@ -679,7 +665,7 @@ int baz(int f, double &a) {
   // CHECK: [[EXIT]]
   // CHECK: ret void
 
-  // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l345]](i[[SZ]]
+  // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l331]](i[[SZ]]
   // Create local storage for each capture.
   // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
   // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]

Modified: cfe/trunk/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp?rev=358445&r1=358444&r2=358445&view=diff
==============================================================================
--- cfe/trunk/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp (original)
+++ cfe/trunk/test/OpenMP/nvptx_target_parallel_num_threads_codegen.cpp Mon Apr 15 13:38:10 2019
@@ -9,8 +9,8 @@
 #define HEADER
 
 // Check that the execution mode of all 2 target regions on the gpu is set to non-SPMD Mode.
-// CHECK-DAG: {{@__omp_offloading_.+l21}}_exec_mode = weak constant i8 1
-// CHECK-DAG: {{@__omp_offloading_.+l26}}_exec_mode = weak constant i8 1
+// CHECK-DAG: {{@__omp_offloading_.+l21}}_exec_mode = weak constant i8 0
+// CHECK-DAG: {{@__omp_offloading_.+l26}}_exec_mode = weak constant i8 0
 
 template<typename tx>
 tx ftemplate(int n) {
@@ -46,13 +46,16 @@ int bar(int n){
   // CHECK: store i16* {{%.+}}, i16** [[AA_ADDR]], align
   // CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
   // CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-  // CHECK: call void @__kmpc_kernel_init(i32
-  // CHECK: call void @__kmpc_push_num_threads
-  // CHECK: call void @__kmpc_kernel_deinit(i16 1)
+  // CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
+  // CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+  // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+  // CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+  // CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i16* [[AA]])
+  // CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
   // CHECK: ret void
   // CHECK: }
 
-  // CHECK: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* {{[^%]*}}[[ARG:%.+]])
+  // CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* {{[^%]*}}[[ARG:%.+]])
   // CHECK: = alloca i32*, align
   // CHECK: = alloca i32*, align
   // CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
@@ -63,11 +66,6 @@ int bar(int n){
   // CHECK: ret void
   // CHECK: }
 
-
-
-
-
-
   // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l26}}(
   // CHECK: [[A_ADDR:%.+]] = alloca i32*, align
   // CHECK: [[AA_ADDR:%.+]] = alloca i16*, align
@@ -79,13 +77,16 @@ int bar(int n){
   // CHECK: [[AA:%.+]] = load i16*, i16** [[AA_ADDR]], align
   // CHECK: [[B:%.+]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align
   // CHECK: [[THREAD_LIMIT:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-  // CHECK: call void @__kmpc_kernel_init(i32
-  // CHECK: call void @__kmpc_push_num_threads
-  // CHECK: call void @__kmpc_kernel_deinit(i16 1)
+  // CHECK: call void @__kmpc_spmd_kernel_init(i32 [[THREAD_LIMIT]], i16 1, i16 0)
+  // CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
+  // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
+  // CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
+  // CHECK: call void [[OUTLINED:@.+]](i32* [[THREADID]], i32* %{{.+}}, i32* [[A]], i16* [[AA]], [10 x i32]* [[B]])
+  // CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
   // CHECK: ret void
   // CHECK: }
 
-  // CHECK: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* {{[^%]*}}[[ARG1:%.+]], i16* {{[^%]*}}[[ARG2:%.+]], [10 x i32]* {{[^%]*}}[[ARG3:%.+]])
+  // CHECK: define internal void [[OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* {{[^%]*}}[[ARG1:%.+]], i16* {{[^%]*}}[[ARG2:%.+]], [10 x i32]* {{[^%]*}}[[ARG3:%.+]])
   // CHECK: = alloca i32*, align
   // CHECK: = alloca i32*, align
   // CHECK: [[A_ADDR:%.+]] = alloca i32*, align

