Author: Amr Hesham
Date: 2025-08-29T19:00:48+02:00
New Revision: 1123a930c632397383a45430708fd1922d00dfe1

URL: 
https://github.com/llvm/llvm-project/commit/1123a930c632397383a45430708fd1922d00dfe1
DIFF: 
https://github.com/llvm/llvm-project/commit/1123a930c632397383a45430708fd1922d00dfe1.diff

LOG: [CIR] DivOp & CompoundAssignmentDiv between ComplexType and ScalarType 
(#155167)

This change adds support for DivOp and CompoundAssignmentDiv between
ComplexType and ScalarType operands.

Issue: https://github.com/llvm/llvm-project/issues/141365
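
For illustration, these are the kinds of source-level expressions the new paths
lower (they mirror the tests added below; the function and variable names here
are hypothetical, and previously these forms hit the errorNYI / llvm_unreachable
paths shown in the diff):

  void mixed_div(float _Complex a, float b, int _Complex c, int d) {
    float _Complex r1 = a / b; // complex / scalar: G.5.1 component-wise division
    float _Complex r2 = b / a; // scalar / complex: b is wrapped as (b + 0i), then cir.complex.div
    a /= b;                    // compound assignment division, now routed through emitBinDiv
    c /= d;                    // complex int /= scalar int: d is promoted to (d + 0i) in the AST
  }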

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
    clang/test/CIR/CodeGen/complex-compound-assignment.cpp
    clang/test/CIR/CodeGen/complex-mul-div.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index bb1b55f2d16f4..41364f516691e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -178,6 +178,10 @@ class ComplexExprEmitter : public 
StmtVisitor<ComplexExprEmitter, mlir::Value> {
   mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
     return emitCompoundAssign(e, &ComplexExprEmitter::emitBinMul);
   }
+
+  mlir::Value VisitBinDivAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinDiv);
+  }
 };
 } // namespace
 
@@ -865,6 +869,10 @@ mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo 
&op) {
   assert(!cir::MissingFeatures::fastMathFlags());
   assert(!cir::MissingFeatures::cgFPOptionsRAII());
 
+  // Handle division between two complex values. In the case of complex integer
+  // types mixed with scalar integers, the scalar integer type will always be
+  // promoted to a complex integer value with a zero imaginary component when
+  // the AST is formed.
   if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
       mlir::isa<cir::ComplexType>(op.rhs.getType())) {
     cir::ComplexRangeKind rangeKind =
@@ -873,8 +881,24 @@ mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo 
&op) {
                                      rangeKind);
   }
 
-  cgf.cgm.errorNYI("ComplexExprEmitter::emitBinDiv between Complex & Scalar");
-  return {};
+  // The C99 standard (G.5.1) defines division of a complex value by a real
+  // value in the following simplified form.
+  if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
+    assert(mlir::cast<cir::ComplexType>(op.lhs.getType()).getElementType() ==
+           op.rhs.getType());
+    mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
+    mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
+    mlir::Value newReal = builder.createFDiv(op.loc, real, op.rhs);
+    mlir::Value newImag = builder.createFDiv(op.loc, imag, op.rhs);
+    return builder.createComplexCreate(op.loc, newReal, newImag);
+  }
+
+  assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
+  cir::ConstantOp nullValue = builder.getNullValue(op.lhs.getType(), op.loc);
+  mlir::Value lhs = builder.createComplexCreate(op.loc, op.lhs, nullValue);
+  cir::ComplexRangeKind rangeKind =
+      getComplexRangeAttr(op.fpFeatures.getComplexRange());
+  return cir::ComplexDivOp::create(builder, op.loc, lhs, op.rhs, rangeKind);
 }
 
 LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
@@ -903,7 +927,7 @@ static CompoundFunc getComplexOp(BinaryOperatorKind op) {
   case BO_MulAssign:
     return &ComplexExprEmitter::emitBinMul;
   case BO_DivAssign:
-    llvm_unreachable("getComplexOp: BO_DivAssign");
+    return &ComplexExprEmitter::emitBinDiv;
   case BO_SubAssign:
     return &ComplexExprEmitter::emitBinSub;
   case BO_AddAssign:

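As a standalone sketch (not clang code; the helper name divide_complex_by_real
is hypothetical), the complex-by-real branch above emits the C99 Annex G.5.1
simplification: each component of the complex dividend is divided by the real
divisor, so no __divsc3 library call is needed. The opposite direction, a real
value divided by a complex one, is instead handled by building a complex value
with a zero imaginary part and emitting cir.complex.div, as the final branch of
emitBinDiv shows.

  #include <complex>

  // (a + bi) / c == (a/c) + (b/c)i -- the component-wise form from G.5.1.
  std::complex<float> divide_complex_by_real(std::complex<float> lhs, float rhs) {
    return {lhs.real() / rhs, lhs.imag() / rhs};
  }
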
diff  --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp 
b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
index 9a6659bd5d93e..82c00863f4d74 100644
--- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -534,6 +534,173 @@ void foo8() {
 // OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
 // OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
 
+void foo10() {
+  float _Complex a;
+  float _Complex b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], %[[A_IMAG]], 
%[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> 
!cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[RESULT:.*]] = call { float, float } @__divsc3(float %[[A_REAL]], 
float %[[A_IMAG]], float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT:.*]] = call{{.*}} <2 x float> @__divsc3(float noundef 
%[[A_REAL]], float noundef %[[A_IMAG]], float noundef %[[B_REAL]], float 
noundef %[[B_IMAG]])
+// OGCG: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo11() {
+  float _Complex a;
+  float b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, 
!cir.float
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[A_REAL]], %[[TMP_B]]) : 
!cir.float
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[A_IMAG]], %[[TMP_B]]) : 
!cir.float
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] 
: !cir.float -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float 
%[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float 
%[[RESULT_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
+void foo12() {
+  int _Complex  a;
+  int b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, 
!cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[B_COMPLEX:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_0]] : 
!s32i -> !cir.complex<!s32i>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> 
!s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> 
!s32i
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[B_COMPLEX]] : !cir.complex<!s32i> 
-> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[B_COMPLEX]] : !cir.complex<!s32i> 
-> !s32i
+// CIR: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) 
: !s32i
+// CIR: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) 
: !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) 
: !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] 
: !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!s32i>, 
!cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_B_COMPLEX:.*]] = insertvalue { i32, i32 } {{.*}}, i32 
%[[TMP_B]], 0
+// LLVM: %[[B_COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP_B_COMPLEX]], i32 
0, 1
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// LLVM: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// LLVM: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// LLVM: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// LLVM: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 
%[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 
%[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// OGCG: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// OGCG: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// OGCG: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// OGCG: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr 
%[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr 
%[[A_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
 #ifndef __cplusplus
 void foo9() {
   float _Complex a;
@@ -571,4 +738,4 @@ void foo9() {
 // C_OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
 // C_OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
 // C_OGCG: %[[ADD_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
-// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4
+// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4
\ No newline at end of file

diff  --git a/clang/test/CIR/CodeGen/complex-mul-div.cpp 
b/clang/test/CIR/CodeGen/complex-mul-div.cpp
index 0fb82b24f6640..d49304660b4d4 100644
--- a/clang/test/CIR/CodeGen/complex-mul-div.cpp
+++ b/clang/test/CIR/CodeGen/complex-mul-div.cpp
@@ -352,7 +352,7 @@ void foo3() {
 // CIR-AFTER-BASIC: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], 
%[[B_REAL]]) : !cir.float
 // CIR-AFTER-BASIC: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], 
%[[B_IMAG]]) : !cir.float
 // CIR-AFTER-BASIC: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]) : !cir.float
-// CIR-AFTER-BASIC: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], 
%14) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]) : !cir.float
 // CIR-AFTER-BASIC: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], 
%[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER-BASIC: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -559,7 +559,7 @@ void foo3() {
 // CIR-AFTER-PROMOTED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG_F64]], 
%[[B_IMAG_F64]]) : !cir.double
 // CIR-AFTER-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]) : !cir.double
 // CIR-AFTER-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]) : !cir.double
-// CIR-AFTER-PROMOTED: %[[RESULT_REAL:.*]] = cir.binop(div, 
%[[ADD_ARBR_AIBI]], %18) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL:.*]] = cir.binop(div, 
%[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
 // CIR-AFTER-PROMOTED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG_F64]], 
%[[B_REAL_F64]]) : !cir.double
 // CIR-AFTER-PROMOTED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL_F64]], 
%[[B_IMAG_F64]]) : !cir.double
 // CIR-AFTER-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]) : !cir.double
@@ -773,3 +773,489 @@ void foo4() {
 // OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 
}, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG-COMBINED: store i32 %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
 // OGCG-COMBINED: store i32 %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo5() {
+  float _Complex a;
+  float b;
+  float _Complex c = a / b;
+}
+
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, 
["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.float>, !cir.float
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[A_REAL]], %[[TMP_B]]) 
: !cir.float
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[A_IMAG]], %[[TMP_B]]) 
: !cir.float
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], 
%[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], 
align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, 
float %[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { float, float } 
%[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo6() {
+  float  a;
+  float _Complex b;
+  float _Complex c = a / b;
+}
+
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : 
!cir.complex<!cir.float>
+
+// CIR-AFTER-BASIC: %[[A_ADDR:.*]] = cir.alloca !cir.float, 
!cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-BASIC: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-BASIC: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-BASIC: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-BASIC: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : 
!cir.float
+// CIR-AFTER-BASIC: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], 
%[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], 
%[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], 
%[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], 
%[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], 
%[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], 
%[[B_REAL]]) : !cir.float
+// CIR-AFTER-BASIC: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], 
%[[B_IMAG]]) : !cir.float
+// CIR-AFTER-BASIC: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]) : !cir.float
+// CIR-AFTER-BASIC: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], 
%[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-BASIC: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-BASIC: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-BASIC: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-BASIC: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-BASIC: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } undef, float %[[TMP_A]], 0
+// LLVM-BASIC: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-BASIC: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-BASIC: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[TMP_A]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_AI_BI:.*]] = fmul float 0.000000e+00, %[[B_IMAG]]
+// LLVM-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// LLVM-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// LLVM-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]
+// LLVM-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[MUL_AI_BR:.*]] = fmul float 0.000000e+00, %[[B_REAL]]
+// LLVM-BASIC: %[[MUL_BR_BI:.*]] = fmul float %[[TMP_A]], %[[B_IMAG]]
+// LLVM-BASIC: %[[SUB_AIBR_BRBI:.*]] = fsub float %[[MUL_AI_BR]], 
%[[MUL_BR_BI]]
+// LLVM-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_BRBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-BASIC: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float 
%[[RESULT_REAL]], 0
+// LLVM-BASIC: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], 
float %[[RESULT_IMAG]], 1
+// LLVM-BASIC: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-BASIC: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-BASIC: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-BASIC: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-BASIC: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-BASIC: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-BASIC: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-BASIC: %[[MUL_AR_BR:.*]] = fmul float %[[TMP_A]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AI_BI:.*]] = fmul float 0.000000e+00, %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_ARBR_AIBI:.*]] = fadd float %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// OGCG-BASIC: %[[MUL_BR_BR:.*]] = fmul float %[[B_REAL]], %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_BI_BI:.*]] = fmul float %[[B_IMAG]], %[[B_IMAG]]
+// OGCG-BASIC: %[[ADD_BRBR_BIBI:.*]] = fadd float %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]
+// OGCG-BASIC: %[[MUL_AI_BR:.*]] = fmul float 0.000000e+00, %[[B_REAL]]
+// OGCG-BASIC: %[[MUL_AR_BI:.*]] = fmul float %[[TMP_A]], %[[B_IMAG]]
+// OGCG-BASIC: %[[SUB_AIBR_BRBI:.*]] = fsub float %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]
+// OGCG-BASIC: %[[RESULT_REAL:.*]] = fdiv float %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[RESULT_IMAG:.*]] = fdiv float %[[SUB_AIBR_BRBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-BASIC: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-BASIC: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-BASIC: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-BASIC: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} 
range(improved) : !cir.complex<!cir.float>
+
+// CIR-AFTER-IMPROVED: %[[A_ADDR:.*]] = cir.alloca !cir.float, 
!cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-IMPROVED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-IMPROVED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-IMPROVED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-IMPROVED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : 
!cir.float
+// CIR-AFTER-IMPROVED: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], 
%[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_REAL:.*]] = cir.fabs %[[B_REAL]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_IMAG:.*]] = cir.fabs %[[B_IMAG]] : !cir.float
+// CIR-AFTER-IMPROVED: %[[ABS_B_CMP:.*]] = cir.cmp(ge, %[[ABS_B_REAL]], 
%[[ABS_B_IMAG]]) : !cir.float, !cir.bool
+// CIR-AFTER-IMPROVED: %[[RESULT:.*]] = cir.ternary(%[[ABS_B_CMP]], true {
+// CIR-AFTER-IMPROVED:   %[[DIV_BI_BR:.*]] = cir.binop(div, %[[B_IMAG]], 
%[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_DIV_BIBR_BI:.*]] = cir.binop(mul, 
%[[DIV_BI_BR]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = cir.binop(add, 
%[[B_REAL]], %[[MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_AI_DIV_BIBR:.*]] = cir.binop(mul, %[[A_IMAG]], 
%[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = cir.binop(add, 
%[[A_REAL]], %[[MUL_AI_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_REAL:.*]] = cir.binop(div, 
%[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_AR_DIV_BIBR:.*]] = cir.binop(mul, %[[A_REAL]], 
%[[DIV_BI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = cir.binop(sub, 
%[[A_IMAG]], %[[MUL_AR_DIV_BIBR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_IMAG:.*]] = cir.binop(div, 
%[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_COMPLEX:.*]] = cir.complex.create 
%[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED:   cir.yield %[[RESULT_COMPLEX]] : 
!cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }, false {
+// CIR-AFTER-IMPROVED:   %[[DIV_BR_BI:.*]] = cir.binop(div, %[[B_REAL]], 
%[[B_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_DIV_BRBI_BR:.*]] = cir.binop(mul, 
%[[DIV_BR_BI]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = cir.binop(add, 
%[[B_IMAG]], %[[MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_AR_DIV_BIBR:.*]] = cir.binop(mul, %[[A_REAL]], 
%[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = cir.binop(add, 
%[[MUL_AR_DIV_BIBR]], %[[A_IMAG]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_REAL:.*]] = cir.binop(div, 
%[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[MUL_AI_DIV_BRBI:.*]] = cir.binop(mul, %[[A_IMAG]], 
%[[DIV_BR_BI]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = cir.binop(sub, 
%[[MUL_AI_DIV_BRBI]], %[[A_REAL]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_IMAG:.*]] = cir.binop(div, 
%[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]) : !cir.float
+// CIR-AFTER-IMPROVED:   %[[RESULT_COMPLEX:.*]] = cir.complex.create 
%[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED:   cir.yield %[[RESULT_COMPLEX]] : 
!cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR-AFTER-IMPROVED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-IMPROVED: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-IMPROVED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-IMPROVED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], 
align 4
+// LLVM-IMPROVED: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, 
float %[[TMP_A]], 0
+// LLVM-IMPROVED: %[[COMPLEX_A:.*]] = insertvalue { float, float } 
%[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-IMPROVED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-IMPROVED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float 
%[[B_REAL]])
+// LLVM-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float 
%[[B_IMAG]])
+// LLVM-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp oge float %[[ABS_B_REAL]], 
%[[ABS_B_IMAG]]
+// LLVM-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label 
%[[ABS_BR_LT_ABS_BI:.*]]
+// LLVM-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// LLVM-IMPROVED:  %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// LLVM-IMPROVED:  %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], 
%[[B_IMAG]]
+// LLVM-IMPROVED:  %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], 
%[[MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED:  %[[MUL_AI_DIV_BIBR:.*]] = fmul float 0.000000e+00, 
%[[DIV_BI_BR]]
+// LLVM-IMPROVED:  %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[TMP_A]], 
%[[MUL_AI_DIV_BIBR]]
+// LLVM-IMPROVED:  %[[RESULT_REAL:.*]] = fdiv float %[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED:  %[[MUL_AR_DIV_BIBR:.*]] = fmul float %[[TMP_A]], 
%[[DIV_BI_BR]]
+// LLVM-IMPROVED:  %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float 0.000000e+00, 
%[[MUL_AR_DIV_BIBR]]
+// LLVM-IMPROVED:  %[[RESULT_IMAG:.*]] = fdiv float 
%[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// LLVM-IMPROVED:  %[[TMP_THEN_RESULT:.*]] = insertvalue { float, float } 
{{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED:  %[[THEN_RESULT:.*]] = insertvalue { float, float } 
%[[TMP_THEN_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED:  br label %[[PHI_RESULT:.*]]
+// LLVM-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// LLVM-IMPROVED:  %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// LLVM-IMPROVED:  %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], 
%[[B_REAL]]
+// LLVM-IMPROVED:  %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], 
%[[MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED:  %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[TMP_A]], 
%[[DIV_BR_BI]]
+// LLVM-IMPROVED:  %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float 
%[[MUL_AR_DIV_BRBI]], 0.000000e+00
+// LLVM-IMPROVED:  %[[RESULT_REAL:.*]] = fdiv float 
%[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED:  %[[MUL_AI_DIV_BRBI:.*]] = fmul float 0.000000e+00, 
%[[DIV_BR_BI]]
+// LLVM-IMPROVED:  %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float 
%[[MUL_AI_DIV_BRBI]], %[[TMP_A]]
+// LLVM-IMPROVED:  %[[RESULT_IMAG:.*]] = fdiv float 
%[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// LLVM-IMPROVED:  %[[TMP_ELSE_RESULT:.*]] = insertvalue { float, float } 
{{.*}}, float %[[RESULT_REAL]], 0
+// LLVM-IMPROVED:  %[[ELSE_RESULT:.*]] = insertvalue { float, float } 
%[[TMP_ELSE_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM-IMPROVED:  br label %[[PHI_RESULT]]
+// LLVM-IMPROVED: [[PHI_RESULT]]:
+// LLVM-IMPROVED:  %[[RESULT:.*]] = phi { float, float } [ %[[ELSE_RESULT]], 
%[[ABS_BR_LT_ABS_BI]] ], [ %[[THEN_RESULT]], %[[ABS_BR_GT_ABS_BI]] ]
+// LLVM-IMPROVED:  br label %[[STORE_RESULT:.*]]
+// LLVM-IMPROVED: [[STORE_RESULT]]:
+// LLVM-IMPROVED:  store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-IMPROVED: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-IMPROVED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-IMPROVED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-IMPROVED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-IMPROVED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-IMPROVED: %[[ABS_B_REAL:.*]] = call float @llvm.fabs.f32(float 
%[[B_REAL]])
+// OGCG-IMPROVED: %[[ABS_B_IMAG:.*]] = call float @llvm.fabs.f32(float 
%[[B_IMAG]])
+// OGCG-IMPROVED: %[[ABS_B_CMP:.*]] = fcmp ugt float %[[ABS_B_REAL]], 
%[[ABS_B_IMAG]]
+// OGCG-IMPROVED: br i1 %[[ABS_B_CMP]], label %[[ABS_BR_GT_ABS_BI:.*]], label 
%[[ABS_BR_LT_ABS_BI:.*]]
+// OGCG-IMPROVED: [[ABS_BR_GT_ABS_BI]]:
+// OGCG-IMPROVED:  %[[DIV_BI_BR:.*]] = fdiv float %[[B_IMAG]], %[[B_REAL]]
+// OGCG-IMPROVED:  %[[MUL_DIV_BIBR_BI:.*]] = fmul float %[[DIV_BI_BR]], 
%[[B_IMAG]]
+// OGCG-IMPROVED:  %[[ADD_BR_MUL_DIV_BIBR_BI:.*]] = fadd float %[[B_REAL]], 
%[[MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED:  %[[MUL_AI_DIV_BIBR:.*]] = fmul float 0.000000e+00, 
%[[DIV_BI_BR]]
+// OGCG-IMPROVED:  %[[ADD_AR_MUL_AI_DIV_BIBR:.*]] = fadd float %[[TMP_A]], 
%[[MUL_AI_DIV_BIBR]]
+// OGCG-IMPROVED:  %[[THEN_RESULT_REAL:.*]] = fdiv float 
%[[ADD_AR_MUL_AI_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED:  %[[MUL_AR_DIV_BI_BR:.*]] = fmul float %[[TMP_A]], 
%[[DIV_BI_BR]]
+// OGCG-IMPROVED:  %[[SUB_AI_MUL_AR_DIV_BIBR:.*]] = fsub float 0.000000e+00, 
%[[MUL_AR_DIV_BI_BR]]
+// OGCG-IMPROVED:  %[[THEN_RESULT_IMAG:.*]] = fdiv float 
%[[SUB_AI_MUL_AR_DIV_BIBR]], %[[ADD_BR_MUL_DIV_BIBR_BI]]
+// OGCG-IMPROVED:  br label %[[STORE_RESULT:.*]]
+// OGCG-IMPROVED: [[ABS_BR_LT_ABS_BI]]:
+// OGCG-IMPROVED:  %[[DIV_BR_BI:.*]] = fdiv float %[[B_REAL]], %[[B_IMAG]]
+// OGCG-IMPROVED:  %[[MUL_DIV_BRBI_BR:.*]] = fmul float %[[DIV_BR_BI]], 
%[[B_REAL]]
+// OGCG-IMPROVED:  %[[ADD_BI_MUL_DIV_BRBI_BR:.*]] = fadd float %[[B_IMAG]], 
%[[MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED:  %[[MUL_AR_DIV_BRBI:.*]] = fmul float %[[TMP_A]], 
%[[DIV_BR_BI]]
+// OGCG-IMPROVED:  %[[ADD_MUL_AR_DIV_BRBI_AI:.*]] = fadd float 
%[[MUL_AR_DIV_BRBI]], 0.000000e+00
+// OGCG-IMPROVED:  %[[ELSE_RESULT_REAL:.*]] = fdiv float 
%[[ADD_MUL_AR_DIV_BRBI_AI]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED:  %[[MUL_AI_DIV_BRBI:.*]] = fmul float 0.000000e+00, 
%[[DIV_BR_BI]]
+// OGCG-IMPROVED:  %[[SUB_MUL_AI_DIV_BRBI_AR:.*]] = fsub float 
%[[MUL_AI_DIV_BRBI]], %[[TMP_A]]
+// OGCG-IMPROVED:  %[[ELSE_RESULT_IMAG:.*]] = fdiv float 
%[[SUB_MUL_AI_DIV_BRBI_AR]], %[[ADD_BI_MUL_DIV_BRBI_BR]]
+// OGCG-IMPROVED:  br label %[[STORE_RESULT]]
+// OGCG-IMPROVED: [[STORE_RESULT]]:
+// OGCG-IMPROVED:  %[[RESULT_REAL:.*]] = phi float [ %[[THEN_RESULT_REAL]], 
%[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_REAL]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED:  %[[RESULT_IMAG:.*]] = phi float [ %[[THEN_RESULT_IMAG]], 
%[[ABS_BR_GT_ABS_BI]] ], [ %[[ELSE_RESULT_IMAG]], %[[ABS_BR_LT_ABS_BI]] ]
+// OGCG-IMPROVED:  %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-IMPROVED:  %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-IMPROVED:  store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-IMPROVED:  store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} 
range(promoted) : !cir.complex<!cir.float>
+
+// CIR-AFTER-PROMOTED: %[[A_ADDR:.*]] = cir.alloca !cir.float, 
!cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-PROMOTED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-PROMOTED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-PROMOTED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-PROMOTED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : 
!cir.float
+// CIR-AFTER-PROMOTED: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], 
%[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : 
!cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : 
!cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : 
!cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : 
!cir.float), !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], 
%[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], 
%[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], 
%[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG_F64]], 
%[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL:.*]] = cir.binop(div, 
%[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG_F64]], 
%[[B_REAL_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL_F64]], 
%[[B_IMAG_F64]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG:.*]] = cir.binop(div, 
%[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create 
%[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex<!cir.double>
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real 
%[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag 
%[[RESULT_F64]] : !cir.complex<!cir.double> -> !cir.double
+// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, 
%[[RESULT_REAL_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, 
%[[RESULT_IMAG_F64]] : !cir.double), !cir.float
+// CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create 
%[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> 
!cir.complex<!cir.float>
+// CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-PROMOTED: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-PROMOTED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-PROMOTED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], 
align 4
+// LLVM-PROMOTED: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, 
float %[[TMP_A]], 0
+// LLVM-PROMOTED: %[[COMPLEX_A:.*]] = insertvalue { float, float } 
%[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-PROMOTED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-PROMOTED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[TMP_A]] to double
+// LLVM-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// LLVM-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// LLVM-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], 
%[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double 0.000000e+00, %[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], 
%[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], 
%[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// LLVM-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]
+// LLVM-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double 0.000000e+00, %[[B_REAL_F64]]
+// LLVM-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], 
%[[B_IMAG_F64]]
+// LLVM-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], 
%[[MUL_AR_BR]]
+// LLVM-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-PROMOTED: %[[TMP_RESULT_F64:.*]] = insertvalue { double, double } 
{{.*}}, double %[[RESULT_REAL]], 0
+// LLVM-PROMOTED: %[[RESULT_F64:.*]] = insertvalue { double, double } 
%[[TMP_RESULT_F64]], double %[[RESULT_IMAG]], 1
+// LLVM-PROMOTED: %[[RESULT_REAL_F32:.*]] = fptrunc double %[[RESULT_REAL]] to 
float
+// LLVM-PROMOTED: %[[RESULT_IMAG_F32:.*]] = fptrunc double %[[RESULT_IMAG]] to 
float
+// LLVM-PROMOTED: %[[TMP_RESULT_F32:.*]] = insertvalue { float, float } 
{{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM-PROMOTED: %[[RESULT_F32:.*]] = insertvalue { float, float } 
%[[TMP_RESULT_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM-PROMOTED: store { float, float } %[[RESULT_F32]], ptr %[[C_ADDR]], 
align 4
+
+// OGCG-PROMOTED: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-PROMOTED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-PROMOTED: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-PROMOTED: %[[A_REAL_F64:.*]] = fpext float %[[TMP_A]] to double
+// OGCG-PROMOTED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-PROMOTED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-PROMOTED: %[[B_REAL_F64:.*]] = fpext float %[[B_REAL]] to double
+// OGCG-PROMOTED: %[[B_IMAG_F64:.*]] = fpext float %[[B_IMAG]] to double
+// OGCG-PROMOTED: %[[MUL_AR_BR:.*]] = fmul double %[[A_REAL_F64]], 
%[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AI_BI:.*]] = fmul double 0.000000e+00, %[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_ARBR_AIBI:.*]] = fadd double %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// OGCG-PROMOTED: %[[MUL_BR_BR:.*]] = fmul double %[[B_REAL_F64]], 
%[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_BI_BI:.*]] = fmul double %[[B_IMAG_F64]], 
%[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[ADD_BRBR_BIBI:.*]] = fadd double %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]
+// OGCG-PROMOTED: %[[MUL_AI_BR:.*]] = fmul double 0.000000e+00, %[[B_REAL_F64]]
+// OGCG-PROMOTED: %[[MUL_AR_BI:.*]] = fmul double %[[A_REAL_F64]], 
%[[B_IMAG_F64]]
+// OGCG-PROMOTED: %[[SUB_AIBR_ARBI:.*]] = fsub double %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]
+// OGCG-PROMOTED: %[[RESULT_REAL:.*]] = fdiv double %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[RESULT_IMAG:.*]] = fdiv double %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_REAL:.*]] = fptrunc double 
%[[RESULT_REAL]] to float
+// OGCG-PROMOTED: %[[UNPROMOTION_RESULT_IMAG:.*]] = fptrunc double 
%[[RESULT_IMAG]] to float
+// OGCG-PROMOTED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-PROMOTED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_REAL]], ptr 
%[[C_REAL_PTR]], align 4
+// OGCG-PROMOTED: store float %[[UNPROMOTION_RESULT_IMAG]], ptr 
%[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : 
!cir.complex<!cir.float>
+
+// CIR-AFTER-FULL: %[[A_ADDR:.*]] = cir.alloca !cir.float, 
!cir.ptr<!cir.float>, ["a"]
+// CIR-AFTER-FULL: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-FULL: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, 
!cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-FULL: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.float>, !cir.float
+// CIR-AFTER-FULL: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : 
!cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[CONST_0:.*]] = cir.const #cir.fp<0.000000e+00> : 
!cir.float
+// CIR-AFTER-FULL: %[[COMPLEX_A:.*]] = cir.complex.create %[[TMP_A]], 
%[[CONST_0]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[A_REAL:.*]] = cir.complex.real %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : 
!cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], 
%[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, 
!cir.float) -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-FULL: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM-FULL: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM-FULL: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-FULL: %[[TMP_COMPLEX_A:.*]] = insertvalue { float, float } {{.*}}, 
float %[[TMP_A]], 0
+// LLVM-FULL: %[[COMPLEX_A:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_A]], float 0.000000e+00, 1
+// LLVM-FULL: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-FULL: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-FULL: %[[RESULT:.*]] = call { float, float } @__divsc3(float 
%[[TMP_A]], float 0.000000e+00, float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM-FULL: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-FULL: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG-FULL: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG-FULL: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-FULL: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-FULL: %[[RESULT:.*]] = call noundef <2 x float> @__divsc3(float 
noundef %[[TMP_A]], float noundef 0.000000e+00, float noundef %[[B_REAL]], 
float noundef %[[B_IMAG]])
+// OGCG-FULL: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG-FULL: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], 
align 4
+// OGCG-FULL: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, 
float }, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], 
align 4
+// OGCG-FULL: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float 
}, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-FULL: store float %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-FULL: store float %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo7() {
+  int _Complex a;
+  int b;
+  int _Complex c = a / b;
+}
+
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(basic) : 
!cir.complex<!s32i>
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} 
range(improved) : !cir.complex<!s32i>
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.div {{.*}}, {{.*}} 
range(promoted) : !cir.complex<!s32i>
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.div {{.*}}, {{.*}} range(full) : 
!cir.complex<!s32i>
+
+// CIR-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, 
!cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR-COMBINED: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
+// CIR-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, 
!cir.ptr<!cir.complex<!s32i>>, ["c", init]
+// CIR-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : 
!cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, 
!s32i
+// CIR-COMBINED: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR-COMBINED: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_0]] : !s32i -> !cir.complex<!s32i>
+// CIR-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : 
!cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : 
!cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_REAL:.*]] = cir.complex.real %[[COMPLEX_B]] : 
!cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[B_IMAG:.*]] = cir.complex.imag %[[COMPLEX_B]] : 
!cir.complex<!s32i> -> !s32i
+// CIR-COMBINED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) 
: !s32i
+// CIR-COMBINED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) 
: !s32i
+// CIR-COMBINED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) 
: !s32i
+// CIR-COMBINED: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) 
: !s32i
+// CIR-COMBINED: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]) : !s32i
+// CIR-COMBINED: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], 
%[[MUL_BI_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) 
: !s32i
+// CIR-COMBINED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) 
: !s32i
+// CIR-COMBINED: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]) : !s32i
+// CIR-COMBINED: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]) : !s32i
+// CIR-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], 
%[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : 
!cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-COMBINED: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM-COMBINED: %[[TMP_COMPLEX_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 
%[[TMP_B]], 0
+// LLVM-COMBINED: %[[COMPLEX_B:.*]] = insertvalue { i32, i32 } 
%[[TMP_COMPLEX_B]], i32 0, 1
+// LLVM-COMBINED: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM-COMBINED: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// LLVM-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// LLVM-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// LLVM-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// LLVM-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// LLVM-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// LLVM-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// LLVM-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]
+// LLVM-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]
+// LLVM-COMBINED: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 
%[[RESULT_REAL]], 0
+// LLVM-COMBINED: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], 
i32 %[[RESULT_IMAG]], 1
+// LLVM-COMBINED: store { i32, i32 } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-COMBINED: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG-COMBINED: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 
}, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 
}, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-COMBINED: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG-COMBINED: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// OGCG-COMBINED: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// OGCG-COMBINED: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], 
%[[MUL_AI_BI]]
+// OGCG-COMBINED: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// OGCG-COMBINED: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// OGCG-COMBINED: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// OGCG-COMBINED: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// OGCG-COMBINED: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], 
%[[MUL_AR_BI]]
+// OGCG-COMBINED: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], 
%[[ADD_BRBR_BIBI]]
+// OGCG-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 
}, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 
}, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-COMBINED: store i32 %[[RESULT_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-COMBINED: store i32 %[[RESULT_IMAG]], ptr %[[C_IMAG_PTR]], align 4


        