https://github.com/vasu-the-sharma updated 
https://github.com/llvm/llvm-project/pull/175032

>From f10bbba299bfcda6ac69af7aa3b7e11107484d72 Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Thu, 8 Jan 2026 11:49:39 -0500
Subject: [PATCH 1/4] Add test coverage for UBSan null/alignment checks on aggregate assignment (ubsan-aggregate-null-align.c)

---
 .../test/CodeGen/ubsan-aggregate-null-align.c | 48 +++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 clang/test/CodeGen/ubsan-aggregate-null-align.c

diff --git a/clang/test/CodeGen/ubsan-aggregate-null-align.c 
b/clang/test/CodeGen/ubsan-aggregate-null-align.c
new file mode 100644
index 0000000000000..7ca9d32c3305b
--- /dev/null
+++ b/clang/test/CodeGen/ubsan-aggregate-null-align.c
@@ -0,0 +1,48 @@
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
+// RUN:    -fsanitize=null,alignment | FileCheck %s 
--check-prefix=CHECK-SANITIZE
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
+// RUN:    | FileCheck %s --check-prefix=CHECK-NO-SANITIZE
+
+struct Small { int x; };
+struct Container { struct Small inner; };
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
+// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest.addr
+// CHECK-SANITIZE: %[[S:.*]] = load ptr, ptr %src.addr
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr 
align 4 %[[S]], i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_direct_assign_ptr(struct Small *dest, struct Small *src) {
+  *dest = *src;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
+// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr 
{{.*}}, i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_null_dest(struct Small *src) {
+  struct Small *dest = 0;
+  *dest = *src;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
+// CHECK-SANITIZE: %[[VAL1:.*]] = icmp ne ptr %[[C:.*]], null
+// CHECK-SANITIZE: br i1 %{{.*}}, label %cont, label %handler.type_mismatch
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_nested_struct(struct Container *c, struct Small *s) {
+  c->inner = *s;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %{{.*}}, ptr 
align 4 %{{.*}}, i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_comma_operator(struct Small *dest, struct Small *src) {
+  *dest = (0, *src);
+}

>From cc64e3f111e790760b1c624fa27ae03701f1d021 Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Thu, 8 Jan 2026 12:15:28 -0500
Subject: [PATCH 2/4] Emit UBSan null and alignment checks for aggregate assignments

---
 clang/lib/CodeGen/CGExprAgg.cpp               | 43 +++------
 clang/lib/CodeGen/CGExprCXX.cpp               | 29 +++---
 .../test/CodeGen/ubsan-aggregate-null-align.c | 91 +++++++++++--------
 3 files changed, 82 insertions(+), 81 deletions(-)

diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 7cc4d6c8f06f6..919e510a82af0 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -1292,45 +1292,29 @@ static bool isBlockVarRef(const Expr *E) {
 
 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
   ApplyAtomGroup Grp(CGF.getDebugInfo());
-  // For an assignment to work, the value on the right has
-  // to be compatible with the value on the left.
   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                  E->getRHS()->getType())
-         && "Invalid assignment");
+          && "Invalid assignment");
 
-  // If the LHS might be a __block variable, and the RHS can
-  // potentially cause a block copy, we need to evaluate the RHS first
-  // so that the assignment goes the right place.
-  // This is pretty semantically fragile.
   if (isBlockVarRef(E->getLHS()) &&
       E->getRHS()->HasSideEffects(CGF.getContext())) {
-    // Ensure that we have a destination, and evaluate the RHS into that.
     EnsureDest(E->getRHS()->getType());
     Visit(E->getRHS());
-
-    // Now emit the LHS and copy into it.
     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), 
CodeGenFunction::TCK_Store);
 
-    // That copy is an atomic copy if the LHS is atomic.
     if (LHS.getType()->isAtomicType() ||
         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
       return;
     }
-
-    EmitCopy(E->getLHS()->getType(),
-             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
-                                     needsGC(E->getLHS()->getType()),
-                                     AggValueSlot::IsAliased,
-                                     AggValueSlot::MayOverlap),
-             Dest);
+    EmitFinalDestCopy(E->getLHS()->getType(), LHS);
     return;
   }
 
-  LValue LHS = CGF.EmitLValue(E->getLHS());
+  // ✅ FIX: Use EmitCheckedLValue for LHS
+  LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
 
-  // If we have an atomic type, evaluate into the destination and then
-  // do an atomic copy.
+  // ✅ RE-ADD: Original atomic handling logic
   if (LHS.getType()->isAtomicType() ||
       CGF.LValueIsSuitableForInlineAtomic(LHS)) {
     EnsureDest(E->getRHS()->getType());
@@ -1339,20 +1323,23 @@ void AggExprEmitter::VisitBinAssign(const 
BinaryOperator *E) {
     return;
   }
 
-  // Codegen the RHS so that it stores directly into the LHS.
+  // ✅ FIX: Handle RHS based on LValue/RValue
   AggValueSlot LHSSlot = AggValueSlot::forLValue(
       LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
       AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
-  // A non-volatile aggregate destination might have volatile member.
-  if (!LHSSlot.isVolatile() &&
-      CGF.hasVolatileMember(E->getLHS()->getType()))
-    LHSSlot.setVolatile(true);
 
-  CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+  if (E->getRHS()->isLValue()) {
+    LValue RHS = CGF.EmitCheckedLValue(E->getRHS(), CodeGenFunction::TCK_Load);
+    CGF.EmitAggregateCopy(LHS, RHS, E->getType(), Dest.isVolatile());
+  } else {
+    if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
+      LHSSlot.setVolatile(true);
+    CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+  }
 
-  // Copy into the destination if the assignment isn't ignored.
   EmitFinalDestCopy(E->getType(), LHS);
 
+  // ✅ RE-ADD: Original Nontrivial C struct destruction logic
   if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
       E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
     CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index ce2ed9026fa1f..eae27a6a3f1c8 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -267,7 +267,7 @@ RValue 
CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
   if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
     if (OCE->isAssignmentOp()) {
       if (TrivialAssignment) {
-        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
+        TrivialAssignmentRHS = EmitCheckedLValue(CE->getArg(1), TCK_Load);
       } else {
         RtlArgs = &RtlArgStorage;
         EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
@@ -309,22 +309,21 @@ RValue 
CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
   if (TrivialForCodegen) {
     if (isa<CXXDestructorDecl>(MD))
       return RValue::get(nullptr);
+  }
 
-    if (TrivialAssignment) {
-      // We don't like to generate the trivial copy/move assignment operator
-      // when it isn't necessary; just produce the proper effect here.
-      // It's important that we use the result of EmitLValue here rather than
-      // emitting call arguments, in order to preserve TBAA information from
-      // the RHS.
-      LValue RHS = isa<CXXOperatorCallExpr>(CE)
-                       ? TrivialAssignmentRHS
-                       : EmitLValue(*CE->arg_begin());
-      EmitAggregateAssign(This, RHS, CE->getType());
-      return RValue::get(This.getPointer(*this));
-    }
+  if (TrivialAssignment) {
+    // 1. Evaluate 'this' (Destination) as a checked store.
+    LValue This = EmitCheckedLValue(Base, TCK_Store);
+
+    // 2. Evaluate RHS (Source) as a checked load.
+    // If it's an operator call (a = b), we use the RHS evaluated at line 270.
+    // If it's a direct call (constructor), we evaluate the first argument.
+    LValue RHS = isa<CXXOperatorCallExpr>(CE)
+                     ? TrivialAssignmentRHS
+                     : EmitCheckedLValue(*CE->arg_begin(), TCK_Load);
 
-    assert(MD->getParent()->mayInsertExtraPadding() &&
-           "unknown trivial member function");
+    EmitAggregateAssign(This, RHS, CE->getType());
+    return RValue::get(This.getPointer(*this));
   }
 
   // Compute the function type we're calling.
diff --git a/clang/test/CodeGen/ubsan-aggregate-null-align.c 
b/clang/test/CodeGen/ubsan-aggregate-null-align.c
index 7ca9d32c3305b..18133327f0fc8 100644
--- a/clang/test/CodeGen/ubsan-aggregate-null-align.c
+++ b/clang/test/CodeGen/ubsan-aggregate-null-align.c
@@ -1,48 +1,63 @@
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
-// RUN:    -fsanitize=null,alignment | FileCheck %s 
--check-prefix=CHECK-SANITIZE
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
-// RUN:    | FileCheck %s --check-prefix=CHECK-NO-SANITIZE
-
-struct Small { int x; };
-struct Container { struct Small inner; };
-
-// CHECK-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
-// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest.addr
-// CHECK-SANITIZE: %[[S:.*]] = load ptr, ptr %src.addr
-// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr 
align 4 %[[S]], i64 4, i1 false)
-//
-// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
-// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=alignment,null \
+// RUN:   -emit-llvm -std=c23 %s -o - \
+// RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-UBSAN
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -std=c23 %s -o - \
+// RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-NO-UBSAN
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsanitize=alignment,null \
+// RUN:   -emit-llvm -xc++ %s -o - \
+// RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-UBSAN
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -xc++ %s -o - \
+// RUN:   | FileCheck %s --check-prefixes=CHECK,CHECK-NO-UBSAN
+
+typedef struct Small { int x; } Small;
+typedef struct Container { struct Small inner; } Container;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// CHECK-LABEL: define {{.*}}void @test_direct_assign_ptr(
 void test_direct_assign_ptr(struct Small *dest, struct Small *src) {
-  *dest = *src;
-}
+  // CHECK-UBSAN: %[[D:.*]] = load ptr, ptr %dest.addr
+  // CHECK-UBSAN: %[[S:.*]] = load ptr, ptr %src.addr
+  
+  // Verify LHS (Dest) Check
+  // CHECK-UBSAN: %[[D_NULL:.*]] = icmp ne ptr %[[D]], null
+  // CHECK-UBSAN: %[[D_ALIGN:.*]] = and i64 %{{.*}}, 3
+  // CHECK-UBSAN: %[[D_ALIGN_OK:.*]] = icmp eq i64 %[[D_ALIGN]], 0
+  // CHECK-UBSAN: %[[D_OK:.*]] = and i1 %[[D_NULL]], %[[D_ALIGN_OK]]
+  // CHECK-UBSAN: br i1 %[[D_OK]], label %[[D_CONT:.*]], label 
%[[D_HANDLER:.*]]
+
+  // CHECK-UBSAN: [[D_HANDLER]]:
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1_abort
+  // CHECK-UBSAN: unreachable
+
+  // CHECK-UBSAN: [[D_CONT]]:
+  // Verify RHS (Src) Check
+  // CHECK-UBSAN: %[[S_NULL:.*]] = icmp ne ptr %[[S]], null
+  // CHECK-UBSAN: br i1 %[[S_NULL]], label %[[S_CONT:.*]], label 
%[[S_HANDLER:.*]]
+
+  // CHECK-UBSAN: [[S_HANDLER]]:
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1_abort
 
-// CHECK-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
-// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest
-// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr 
{{.*}}, i64 4, i1 false)
-//
-// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
-// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
-void test_null_dest(struct Small *src) {
-  struct Small *dest = 0;
+  // CHECK-UBSAN: [[S_CONT]]:
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr align 4 
%[[S]], i64 4, i1 false)
+  
+  // CHECK-NO-UBSAN-NOT: @__ubsan_handle_type_mismatch
   *dest = *src;
 }
 
-// CHECK-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
-// CHECK-SANITIZE: %[[VAL1:.*]] = icmp ne ptr %[[C:.*]], null
-// CHECK-SANITIZE: br i1 %{{.*}}, label %cont, label %handler.type_mismatch
-//
-// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
-// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+// CHECK-LABEL: define {{.*}}void @test_nested_struct(
 void test_nested_struct(struct Container *c, struct Small *s) {
+  // CHECK-UBSAN: %[[C:.*]] = load ptr, ptr %c.addr
+  // CHECK-UBSAN: icmp ne ptr %[[C]], null
+  // CHECK-UBSAN: br i1 %{{.*}}, label %[[CONT:.*]], label %[[HANDLER:.*]]
+  
+  // CHECK-UBSAN: [[HANDLER]]:
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1_abort
   c->inner = *s;
 }
 
-// CHECK-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
-// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %{{.*}}, ptr 
align 4 %{{.*}}, i64 4, i1 false)
-//
-// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
-// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
-void test_comma_operator(struct Small *dest, struct Small *src) {
-  *dest = (0, *src);
+#ifdef __cplusplus
 }
+#endif

>From d2f768b38404f718c06fbfc3e7ba2b952c9802e2 Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Fri, 9 Jan 2026 02:16:03 -0500
Subject: [PATCH 3/4] Fix git-clang-format errors and restore original comments

---
 clang/lib/CodeGen/CGExprAgg.cpp | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 919e510a82af0..7cc1174b98e66 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -1292,29 +1292,41 @@ static bool isBlockVarRef(const Expr *E) {
 
 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
   ApplyAtomGroup Grp(CGF.getDebugInfo());
+  // For an assignment to work, the value on the right has
+  // to be compatible with the value on the left.
   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                  E->getRHS()->getType())
-          && "Invalid assignment");
+         && "Invalid assignment");
 
+  // If the LHS might be a __block variable, and the RHS can
+  // potentially cause a block copy, we need to evaluate the RHS first
+  // so that the assignment goes the right place.
+  // This is pretty semantically fragile.
   if (isBlockVarRef(E->getLHS()) &&
       E->getRHS()->HasSideEffects(CGF.getContext())) {
+    // Ensure that we have a destination, and evaluate the RHS into that.
     EnsureDest(E->getRHS()->getType());
     Visit(E->getRHS());
+
+    // Now emit the LHS and copy into it.
     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), 
CodeGenFunction::TCK_Store);
 
+    // That copy is an atomic copy if the LHS is atomic.
     if (LHS.getType()->isAtomicType() ||
         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
-      return;
+      return;  
     }
+
     EmitFinalDestCopy(E->getLHS()->getType(), LHS);
     return;
   }
 
-  // ✅ FIX: Use EmitCheckedLValue for LHS
+  // Use EmitCheckedLValue for LHS
   LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
 
-  // ✅ RE-ADD: Original atomic handling logic
+  // If we have an atomic type, evaluate into the destination and then
+  // do an atomic copy.
   if (LHS.getType()->isAtomicType() ||
       CGF.LValueIsSuitableForInlineAtomic(LHS)) {
     EnsureDest(E->getRHS()->getType());
@@ -1323,7 +1335,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator 
*E) {
     return;
   }
 
-  // ✅ FIX: Handle RHS based on LValue/RValue
+  // Codegen the RHS so that it stores directly into the LHS.
   AggValueSlot LHSSlot = AggValueSlot::forLValue(
       LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
       AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
@@ -1337,9 +1349,9 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator 
*E) {
     CGF.EmitAggExpr(E->getRHS(), LHSSlot);
   }
 
+  // Copy into the destination if the assignment isn't ignored.
   EmitFinalDestCopy(E->getType(), LHS);
 
-  // ✅ RE-ADD: Original Nontrivial C struct destruction logic
   if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
       E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
     CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),

>From c2dab866cd2720196da7f0bb5225d10cd29d7b56 Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Fri, 9 Jan 2026 02:18:13 -0500
Subject: [PATCH 4/4] Apply git-clang-format formatting fixes

---
 clang/lib/CodeGen/CGExprAgg.cpp | 178 ++++++------
 clang/lib/CodeGen/CGExprCXX.cpp | 488 +++++++++++++++-----------------
 2 files changed, 314 insertions(+), 352 deletions(-)

diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 7cc1174b98e66..b0e112c493c19 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -50,11 +50,13 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   bool IsResultUnused;
 
   AggValueSlot EnsureSlot(QualType T) {
-    if (!Dest.isIgnored()) return Dest;
+    if (!Dest.isIgnored())
+      return Dest;
     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
   }
   void EnsureDest(QualType T) {
-    if (!Dest.isIgnored()) return;
+    if (!Dest.isIgnored())
+      return;
     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
   }
 
@@ -72,8 +74,8 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
 
 public:
   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
-    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
-    IsResultUnused(IsResultUnused) { }
+      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+        IsResultUnused(IsResultUnused) {}
 
   
//===--------------------------------------------------------------------===//
   //                               Utilities
@@ -114,9 +116,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     StmtVisitor<AggExprEmitter>::Visit(E);
   }
 
-  void VisitStmt(Stmt *S) {
-    CGF.ErrorUnsupported(S, "aggregate expression");
-  }
+  void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); }
   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
     Visit(GE->getResultExpr());
@@ -157,9 +157,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
     EmitAggLoadOfLValue(E);
   }
-  void VisitPredefinedExpr(const PredefinedExpr *E) {
-    EmitAggLoadOfLValue(E);
-  }
+  void VisitPredefinedExpr(const PredefinedExpr *E) { EmitAggLoadOfLValue(E); }
 
   // Operators.
   void VisitCastExpr(CastExpr *E);
@@ -175,9 +173,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   }
 
   void VisitObjCMessageExpr(ObjCMessageExpr *E);
-  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
-    EmitAggLoadOfLValue(E);
-  }
+  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { EmitAggLoadOfLValue(E); }
 
   void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
@@ -189,7 +185,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                               llvm::Value *outerBegin = nullptr);
   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
-  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
+  void VisitNoInitExpr(NoInitExpr *E) {} // Do nothing.
   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
     CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
     Visit(DAE->getExpr());
@@ -244,7 +240,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     Visit(E->getSelectedExpr());
   }
 };
-}  // end anonymous namespace.
+} // end anonymous namespace.
 
 
//===----------------------------------------------------------------------===//
 //                                Utilities
@@ -393,10 +389,8 @@ void AggExprEmitter::EmitCopy(QualType type, const 
AggValueSlot &dest,
   if (dest.requiresGCollection()) {
     CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
-    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
-                                                      dest.getAddress(),
-                                                      src.getAddress(),
-                                                      size);
+    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, dest.getAddress(),
+                                                      src.getAddress(), size);
     return;
   }
 
@@ -411,8 +405,8 @@ void AggExprEmitter::EmitCopy(QualType type, const 
AggValueSlot &dest,
 
 /// Emit the initializer for a std::initializer_list initialized with a
 /// real initializer list.
-void
-AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+void AggExprEmitter::VisitCXXStdInitializerListExpr(
+    CXXStdInitializerListExpr *E) {
   // Emit an array containing the elements.  The array is externally destructed
   // if the std::initializer_list object is.
   ASTContext &Ctx = CGF.getContext();
@@ -454,7 +448,7 @@ 
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
                            ArrayType->getElementType()) &&
            "Expected std::initializer_list second field to be const E *");
     llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
-    llvm::Value *IdxEnd[] = { Zero, Size };
+    llvm::Value *IdxEnd[] = {Zero, Size};
     llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
         ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
         "arrayend");
@@ -571,7 +565,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, 
llvm::ArrayType *AType,
       CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
   CharUnits elementAlign =
-    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
   llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
 
   // Consider initializing the array by copying from a global. For this to be
@@ -686,7 +680,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, 
llvm::ArrayType *AType,
           llvmElementType, element,
           llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
           "arrayinit.start");
-      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+      if (endOfInit.isValid())
+        Builder.CreateStore(element, endOfInit);
     }
 
     // Compute the end of the array.
@@ -700,7 +695,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, 
llvm::ArrayType *AType,
     // Jump into the body.
     CGF.EmitBlock(bodyBB);
     llvm::PHINode *currentElement =
-      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
     currentElement->addIncoming(element, entryBB);
 
     // Emit the actual filler expression.
@@ -724,11 +719,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, 
llvm::ArrayType *AType,
         llvmElementType, currentElement, one, "arrayinit.next");
 
     // Tell the EH cleanup that we finished with the last element.
-    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
+    if (endOfInit.isValid())
+      Builder.CreateStore(nextElement, endOfInit);
 
     // Leave the loop if we're done.
-    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
-                                             "arrayinit.done");
+    llvm::Value *done =
+        Builder.CreateICmpEQ(nextElement, end, "arrayinit.done");
     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
     Builder.CreateCondBr(done, endBB, bodyBB);
     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
@@ -741,7 +737,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, 
llvm::ArrayType *AType,
 //                            Visitor Methods
 
//===----------------------------------------------------------------------===//
 
-void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr 
*E){
+void AggExprEmitter::VisitMaterializeTemporaryExpr(
+    MaterializeTemporaryExpr *E) {
   Visit(E->getSubExpr());
 }
 
@@ -753,8 +750,7 @@ void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr 
*e) {
     EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
 }
 
-void
-AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
   if (Dest.isPotentiallyAliased()) {
     // Just emit a load of the lvalue + a copy, because our compound literal
     // might alias the destination.
@@ -798,8 +794,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_Dynamic: {
     // FIXME: Can this actually happen? We have no test coverage for it.
     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
-    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
-                                      CodeGenFunction::TCK_Load);
+    LValue LV =
+        CGF.EmitCheckedLValue(E->getSubExpr(), CodeGenFunction::TCK_Load);
     // FIXME: Do we also need to handle property references here?
     if (LV.isSimple())
       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
@@ -848,7 +844,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_BaseToDerived:
   case CK_UncheckedDerivedToBase: {
     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
-                "should have been unpacked before we got here");
+                     "should have been unpacked before we got here");
   }
 
   case CK_NonAtomicToAtomic:
@@ -858,11 +854,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     // Determine the atomic and value types.
     QualType atomicType = E->getSubExpr()->getType();
     QualType valueType = E->getType();
-    if (isToAtomic) std::swap(atomicType, valueType);
+    if (isToAtomic)
+      std::swap(atomicType, valueType);
 
     assert(atomicType->isAtomicType());
-    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
-                          atomicType->castAs<AtomicType>()->getValueType()));
+    assert(CGF.getContext().hasSameUnqualifiedType(
+        valueType, atomicType->castAs<AtomicType>()->getValueType()));
 
     // Just recurse normally if we're ignoring the result or the
     // atomic type doesn't change representation.
@@ -871,14 +868,14 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     }
 
     CastKind peepholeTarget =
-      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
+        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
 
     // These two cases are reverses of each other; try to peephole them.
     if (Expr *op =
             findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                      E->getType()) &&
-           "peephole significantly changed types?");
+             "peephole significantly changed types?");
       return Visit(op);
     }
 
@@ -895,13 +892,11 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
         // Build a GEP to refer to the subobject.
         Address valueAddr =
             CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
-        valueDest = AggValueSlot::forAddr(valueAddr,
-                                          valueDest.getQualifiers(),
-                                          valueDest.isExternallyDestructed(),
-                                          valueDest.requiresGCollection(),
-                                          valueDest.isPotentiallyAliased(),
-                                          AggValueSlot::DoesNotOverlap,
-                                          AggValueSlot::IsZeroed);
+        valueDest = AggValueSlot::forAddr(
+            valueAddr, valueDest.getQualifiers(),
+            valueDest.isExternallyDestructed(), 
valueDest.requiresGCollection(),
+            valueDest.isPotentiallyAliased(), AggValueSlot::DoesNotOverlap,
+            AggValueSlot::IsZeroed);
       }
 
       CGF.EmitAggExpr(E->getSubExpr(), valueDest);
@@ -911,7 +906,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     // Otherwise, we're converting an atomic type to a non-atomic type.
     // Make an atomic temporary, emit into that, and then copy the value out.
     AggValueSlot atomicSlot =
-      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
+        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
 
     Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
@@ -919,7 +914,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     return EmitFinalDestCopy(valueType, rvalue);
   }
   case CK_AddressSpaceConversion:
-     return Visit(E->getSubExpr());
+    return Visit(E->getSubExpr());
 
   case CK_LValueToRValue:
     // If we're loading from a volatile type, force the destination
@@ -1054,9 +1049,8 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
     return;
   }
 
-  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
-    return CGF.EmitCallExpr(E, Slot);
-  });
+  withReturnValueSlot(
+      E, [&](ReturnValueSlot Slot) { return CGF.EmitCallExpr(E, Slot); });
 }
 
 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -1219,7 +1213,7 @@ void AggExprEmitter::VisitBinaryOperator(const 
BinaryOperator *E) {
 }
 
 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
-                                                    const BinaryOperator *E) {
+    const BinaryOperator *E) {
   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
   EmitFinalDestCopy(E->getType(), LV);
 }
@@ -1252,37 +1246,36 @@ static bool isBlockVarRef(const Expr *E) {
     // FIXME: pointer arithmetic?
     return false;
 
-  // Check both sides of a conditional operator.
-  } else if (const AbstractConditionalOperator *op
-               = dyn_cast<AbstractConditionalOperator>(E)) {
-    return isBlockVarRef(op->getTrueExpr())
-        || isBlockVarRef(op->getFalseExpr());
+    // Check both sides of a conditional operator.
+  } else if (const AbstractConditionalOperator *op =
+                 dyn_cast<AbstractConditionalOperator>(E)) {
+    return isBlockVarRef(op->getTrueExpr()) ||
+           isBlockVarRef(op->getFalseExpr());
 
-  // OVEs are required to support BinaryConditionalOperators.
-  } else if (const OpaqueValueExpr *op
-               = dyn_cast<OpaqueValueExpr>(E)) {
+    // OVEs are required to support BinaryConditionalOperators.
+  } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
     if (const Expr *src = op->getSourceExpr())
       return isBlockVarRef(src);
 
-  // Casts are necessary to get things like (*(int*)&var) = foo().
-  // We don't really care about the kind of cast here, except
-  // we don't want to look through l2r casts, because it's okay
-  // to get the *value* in a __block variable.
+    // Casts are necessary to get things like (*(int*)&var) = foo().
+    // We don't really care about the kind of cast here, except
+    // we don't want to look through l2r casts, because it's okay
+    // to get the *value* in a __block variable.
   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
     if (cast->getCastKind() == CK_LValueToRValue)
       return false;
     return isBlockVarRef(cast->getSubExpr());
 
-  // Handle unary operators.  Again, just aggressively look through
-  // it, ignoring the operation.
+    // Handle unary operators.  Again, just aggressively look through
+    // it, ignoring the operation.
   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
     return isBlockVarRef(uop->getSubExpr());
 
-  // Look into the base of a field access.
+    // Look into the base of a field access.
   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
     return isBlockVarRef(mem->getBase());
 
-  // Look into the base of a subscript.
+    // Look into the base of a subscript.
   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
     return isBlockVarRef(sub->getBase());
   }
@@ -1295,8 +1288,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
   // For an assignment to work, the value on the right has
   // to be compatible with the value on the left.
   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
-                                                 E->getRHS()->getType())
-         && "Invalid assignment");
+                                                 E->getRHS()->getType()) &&
+         "Invalid assignment");
 
   // If the LHS might be a __block variable, and the RHS can
   // potentially cause a block copy, we need to evaluate the RHS first
@@ -1315,7 +1308,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
     if (LHS.getType()->isAtomicType() ||
         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
-      return;  
+      return;
     }
 
     EmitFinalDestCopy(E->getLHS()->getType(), LHS);
@@ -1358,8 +1351,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
                     E->getType());
 }
 
-void AggExprEmitter::
-VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+void AggExprEmitter::VisitAbstractConditionalOperator(
+    const AbstractConditionalOperator *E) {
   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
@@ -1444,8 +1437,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
 }
 
-void
-AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
   CGF.EmitCXXConstructExpr(E, Slot);
 }
@@ -1453,13 +1445,12 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
     const CXXInheritedCtorInitExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
-  CGF.EmitInheritedCXXConstructorCall(
-      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
-      E->inheritedFromVBase(), E);
+  CGF.EmitInheritedCXXConstructorCall(E->getConstructor(), E->constructsVBase(),
+                                      Slot.getAddress(),
+                                      E->inheritedFromVBase(), E);
 }
 
-void
-AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
   LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
 
@@ -1643,9 +1634,7 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
   return false;
 }
 
-
-void
-AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
+void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
   QualType type = LV.getType();
   // FIXME: Ignore result?
   // FIXME: Are initializers affected by volatile?
@@ -1788,10 +1777,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
           Dest.getAddress(), CXXRD, BaseRD,
           /*isBaseVirtual*/ false);
       AggValueSlot AggSlot = AggValueSlot::forAddr(
-          V, Qualifiers(),
-          AggValueSlot::IsDestructed,
-          AggValueSlot::DoesNotNeedGCBarriers,
-          AggValueSlot::IsNotAliased,
+          V, Qualifiers(), AggValueSlot::IsDestructed,
+          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
           CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
       CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
 
@@ -1887,8 +1874,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
     // Push a destructor if necessary.
     // FIXME: if we have an array of structures, all explicitly
     // initialized, we can end up pushing a linear number of cleanups.
-    if (QualType::DestructionKind dtorKind
-          = field->getType().isDestructedType()) {
+    if (QualType::DestructionKind dtorKind =
+            field->getType().isDestructedType()) {
       assert(LV.isSimple());
       if (dtorKind) {
         CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
@@ -2043,7 +2030,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
     CGF.DeactivateCleanupBlock(cleanup, index);
 }
 
-void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+void AggExprEmitter::VisitDesignatedInitUpdateExpr(
+    DesignatedInitUpdateExpr *E) {
   AggValueSlot Dest = EnsureSlot(E->getType());
 
   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
@@ -2064,7 +2052,8 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
   E = E->IgnoreParenNoopCasts(CGF.getContext());
 
   // 0 and 0.0 won't require any non-zero stores!
-  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
+  if (isSimpleZero(E, CGF))
+    return CharUnits::Zero();
 
   // If this is an initlist expr, sum up the size of sizes of the (present)
   // elements.  If this is something weird, assume the whole thing is non-zero.
@@ -2145,7 +2134,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   // Check to see if over 3/4 of the initializer are known to be zero.  If so,
   // we prefer to emit memset + individual stores for the rest.
   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
-  if (NumNonZeroBytes*4 > Size)
+  if (NumNonZeroBytes * 4 > Size)
     return;
 
   // Okay, it seems like a good idea to use an initial memset, emit the call.
@@ -2158,9 +2147,6 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   Slot.setZeroed();
 }
 
-
-
-
 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
 /// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
 /// the value of the aggregate expression is not needed.  If VolatileDest is
@@ -2174,7 +2160,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   // Optimize the slot if possible.
   CheckAggExprForMemSetUse(Slot, E, *this);
 
-  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
+  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
 }
 
 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index eae27a6a3f1c8..1a6c99beadb76 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -30,7 +30,7 @@ struct MemberCallInfo {
   // Number of prefix arguments for the call. Ignores the `this` pointer.
   unsigned PrefixSize;
 };
-}
+} // namespace
 
 static MemberCallInfo
 commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
@@ -125,8 +125,8 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
                   CE ? CE->getExprLoc() : SourceLocation{});
 }
 
-RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
-                                            const CXXPseudoDestructorExpr *E) {
+RValue
+CodeGenFunction::EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
   QualType DestroyedType = E->getDestroyedType();
   if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
     // Automatic Reference Counting:
@@ -155,9 +155,9 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
       break;
 
     case Qualifiers::OCL_Strong:
-      EmitARCRelease(Builder.CreateLoad(BaseValue,
-                        DestroyedType.isVolatileQualified()),
-                     ARCPreciseLifetime);
+      EmitARCRelease(
+          Builder.CreateLoad(BaseValue, DestroyedType.isVolatileQualified()),
+          ARCPreciseLifetime);
       break;
 
     case Qualifiers::OCL_Weak:
@@ -272,7 +272,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
         RtlArgs = &RtlArgStorage;
         EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                      drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
-                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
+                     /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
       }
     }
   }
@@ -468,9 +468,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
 
   // Ask the ABI to load the callee.  Note that This is modified.
   llvm::Value *ThisPtrForCall = nullptr;
-  CGCallee Callee =
-    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
-                                             ThisPtrForCall, MemFnPtr, MPT);
+  CGCallee Callee = CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(
+      *this, BO, This, ThisPtrForCall, MemFnPtr, MPT);
 
   CallArgList Args;
 
@@ -582,9 +581,9 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
           StoreSizeVal);
     }
 
-  // Otherwise, just memset the whole thing to zero.  This is legal
-  // because in LLVM, all default initializers (other than the ones we just
-  // handled above) are guaranteed to have a bit pattern of all zeros.
+    // Otherwise, just memset the whole thing to zero.  This is legal
+    // because in LLVM, all default initializers (other than the ones we just
+    // handled above) are guaranteed to have a bit pattern of all zeros.
   } else {
     for (std::pair<CharUnits, CharUnits> Store : Stores) {
       CharUnits StoreOffset = Store.first;
@@ -597,9 +596,8 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
   }
 }
 
-void
-CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
-                                      AggValueSlot Dest) {
+void CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+                                           AggValueSlot Dest) {
   assert(!Dest.isIgnored() && "Must have a destination!");
   const CXXConstructorDecl *CD = E->getConstructor();
 
@@ -640,8 +638,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
     return;
   }
 
-  if (const ArrayType *arrayType
-        = getContext().getAsArrayType(E->getType())) {
+  if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) {
     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                                Dest.isSanitizerChecked());
   } else {
@@ -666,10 +663,10 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
 
     case CXXConstructionKind::NonVirtualBase:
       Type = Ctor_Base;
-     }
+    }
 
-     // Call the constructor.
-     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
+    // Call the constructor.
+    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
   }
 }
 
@@ -679,7 +676,7 @@ void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
     Exp = E->getSubExpr();
   assert(isa<CXXConstructExpr>(Exp) &&
          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
-  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
   const CXXConstructorDecl *CD = E->getConstructor();
   RunCleanupsScope Scope(*this);
 
@@ -690,8 +687,8 @@ void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
   if (E->requiresZeroInitialization())
     EmitNullInitialization(Dest, E->getType());
 
-  assert(!getContext().getAsConstantArrayType(E->getType())
-         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+  assert(!getContext().getAsConstantArrayType(E->getType()) &&
+         "EmitSynthesizedCXXCopyCtor - Copied-in Array");
   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
 }
 
@@ -717,8 +714,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
 
   if (!e->isArray()) {
     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
-    sizeWithoutCookie
-      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+    sizeWithoutCookie =
+        llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
     return sizeWithoutCookie;
   }
 
@@ -744,16 +741,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
   // size_t.  That's just a gloss, though, and it's wrong in one
   // important way: if the count is negative, it's an error even if
   // the cookie size would bring the total size >= 0.
-  bool isSigned
-    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
-  llvm::IntegerType *numElementsType
-    = cast<llvm::IntegerType>(numElements->getType());
+  bool isSigned =
+      (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
+  llvm::IntegerType *numElementsType =
+      cast<llvm::IntegerType>(numElements->getType());
   unsigned numElementsWidth = numElementsType->getBitWidth();
 
   // Compute the constant factor.
   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
-  while (const ConstantArrayType *CAT
-             = CGF.getContext().getAsConstantArrayType(type)) {
+  while (const ConstantArrayType *CAT =
+             CGF.getContext().getAsConstantArrayType(type)) {
     type = CAT->getElementType();
     arraySizeMultiplier *= CAT->getSize();
   }
@@ -768,7 +765,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
   // Don't bloat the -O0 code.
   if (llvm::ConstantInt *numElementsC =
-        dyn_cast<llvm::ConstantInt>(numElements)) {
+          dyn_cast<llvm::ConstantInt>(numElements)) {
     const llvm::APInt &count = numElementsC->getValue();
 
     bool hasAnyOverflow = false;
@@ -795,13 +792,13 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // Scale numElements by that.  This might overflow, but we don't
     // care because it only overflows if allocationSize does, too, and
     // if that overflows then we shouldn't use this.
-    numElements = llvm::ConstantInt::get(CGF.SizeTy,
-                                         adjustedCount * arraySizeMultiplier);
+    numElements =
+        llvm::ConstantInt::get(CGF.SizeTy, adjustedCount * arraySizeMultiplier);
 
     // Compute the size before cookie, and track whether it overflowed.
     bool overflow;
-    llvm::APInt allocationSize
-      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+    llvm::APInt allocationSize =
+        adjustedCount.umul_ov(typeSizeMultiplier, overflow);
     hasAnyOverflow |= overflow;
 
     // Add in the cookie, and check whether it's overflowed.
@@ -821,7 +818,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
     }
 
-  // Otherwise, we might need to use the overflow intrinsics.
+    // Otherwise, we might need to use the overflow intrinsics.
   } else {
     // There are up to five conditions we need to test for:
     // 1) if isSigned, we need to check whether numElements is negative;
@@ -845,13 +842,13 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       llvm::APInt threshold =
           llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);
 
-      llvm::Value *thresholdV
-        = llvm::ConstantInt::get(numElementsType, threshold);
+      llvm::Value *thresholdV =
+          llvm::ConstantInt::get(numElementsType, threshold);
 
       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
 
-    // Otherwise, if we're signed, we want to sext up to size_t.
+      // Otherwise, if we're signed, we want to sext up to size_t.
     } else if (isSigned) {
       if (numElementsWidth < sizeWidth)
         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
@@ -862,10 +859,10 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       // unsigned overflow.  Otherwise, we have to do it here. But at least
       // in this case, we can subsume the >= minElements check.
       if (typeSizeMultiplier == 1)
-        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
+        hasOverflow = CGF.Builder.CreateICmpSLT(
+            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));
 
-    // Otherwise, zext up to size_t if necessary.
+      // Otherwise, zext up to size_t if necessary.
     } else if (numElementsWidth < sizeWidth) {
       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
     }
@@ -875,15 +872,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     if (minElements) {
       // Don't allow allocation of fewer elements than we have initializers.
       if (!hasOverflow) {
-        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
+        hasOverflow = CGF.Builder.CreateICmpULT(
+            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));
       } else if (numElementsWidth > sizeWidth) {
         // The other existing overflow subsumes this check.
         // We do an unsigned comparison, since any signed value < -1 is
         // taken care of either above or below.
-        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
-                          CGF.Builder.CreateICmpULT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+        hasOverflow = CGF.Builder.CreateOr(
+            hasOverflow,
+            CGF.Builder.CreateICmpULT(
+                numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements)));
       }
     }
 
@@ -897,11 +895,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // can be ignored because the result shouldn't be used if
     // allocation fails.
     if (typeSizeMultiplier != 1) {
-      llvm::Function *umul_with_overflow
-        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
+      llvm::Function *umul_with_overflow =
+          CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
 
       llvm::Value *tsmV =
-        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
       llvm::Value *result =
           CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
 
@@ -921,10 +919,10 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
           assert(arraySizeMultiplier == typeSizeMultiplier);
           numElements = size;
 
-        // Otherwise we need a separate multiply.
+          // Otherwise we need a separate multiply.
         } else {
           llvm::Value *asmV =
-            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+              llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
           numElements = CGF.Builder.CreateMul(numElements, asmV);
         }
       }
@@ -937,8 +935,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     if (cookieSize != 0) {
       sizeWithoutCookie = size;
 
-      llvm::Function *uadd_with_overflow
-        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
+      llvm::Function *uadd_with_overflow =
+          CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
 
       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
       llvm::Value *result =
@@ -957,9 +955,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // overwrite 'size' with an all-ones value, which should cause
     // operator new to throw.
     if (hasOverflow)
-      size = CGF.Builder.CreateSelect(hasOverflow,
-                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
-                                      size);
+      size = CGF.Builder.CreateSelect(
+          hasOverflow, llvm::Constant::getAllOnesValue(CGF.SizeTy), size);
   }
 
   if (cookieSize == 0)
@@ -976,21 +973,19 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
   // FIXME: Refactor with EmitExprAsInit.
   switch (CGF.getEvaluationKind(AllocType)) {
   case TEK_Scalar:
-    CGF.EmitScalarInit(Init, nullptr,
-                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
+    CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType),
+                       false);
     return;
   case TEK_Complex:
     CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                   /*isInit*/ true);
     return;
   case TEK_Aggregate: {
-    AggValueSlot Slot
-      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
-                              AggValueSlot::IsDestructed,
-                              AggValueSlot::DoesNotNeedGCBarriers,
-                              AggValueSlot::IsNotAliased,
-                              MayOverlap, AggValueSlot::IsNotZeroed,
-                              AggValueSlot::IsSanitizerChecked);
+    AggValueSlot Slot = AggValueSlot::forAddr(
+        NewPtr, AllocType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+        MayOverlap, AggValueSlot::IsNotZeroed,
+        AggValueSlot::IsSanitizerChecked);
     CGF.EmitAggExpr(Init, Slot);
     return;
   }
@@ -1019,7 +1014,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
 
   CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
   CharUnits ElementAlign =
-    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
 
   // Attempt to perform zero-initialization using memset.
   auto TryMemsetInitialization = [&]() -> bool {
@@ -1069,22 +1064,19 @@ void CodeGenFunction::EmitNewArrayInitializer(
       // Initialize the initial portion of length equal to that of the string
       // literal. The allocation must be for at least this much; we emitted a
       // check for that earlier.
-      AggValueSlot Slot =
-          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
-                                AggValueSlot::IsDestructed,
-                                AggValueSlot::DoesNotNeedGCBarriers,
-                                AggValueSlot::IsNotAliased,
-                                AggValueSlot::DoesNotOverlap,
-                                AggValueSlot::IsNotZeroed,
-                                AggValueSlot::IsSanitizerChecked);
+      AggValueSlot Slot = AggValueSlot::forAddr(
+          CurPtr, ElementType.getQualifiers(), AggValueSlot::IsDestructed,
+          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+          AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed,
+          AggValueSlot::IsSanitizerChecked);
       EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);
 
       // Move past these elements.
       InitListElements =
           cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
               ->getZExtSize();
-      CurPtr = Builder.CreateConstInBoundsGEP(
-          CurPtr, InitListElements, "string.init.end");
+      CurPtr = Builder.CreateConstInBoundsGEP(CurPtr, InitListElements,
+                                              "string.init.end");
 
       // Zero out the rest, if any remain.
       llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1207,7 +1199,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
           NumElements,
           llvm::ConstantInt::get(NumElements->getType(), InitListElements));
     EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
-                               /*NewPointerIsChecked*/true,
+                               /*NewPointerIsChecked*/ true,
                                CCE->requiresZeroInitialization());
     return;
   }
@@ -1343,10 +1335,9 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
   llvm::CallBase *CallOrInvoke;
   llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
   CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
-  RValue RV =
-      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
-                       Args, CalleeType, /*ChainCall=*/false),
-                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);
+  RValue RV = CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+                               Args, CalleeType, /*ChainCall=*/false),
+                           Callee, ReturnValueSlot(), Args, &CallOrInvoke);
 
   /// C++1y [expr.new]p10:
   ///   [In a new-expression,] an implementation is allowed to omit a call
@@ -1354,8 +1345,8 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
   ///
   /// We model such elidable calls with the 'builtin' attribute.
   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
-  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
-      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
+  if (CalleeDecl->isReplaceableGlobalAllocationFunction() && Fn &&
+      Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
     CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
   }
 
@@ -1369,8 +1360,8 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
   EmitCallArgs(Args, Type, TheCall->arguments());
   // Find the allocation or deallocation function that we're calling.
   ASTContext &Ctx = getContext();
-  DeclarationName Name = Ctx.DeclarationNames
-      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
+  DeclarationName Name =
+      Ctx.DeclarationNames.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
 
   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
@@ -1388,113 +1379,111 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
 }
 
 namespace {
-  /// A cleanup to call the given 'operator delete' function upon abnormal
-  /// exit from a new expression. Templated on a traits type that deals with
-  /// ensuring that the arguments dominate the cleanup if necessary.
-  template<typename Traits>
-  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
-    /// Type used to hold llvm::Value*s.
-    typedef typename Traits::ValueTy ValueTy;
-    /// Type used to hold RValues.
-    typedef typename Traits::RValueTy RValueTy;
-    struct PlacementArg {
-      RValueTy ArgValue;
-      QualType ArgType;
-    };
-
-    unsigned NumPlacementArgs : 30;
-    LLVM_PREFERRED_TYPE(AlignedAllocationMode)
-    unsigned PassAlignmentToPlacementDelete : 1;
-    const FunctionDecl *OperatorDelete;
-    RValueTy TypeIdentity;
-    ValueTy Ptr;
-    ValueTy AllocSize;
-    CharUnits AllocAlign;
-
-    PlacementArg *getPlacementArgs() {
-      return reinterpret_cast<PlacementArg *>(this + 1);
-    }
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <typename Traits>
+class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+  /// Type used to hold llvm::Value*s.
+  typedef typename Traits::ValueTy ValueTy;
+  /// Type used to hold RValues.
+  typedef typename Traits::RValueTy RValueTy;
+  struct PlacementArg {
+    RValueTy ArgValue;
+    QualType ArgType;
+  };
 
-  public:
-    static size_t getExtraSize(size_t NumPlacementArgs) {
-      return NumPlacementArgs * sizeof(PlacementArg);
-    }
+  unsigned NumPlacementArgs : 30;
+  LLVM_PREFERRED_TYPE(AlignedAllocationMode)
+  unsigned PassAlignmentToPlacementDelete : 1;
+  const FunctionDecl *OperatorDelete;
+  RValueTy TypeIdentity;
+  ValueTy Ptr;
+  ValueTy AllocSize;
+  CharUnits AllocAlign;
+
+  PlacementArg *getPlacementArgs() {
+    return reinterpret_cast<PlacementArg *>(this + 1);
+  }
 
-    CallDeleteDuringNew(size_t NumPlacementArgs,
-                        const FunctionDecl *OperatorDelete,
-                        RValueTy TypeIdentity, ValueTy Ptr, ValueTy AllocSize,
-                        const ImplicitAllocationParameters &IAP,
-                        CharUnits AllocAlign)
-        : NumPlacementArgs(NumPlacementArgs),
-          PassAlignmentToPlacementDelete(
-              isAlignedAllocation(IAP.PassAlignment)),
-          OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
-          AllocSize(AllocSize), AllocAlign(AllocAlign) {}
-
-    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
-      assert(I < NumPlacementArgs && "index out of range");
-      getPlacementArgs()[I] = {Arg, Type};
-    }
+public:
+  static size_t getExtraSize(size_t NumPlacementArgs) {
+    return NumPlacementArgs * sizeof(PlacementArg);
+  }
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
-      CallArgList DeleteArgs;
-      unsigned FirstNonTypeArg = 0;
-      TypeAwareAllocationMode TypeAwareDeallocation =
-          TypeAwareAllocationMode::No;
-      if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
-        TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
-        QualType SpecializedTypeIdentity = FPT->getParamType(0);
-        ++FirstNonTypeArg;
-        DeleteArgs.add(Traits::get(CGF, TypeIdentity), SpecializedTypeIdentity);
-      }
-      // The first argument after type-identity parameter (if any) is always
-      // a void* (or C* for a destroying operator delete for class type C).
-      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(FirstNonTypeArg));
-
-      // Figure out what other parameters we should be implicitly passing.
-      UsualDeleteParams Params;
-      if (NumPlacementArgs) {
-        // A placement deallocation function is implicitly passed an alignment
-        // if the placement allocation function was, but is never passed a size.
-        Params.Alignment =
-            alignedAllocationModeFromBool(PassAlignmentToPlacementDelete);
-        Params.TypeAwareDelete = TypeAwareDeallocation;
-        Params.Size = isTypeAwareAllocation(Params.TypeAwareDelete);
-      } else {
-        // For a non-placement new-expression, 'operator delete' can take a
-        // size and/or an alignment if it has the right parameters.
-        Params = OperatorDelete->getUsualDeleteParams();
-      }
+  CallDeleteDuringNew(size_t NumPlacementArgs,
+                      const FunctionDecl *OperatorDelete, RValueTy TypeIdentity,
+                      ValueTy Ptr, ValueTy AllocSize,
+                      const ImplicitAllocationParameters &IAP,
+                      CharUnits AllocAlign)
+      : NumPlacementArgs(NumPlacementArgs),
+        PassAlignmentToPlacementDelete(isAlignedAllocation(IAP.PassAlignment)),
+        OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
+        AllocSize(AllocSize), AllocAlign(AllocAlign) {}
+
+  void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
+    assert(I < NumPlacementArgs && "index out of range");
+    getPlacementArgs()[I] = {Arg, Type};
+  }
 
-      assert(!Params.DestroyingDelete &&
-             "should not call destroying delete in a new-expression");
-
-      // The second argument can be a std::size_t (for non-placement delete).
-      if (Params.Size)
-        DeleteArgs.add(Traits::get(CGF, AllocSize),
-                       CGF.getContext().getSizeType());
-
-      // The next (second or third) argument can be a std::align_val_t, which
-      // is an enum whose underlying type is std::size_t.
-      // FIXME: Use the right type as the parameter type. Note that in a call
-      // to operator delete(size_t, ...), we may not have it available.
-      if (isAlignedAllocation(Params.Alignment))
-        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
-                           CGF.SizeTy, AllocAlign.getQuantity())),
-                       CGF.getContext().getSizeType());
-
-      // Pass the rest of the arguments, which must match exactly.
-      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
-        auto Arg = getPlacementArgs()[I];
-        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
-      }
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
+    CallArgList DeleteArgs;
+    unsigned FirstNonTypeArg = 0;
+    TypeAwareAllocationMode TypeAwareDeallocation = TypeAwareAllocationMode::No;
+    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
+      TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
+      QualType SpecializedTypeIdentity = FPT->getParamType(0);
+      ++FirstNonTypeArg;
+      DeleteArgs.add(Traits::get(CGF, TypeIdentity), SpecializedTypeIdentity);
+    }
+    // The first argument after type-identity parameter (if any) is always
+    // a void* (or C* for a destroying operator delete for class type C).
+    DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(FirstNonTypeArg));
+
+    // Figure out what other parameters we should be implicitly passing.
+    UsualDeleteParams Params;
+    if (NumPlacementArgs) {
+      // A placement deallocation function is implicitly passed an alignment
+      // if the placement allocation function was, but is never passed a size.
+      Params.Alignment =
+          alignedAllocationModeFromBool(PassAlignmentToPlacementDelete);
+      Params.TypeAwareDelete = TypeAwareDeallocation;
+      Params.Size = isTypeAwareAllocation(Params.TypeAwareDelete);
+    } else {
+      // For a non-placement new-expression, 'operator delete' can take a
+      // size and/or an alignment if it has the right parameters.
+      Params = OperatorDelete->getUsualDeleteParams();
+    }
 
-      // Call 'operator delete'.
-      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+    assert(!Params.DestroyingDelete &&
+           "should not call destroying delete in a new-expression");
+
+    // The second argument can be a std::size_t (for non-placement delete).
+    if (Params.Size)
+      DeleteArgs.add(Traits::get(CGF, AllocSize),
+                     CGF.getContext().getSizeType());
+
+    // The next (second or third) argument can be a std::align_val_t, which
+    // is an enum whose underlying type is std::size_t.
+    // FIXME: Use the right type as the parameter type. Note that in a call
+    // to operator delete(size_t, ...), we may not have it available.
+    if (isAlignedAllocation(Params.Alignment))
+      DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
+                         CGF.SizeTy, AllocAlign.getQuantity())),
+                     CGF.getContext().getSizeType());
+
+    // Pass the rest of the arguments, which must match exactly.
+    for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+      auto Arg = getPlacementArgs()[I];
+      DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
     }
-  };
-}
+
+    // Call 'operator delete'.
+    EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+  }
+};
+} // namespace
 
 /// Enter a cleanup to call 'operator delete' if the initializer in a
 /// new-expression throws.
@@ -1532,7 +1521,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF, const CXXNewExpr *E,
   DominatingValue<RValue>::saved_type SavedNewPtr =
       DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
   DominatingValue<RValue>::saved_type SavedAllocSize =
-    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
   DominatingValue<RValue>::saved_type SavedTypeIdentity =
       DominatingValue<RValue>::save(CGF, TypeIdentity);
   struct ConditionalCleanupTraits {
@@ -1586,9 +1575,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
   llvm::Value *numElements = nullptr;
   llvm::Value *allocSizeWithoutCookie = nullptr;
-  llvm::Value *allocSize =
-    EmitCXXNewAllocSize(*this, E, minElements, numElements,
-                        allocSizeWithoutCookie);
+  llvm::Value *allocSize = EmitCXXNewAllocSize(
+      *this, E, minElements, numElements, allocSizeWithoutCookie);
   CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
 
   // Emit the allocation call.  If the allocator is a global placement
@@ -1619,7 +1607,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
   } else {
     const FunctionProtoType *allocatorType =
-      allocator->getType()->castAs<FunctionProtoType>();
+        allocator->getType()->castAs<FunctionProtoType>();
     ImplicitAllocationParameters IAP = E->implicitAllocationParameters();
     unsigned ParamsToSkip = 0;
     if (isTypeAwareAllocation(IAP.PassTypeIdentity)) {
@@ -1661,10 +1649,10 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
     // FIXME: Why do we not pass a CalleeDecl here?
     EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
-                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
+                 /*AC*/ AbstractCallee(), /*ParamsToSkip*/ ParamsToSkip);
 
     RValue RV =
-      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
 
     if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal())) {
       if (auto *CGDI = getDebugInfo()) {
@@ -1736,9 +1724,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
          CalculateCookiePadding(*this, E).isZero());
   if (allocSize != allocSizeWithoutCookie) {
     assert(E->isArray());
-    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
-                                                       numElements,
-                                                       E, allocType);
+    allocation = CGM.getCXXABI().InitializeArrayCookie(
+        *this, allocation, numElements, E, allocType);
   }
 
   llvm::Type *elementTy = ConvertTypeForMem(allocType);
@@ -1871,27 +1858,25 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
     (*TagAlloca)->eraseFromParent();
 }
 namespace {
-  /// Calls the given 'operator delete' on a single object.
-  struct CallObjectDelete final : EHScopeStack::Cleanup {
-    llvm::Value *Ptr;
-    const FunctionDecl *OperatorDelete;
-    QualType ElementType;
-
-    CallObjectDelete(llvm::Value *Ptr,
-                     const FunctionDecl *OperatorDelete,
-                     QualType ElementType)
+/// Calls the given 'operator delete' on a single object.
+struct CallObjectDelete final : EHScopeStack::Cleanup {
+  llvm::Value *Ptr;
+  const FunctionDecl *OperatorDelete;
+  QualType ElementType;
+
+  CallObjectDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
+                   QualType ElementType)
       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
-    }
-  };
-}
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+  }
+};
+} // namespace
 
-void
-CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
-                                             llvm::Value *CompletePtr,
-                                             QualType ElementType) {
+void CodeGenFunction::pushCallObjectDeleteCleanup(
+    const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr,
+    QualType ElementType) {
   EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                         OperatorDelete, ElementType);
 }
@@ -1915,10 +1900,8 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
 /// Emit the code for deleting a single object.
 /// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
 /// if not.
-static bool EmitObjectDelete(CodeGenFunction &CGF,
-                             const CXXDeleteExpr *DE,
-                             Address Ptr,
-                             QualType ElementType,
+static bool EmitObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
+                             Address Ptr, QualType ElementType,
                              llvm::BasicBlock *UnconditionalDeleteBlock) {
   // C++11 [expr.delete]p3:
   //   If the static type of the object to be deleted is different from its
@@ -1941,10 +1924,9 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
       if (Dtor->isVirtual()) {
         bool UseVirtualCall = true;
         const Expr *Base = DE->getArgument();
-        if (auto *DevirtualizedDtor =
-                dyn_cast_or_null<const CXXDestructorDecl>(
-                    Dtor->getDevirtualizedMethod(
-                        Base, CGF.CGM.getLangOpts().AppleKext))) {
+        if (auto *DevirtualizedDtor = dyn_cast_or_null<const CXXDestructorDecl>(
+                Dtor->getDevirtualizedMethod(
+                    Base, CGF.CGM.getLangOpts().AppleKext))) {
           UseVirtualCall = false;
           const CXXRecordDecl *DevirtualizedClass =
               DevirtualizedDtor->getParent();
@@ -1979,8 +1961,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
   if (Dtor)
     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                               /*ForVirtualBase=*/false,
-                              /*Delegating=*/false,
-                              Ptr, ElementType);
+                              /*Delegating=*/false, Ptr, ElementType);
   else if (auto Lifetime = ElementType.getObjCLifetime()) {
     switch (Lifetime) {
     case Qualifiers::OCL_None:
@@ -2010,34 +1991,30 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
 }
 
 namespace {
-  /// Calls the given 'operator delete' on an array of objects.
-  struct CallArrayDelete final : EHScopeStack::Cleanup {
-    llvm::Value *Ptr;
-    const FunctionDecl *OperatorDelete;
-    llvm::Value *NumElements;
-    QualType ElementType;
-    CharUnits CookieSize;
-
-    CallArrayDelete(llvm::Value *Ptr,
-                    const FunctionDecl *OperatorDelete,
-                    llvm::Value *NumElements,
-                    QualType ElementType,
-                    CharUnits CookieSize)
+/// Calls the given 'operator delete' on an array of objects.
+struct CallArrayDelete final : EHScopeStack::Cleanup {
+  llvm::Value *Ptr;
+  const FunctionDecl *OperatorDelete;
+  llvm::Value *NumElements;
+  QualType ElementType;
+  CharUnits CookieSize;
+
+  CallArrayDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
+                  llvm::Value *NumElements, QualType ElementType,
+                  CharUnits CookieSize)
       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
         ElementType(ElementType), CookieSize(CookieSize) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
-                         CookieSize);
-    }
-  };
-}
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
+                       CookieSize);
+  }
+};
+} // namespace
 
 /// Emit the code for deleting an array of objects.
-static void EmitArrayDelete(CodeGenFunction &CGF,
-                            const CXXDeleteExpr *E,
-                            Address deletedPtr,
-                            QualType elementType) {
+static void EmitArrayDelete(CodeGenFunction &CGF, const CXXDeleteExpr *E,
+                            Address deletedPtr, QualType elementType) {
   llvm::Value *numElements = nullptr;
   llvm::Value *allocatedPtr = nullptr;
   CharUnits cookieSize;
@@ -2048,10 +2025,9 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
 
   // Make sure that we call delete even if one of the dtors throws.
   const FunctionDecl *operatorDelete = E->getOperatorDelete();
-  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
-                                           allocatedPtr, operatorDelete,
-                                           numElements, elementType,
-                                           cookieSize);
+  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup, allocatedPtr,
+                                           operatorDelete, numElements,
+                                           elementType, cookieSize);
 
   // Destroy the elements.
   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
@@ -2059,11 +2035,11 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
 
     CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
     CharUnits elementAlign =
-      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
+        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
 
     llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
     llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
-      deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
+        deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
 
     // Note that it is legal to allocate a zero-length array, and we
     // can never fold the check away because the length should always

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to