Author: Andy Kaylor
Date: 2026-04-03T08:38:06-07:00
New Revision: 5b56352757434eb6756cf0211c32832a2ced4ea3

URL: 
https://github.com/llvm/llvm-project/commit/5b56352757434eb6756cf0211c32832a2ced4ea3
DIFF: 
https://github.com/llvm/llvm-project/commit/5b56352757434eb6756cf0211c32832a2ced4ea3.diff

LOG: [CIR] Implement cleanups for temporaries with automatic duration (#189754)

This implements handling for cleanup of temporary variables with
automatic storage duration. This is a simplified implementation that
doesn't yet handle the possibility of exceptions being thrown within
this cleanup scope or the cleanup scope being inside a conditional
operation. Support for those cases will be added later.

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
    clang/lib/CIR/CodeGen/CIRGenDecl.cpp
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/CodeGen/EHScopeStack.h
    clang/test/CIR/CodeGen/cleanup.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp 
b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index f83f99e3ebb63..1bc3df92cc64b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -542,3 +542,18 @@ void CIRGenFunction::popCleanupBlocks(
     }
   }
 }
+
+/// Pops cleanup blocks until the given savepoint is reached, then adds the
+/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
+void CIRGenFunction::popCleanupBlocks(
+    EHScopeStack::stable_iterator oldCleanupStackDepth,
+    size_t oldLifetimeExtendedSize, ArrayRef<mlir::Value *> valuesToReload) {
+  popCleanupBlocks(oldCleanupStackDepth, valuesToReload);
+
+  // Promote deferred lifetime-extended cleanups onto the EH scope stack.
+  for (const LifetimeExtendedCleanupEntry &cleanup : llvm::make_range(
+           lifetimeExtendedCleanupStack.begin() + oldLifetimeExtendedSize,
+           lifetimeExtendedCleanupStack.end()))
+    pushLifetimeExtendedCleanupToEHStack(cleanup);
+  lifetimeExtendedCleanupStack.truncate(oldLifetimeExtendedSize);
+}

diff  --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp 
b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 94d8632a48437..26a1a17243130 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -1007,6 +1007,37 @@ void CIRGenFunction::pushDestroy(CleanupKind 
cleanupKind, Address addr,
   pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
 }
 
+void CIRGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
+                                                 Address addr, QualType type,
+                                                 Destroyer *destroyer,
+                                                 bool useEHCleanupForArray) {
+  if (isInConditionalBranch()) {
+    cgm.errorNYI("conditional lifetime-extended destroy");
+    return;
+  }
+
+  // Classic codegen also uses pushDestroyAndDeferDeactivation here to push an
+  // EH cleanup that protects the temporary during the rest of the full
+  // expression, then deactivates it when the full expression ends. We don't
+  // have deferred deactivation yet, so we only queue the lifetime-extended
+  // cleanup below. When deferred deactivation is implemented, add the
+  // pushDestroyAndDeferDeactivation call here.
+  if (getLangOpts().Exceptions) {
+    cgm.errorNYI("lifetime-extended cleanup with exceptions enabled");
+    return;
+  }
+
+  assert(!cir::MissingFeatures::useEHCleanupForArray());
+
+  pushCleanupAfterFullExpr(cleanupKind, addr, type, destroyer);
+}
+
+void CIRGenFunction::pushLifetimeExtendedCleanupToEHStack(
+    const LifetimeExtendedCleanupEntry &entry) {
+  ehStack.pushCleanup<DestroyObject>(entry.kind, entry.addr, entry.type,
+                                     entry.destroyer);
+}
+
 /// Destroys all the elements of the given array, beginning from last to first.
 /// The array cannot be zero-length.
 ///

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 65644bc1a3fd9..060a90ca5fb8c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1719,8 +1719,9 @@ static void pushTemporaryCleanup(CIRGenFunction &cgf,
     break;
 
   case SD_Automatic:
-    cgf.cgm.errorNYI(e->getSourceRange(),
-                     "pushTemporaryCleanup: automatic storage duration");
+    cgf.pushLifetimeExtendedDestroy(
+        NormalAndEHCleanup, referenceTemporary, e->getType(),
+        CIRGenFunction::destroyCXXObject, cgf.getLangOpts().Exceptions);
     break;
 
   case SD_Dynamic:

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0bd440a61db20..7914a03a7f1a8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -92,6 +92,25 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Tracks function scope overall cleanup handling.
   EHScopeStack ehStack;
 
+  typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
+
+  /// An entry in the lifetime-extended cleanup stack. Each entry represents a
+  /// cleanup that was deferred past a full-expression boundary (e.g.,
+  /// destroying a temporary bound to a local reference). When the enclosing
+  /// scope exits, these entries are promoted to the EH scope stack.
+  ///
+  /// Currently only DestroyObject cleanups are lifetime-extended. When other
+  /// cleanup types are needed (e.g., CallLifetimeEnd), this struct can be
+  /// extended with a std::variant of cleanup data types.
+  struct LifetimeExtendedCleanupEntry {
+    CleanupKind kind;
+    Address addr;
+    QualType type;
+    Destroyer *destroyer;
+  };
+
+  llvm::SmallVector<LifetimeExtendedCleanupEntry> lifetimeExtendedCleanupStack;
+
   GlobalDecl curSEHParent;
 
   /// A mapping from NRVO variables to the flags used to indicate
@@ -970,6 +989,12 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// that have been added.
   void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
                         ArrayRef<mlir::Value *> valuesToReload = {});
+
+  /// Pops cleanup blocks until the given savepoint is reached, then adds the
+  /// cleanups from the given savepoint in the lifetime-extended cleanups 
stack.
+  void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth,
+                        size_t oldLifetimeExtendedSize,
+                        ArrayRef<mlir::Value *> valuesToReload = {});
   void popCleanupBlock();
 
   void terminateStructuredRegionBody(mlir::Region &r, mlir::Location loc);
@@ -997,10 +1022,19 @@ class CIRGenFunction : public CIRGenTypeCache {
     cgm.errorNYI("pushFullExprCleanup in conditional branch");
   }
 
+  /// Queue a cleanup to be pushed after finishing the current full-expression.
+  /// When the enclosing RunCleanupsScope exits, popCleanupBlocks promotes 
these
+  /// entries onto the EH scope stack for the enclosing scope.
+  void pushCleanupAfterFullExpr(CleanupKind kind, Address addr, QualType type,
+                                Destroyer *destroyer) {
+    lifetimeExtendedCleanupStack.push_back({kind, addr, type, destroyer});
+  }
+
   /// Enters a new scope for capturing cleanups, all of which
   /// will be executed once the scope is exited.
   class RunCleanupsScope {
     EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
+    size_t lifetimeExtendedCleanupStackSize;
 
   protected:
     bool performCleanup;
@@ -1018,6 +1052,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     explicit RunCleanupsScope(CIRGenFunction &cgf)
         : performCleanup(true), cgf(cgf) {
       cleanupStackDepth = cgf.ehStack.stable_begin();
+      lifetimeExtendedCleanupStackSize =
+          cgf.lifetimeExtendedCleanupStack.size();
       oldDidCallStackSave = cgf.didCallStackSave;
       cgf.didCallStackSave = false;
       oldCleanupStackDepth = cgf.currentCleanupStackDepth;
@@ -1035,7 +1071,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     void forceCleanup(ArrayRef<mlir::Value *> valuesToReload = {}) {
       assert(performCleanup && "Already forced cleanup");
       cgf.didCallStackSave = oldDidCallStackSave;
-      cgf.popCleanupBlocks(cleanupStackDepth, valuesToReload);
+      cgf.popCleanupBlocks(cleanupStackDepth, lifetimeExtendedCleanupStackSize,
+                           valuesToReload);
       performCleanup = false;
       cgf.currentCleanupStackDepth = oldCleanupStackDepth;
     }
@@ -1247,8 +1284,6 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   LexicalScope *curLexScope = nullptr;
 
-  typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
-
   static Destroyer destroyCXXObject;
 
   void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
@@ -1257,6 +1292,15 @@ class CIRGenFunction : public CIRGenTypeCache {
   void pushDestroy(CleanupKind kind, Address addr, QualType type,
                    Destroyer *destroyer);
 
+  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
+                                   QualType type, Destroyer *destroyer,
+                                   bool useEHCleanupForArray);
+
+  /// Promote a single lifetime-extended cleanup entry onto the EH scope stack.
+  /// Defined in CIRGenDecl.cpp where the concrete cleanup types are visible.
+  void pushLifetimeExtendedCleanupToEHStack(
+      const LifetimeExtendedCleanupEntry &entry);
+
   Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
 
   /// Start generating a thunk function.

diff  --git a/clang/lib/CIR/CodeGen/EHScopeStack.h 
b/clang/lib/CIR/CodeGen/EHScopeStack.h
index 09b78820a2587..308d98f108101 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -206,6 +206,21 @@ class EHScopeStack {
     return new (buffer) T(n, a...);
   }
 
+  /// Push a cleanup by copying a serialized cleanup object from the
+  /// LifetimeExtendedCleanupStack onto the EH scope stack. This is used when
+  /// a full-expression's RunCleanupsScope exits: cleanups that were deferred
+  /// for lifetime extension (e.g. destroying a temporary bound to a local
+  /// reference) are promoted from the byte buffer to the enclosing scope's
+  /// EH stack so they run when that scope ends.
+  ///
+  /// The memcpy is safe because Cleanup subclasses are required to be POD-like
+  /// (see the Cleanup class comment), and the vtable pointer is part of the
+  /// copied bytes, so the clone dispatches to the correct emit() override.
+  void pushCopyOfCleanup(CleanupKind kind, const void *cleanup, size_t size) {
+    void *buffer = pushCleanup(kind, size);
+    std::memcpy(buffer, cleanup, size);
+  }
+
   void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
 
   /// Pops a cleanup scope off the stack.  This is private to 
CIRGenCleanup.cpp.

diff  --git a/clang/test/CIR/CodeGen/cleanup.cpp 
b/clang/test/CIR/CodeGen/cleanup.cpp
index 542ca4b6c9499..4097522525fc7 100644
--- a/clang/test/CIR/CodeGen/cleanup.cpp
+++ b/clang/test/CIR/CodeGen/cleanup.cpp
@@ -149,3 +149,18 @@ void complex_expr_with_cleanup_inside_cleanupscope() {
 // CHECK:   }
 // CHECK:   %[[RELOAD:.*]] = cir.load {{.*}} %[[TEMP_ADDR]] : 
!cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CHECK:   cir.store {{.*}} %[[RELOAD]], %[[RESULT]] : !cir.complex<!s32i>, 
!cir.ptr<!cir.complex<!s32i>>
+
+void test_cleanup_with_automatic_storage_duration() {
+  const Struk &ref = Struk{};
+}
+
+// CHECK: cir.func{{.*}} @_Z44test_cleanup_with_automatic_storage_durationv()
+// CHECK:   %[[REF_TMP:.*]] = cir.alloca !rec_Struk, !cir.ptr<!rec_Struk>, 
["ref.tmp0"]
+// CHECK:   %[[REF:.*]] = cir.alloca !cir.ptr<!rec_Struk>, 
!cir.ptr<!cir.ptr<!rec_Struk>>, ["ref", init, const]
+// CHECK:   cir.cleanup.scope {
+// CHECK:     cir.store{{.*}} %[[REF_TMP]], %[[REF]]
+// CHECK:     cir.yield
+// CHECK:   } cleanup normal {
+// CHECK:     cir.call @_ZN5StrukD1Ev(%[[REF_TMP]]) nothrow
+// CHECK:     cir.yield
+// CHECK:   }


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to