https://github.com/bcardosolopes updated 
https://github.com/llvm/llvm-project/pull/179828

>From e04b1a58ebc81f7401fc15079c4111e3b8f1a839 Mon Sep 17 00:00:00 2001
From: Bruno Cardoso Lopes <[email protected]>
Date: Fri, 30 Jan 2026 22:49:28 -0800
Subject: [PATCH] [CIR][LoweringPrepare] Emit guard variables for static local
 initialization

This implements the lowering of static local variables with the Itanium C++ ABI
guard variable pattern in LoweringPrepare.

When a GlobalOp has the static_local attribute and a ctor region, this pass:
1. Creates a guard variable global (mangled name from AST)
2. Inserts the guard check pattern at each GetGlobalOp use site:
   - Load guard byte with acquire ordering
   - If zero, call __cxa_guard_acquire
   - If acquire returns non-zero, inline the ctor region code
   - Call __cxa_guard_release
3. Clears the static_local attribute and ctor region from the GlobalOp
---
 clang/include/clang/CIR/MissingFeatures.h     |   1 +
 .../Dialect/Transforms/LoweringPrepare.cpp    | 399 +++++++++++++++++-
 clang/test/CIR/CodeGen/static-local.cpp       |  33 ++
 3 files changed, 432 insertions(+), 1 deletion(-)

diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 5cb0991326a3c..132616df531c5 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -286,6 +286,7 @@ struct MissingFeatures {
   static bool getRuntimeFunctionDecl() { return false; }
   static bool globalViewIndices() { return false; }
   static bool globalViewIntLowering() { return false; }
+  static bool guardAbortOnException() { return false; }
   static bool handleBuiltinICEArguments() { return false; }
   static bool hip() { return false; }
   static bool incrementProfileCounter() { return false; }
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index b7cc8775d298f..b5b5b7067e35a 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -9,13 +9,18 @@
 #include "PassDetail.h"
 #include "mlir/IR/Attributes.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
 #include "clang/Basic/Module.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetCXXABI.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
 #include "clang/CIR/Dialect/IR/CIRAttrs.h"
+#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
 #include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
 #include "clang/CIR/MissingFeatures.h"
 #include "llvm/Support/Path.h"
 
@@ -99,6 +104,77 @@ struct LoweringPreparePass
       cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage,
       cir::VisibilityKind visibility = cir::VisibilityKind::Default);
 
+  /// Handle static local variable initialization with guard variables.
+  void handleStaticLocal(cir::GlobalOp globalOp, cir::GetGlobalOp getGlobalOp);
+
+  /// Get or create __cxa_guard_acquire function.
+  cir::FuncOp getGuardAcquireFn(cir::PointerType guardPtrTy);
+
+  /// Get or create __cxa_guard_release function.
+  cir::FuncOp getGuardReleaseFn(cir::PointerType guardPtrTy);
+
+  /// Create a guard global variable for a static local.
+  cir::GlobalOp createGuardGlobalOp(CIRBaseBuilderTy &builder,
+                                    mlir::Location loc, llvm::StringRef name,
+                                    cir::IntType guardTy,
+                                    cir::GlobalLinkageKind linkage);
+
+  /// Get the guard variable for a static local declaration.
+  cir::GlobalOp
+  getStaticLocalDeclGuardAddress(cir::ASTVarDeclInterface varDecl) {
+    auto it = staticLocalDeclGuardMap.find(varDecl.getVarDecl());
+    if (it != staticLocalDeclGuardMap.end())
+      return it->second;
+    return nullptr;
+  }
+
+  /// Set the guard variable for a static local declaration.
+  void setStaticLocalDeclGuardAddress(cir::ASTVarDeclInterface varDecl,
+                                      cir::GlobalOp guard) {
+    staticLocalDeclGuardMap[varDecl.getVarDecl()] = guard;
+  }
+
+  /// Get or create the guard variable for a static local declaration.
+  cir::GlobalOp getOrCreateStaticLocalDeclGuardAddress(
+      CIRBaseBuilderTy &builder, cir::GlobalOp globalOp,
+      cir::ASTVarDeclInterface varDecl, cir::IntType guardTy,
+      clang::CharUnits guardAlignment) {
+    cir::GlobalOp guard = getStaticLocalDeclGuardAddress(varDecl);
+    if (!guard) {
+      // Mangle the name for the guard.
+      llvm::SmallString<256> guardName;
+      {
+        llvm::raw_svector_ostream out(guardName);
+        varDecl.mangleStaticGuardVariable(out);
+      }
+
+      // Create the guard variable with a zero-initializer.
+      guard = createGuardGlobalOp(builder, globalOp->getLoc(), guardName,
+                                  guardTy, globalOp.getLinkage());
+      guard.setInitialValueAttr(cir::IntAttr::get(guardTy, 0));
+      guard.setDSOLocal(globalOp.getDsoLocal());
+      guard.setAlignment(guardAlignment.getAsAlign().value());
+
+      // The ABI says: "It is suggested that it be emitted in the same COMDAT
+      // group as the associated data object." In practice, this doesn't work
+      // for non-ELF and non-Wasm object formats, so only do it for ELF and
+      // Wasm.
+      bool hasComdat = globalOp.getComdat();
+      const llvm::Triple &triple = astCtx->getTargetInfo().getTriple();
+      if (!varDecl.getVarDecl()->isLocalVarDecl() && hasComdat &&
+          (triple.isOSBinFormatELF() || triple.isOSBinFormatWasm())) {
+        globalOp->emitError("NYI: guard COMDAT for non-local variables");
+        return {};
+      } else if (hasComdat && globalOp.isWeakForLinker()) {
+        globalOp->emitError("NYI: guard COMDAT for weak linkage");
+        return {};
+      }
+
+      setStaticLocalDeclGuardAddress(varDecl, guard);
+    }
+    return guard;
+  }
+
   ///
   /// AST related
   /// -----------
@@ -112,11 +188,119 @@ struct LoweringPreparePass
   llvm::StringMap<uint32_t> dynamicInitializerNames;
   llvm::SmallVector<cir::FuncOp> dynamicInitializers;
 
+  /// Tracks guard variables for static locals.
+  llvm::DenseMap<const clang::VarDecl *, cir::GlobalOp> staticLocalDeclGuardMap;
+
   /// List of ctors and their priorities to be called before main()
   llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalCtorList;
   /// List of dtors and their priorities to be called when unloading module.
   llvm::SmallVector<std::pair<std::string, uint32_t>, 4> globalDtorList;
 
+  /// Returns true if the target uses ARM-style guard variables for static
+  /// local initialization (32-bit guard, check bit 0 only).
+  bool useARMGuardVarABI() const {
+    switch (astCtx->getCXXABIKind()) {
+    case clang::TargetCXXABI::GenericARM:
+    case clang::TargetCXXABI::iOS:
+    case clang::TargetCXXABI::WatchOS:
+    case clang::TargetCXXABI::GenericAArch64:
+    case clang::TargetCXXABI::WebAssembly:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /// Emit the guarded initialization for a static local variable.
+  /// This handles the if/else structure after the guard byte check,
+  /// following OG's ItaniumCXXABI::EmitGuardedInit skeleton.
+  void emitCXXGuardedInitIf(CIRBaseBuilderTy &builder, cir::GlobalOp globalOp,
+                            cir::ASTVarDeclInterface varDecl,
+                            mlir::Value guardPtr, cir::PointerType guardPtrTy,
+                            bool threadsafe) {
+    auto loc = globalOp->getLoc();
+
+    // The semantics of dynamic initialization of variables with static or
+    // thread storage duration depends on whether they are declared at
+    // block-scope. The initialization of such variables at block-scope can be
+    // aborted with an exception and later retried (per C++20 [stmt.dcl]p4),
+    // and recursive entry to their initialization has undefined behavior (also
+    // per C++20 [stmt.dcl]p4). For such variables declared at non-block scope,
+    // exceptions lead to termination (per C++20 [except.terminate]p1), and
+    // recursive references to the variables are governed only by the lifetime
+    // rules (per C++20 [class.cdtor]p2), which means such references are
+    // perfectly fine as long as they avoid touching memory. As a result,
+    // block-scope variables must not be marked as initialized until after
+    // initialization completes (unless the mark is reverted following an
+    // exception), but non-block-scope variables must be marked prior to
+    // initialization so that recursive accesses during initialization do not
+    // restart initialization.
+
+    // Variables used when coping with thread-safe statics and exceptions.
+    if (threadsafe) {
+      // Call __cxa_guard_acquire.
+      cir::CallOp acquireCall = builder.createCallOp(
+          loc, getGuardAcquireFn(guardPtrTy), mlir::ValueRange{guardPtr});
+      mlir::Value acquireResult = acquireCall.getResult();
+
+      auto acquireZero = builder.getConstantInt(
+          loc, mlir::cast<cir::IntType>(acquireResult.getType()), 0);
+      auto shouldInit = builder.createCompare(loc, cir::CmpOpKind::ne,
+                                              acquireResult, acquireZero);
+
+      // Create the IfOp for the shouldInit check.
+      // Pass an empty callback to avoid auto-creating a yield terminator.
+      auto ifOp =
+          cir::IfOp::create(builder, loc, shouldInit, /*withElseRegion=*/false,
+                            [](mlir::OpBuilder &, mlir::Location) {});
+      mlir::OpBuilder::InsertionGuard insertGuard(builder);
+      builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
+
+      // Call __cxa_guard_abort along the exceptional edge.
+      // OG: CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
+      assert(!cir::MissingFeatures::guardAbortOnException());
+
+      // Emit the initializer and add a global destructor if appropriate.
+      auto &ctorRegion = globalOp.getCtorRegion();
+      assert(!ctorRegion.empty() && "This should never be empty here.");
+      if (!ctorRegion.hasOneBlock())
+        llvm_unreachable("Multiple blocks NYI");
+      mlir::Block &block = ctorRegion.front();
+      mlir::Block *insertBlock = builder.getInsertionBlock();
+      insertBlock->getOperations().splice(insertBlock->end(),
+                                          block.getOperations(), block.begin(),
+                                          std::prev(block.end()));
+      builder.setInsertionPointToEnd(insertBlock);
+      ctorRegion.getBlocks().clear();
+
+      // Pop the guard-abort cleanup if we pushed one.
+      // OG: CGF.PopCleanupBlock();
+      assert(!cir::MissingFeatures::guardAbortOnException());
+
+      // Call __cxa_guard_release. This cannot throw.
+      builder.createCallOp(loc, getGuardReleaseFn(guardPtrTy),
+                           mlir::ValueRange{guardPtr});
+
+      builder.createYield(loc);
+    } else if (!varDecl.getVarDecl()->isLocalVarDecl()) {
+      // For non-local variables, store 1 into the first byte of the guard
+      // variable before the object initialization begins so that references
+      // to the variable during initialization don't restart initialization.
+      // OG: Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1), ...);
+      // Then: CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
+      globalOp->emitError("NYI: non-threadsafe init for non-local variables");
+      return;
+    } else {
+      // For local variables, store 1 into the first byte of the guard variable
+      // after the object initialization completes so that initialization is
+      // retried if initialization is interrupted by an exception.
+      globalOp->emitError("NYI: non-threadsafe init for local variables");
+      return;
+    }
+
+    builder.createYield(loc); // Outermost IfOp
+  }
+
   void setASTContext(clang::ASTContext *c) { astCtx = c; }
 };
 
@@ -871,7 +1055,210 @@ LoweringPreparePass::buildCXXGlobalVarDeclInitFunc(cir::GlobalOp op) {
   return f;
 }
 
+cir::FuncOp
+LoweringPreparePass::getGuardAcquireFn(cir::PointerType guardPtrTy) {
+  // int __cxa_guard_acquire(__guard *guard_object);
+  CIRBaseBuilderTy builder(getContext());
+  mlir::OpBuilder::InsertionGuard ipGuard{builder};
+  builder.setInsertionPointToStart(mlirModule.getBody());
+  mlir::Location loc = mlirModule.getLoc();
+  cir::IntType intTy = cir::IntType::get(&getContext(), 32, /*isSigned=*/true);
+  auto fnType = cir::FuncType::get({guardPtrTy}, intTy);
+  return buildRuntimeFunction(builder, "__cxa_guard_acquire", loc, fnType);
+}
+
+cir::FuncOp
+LoweringPreparePass::getGuardReleaseFn(cir::PointerType guardPtrTy) {
+  // void __cxa_guard_release(__guard *guard_object);
+  CIRBaseBuilderTy builder(getContext());
+  mlir::OpBuilder::InsertionGuard ipGuard{builder};
+  builder.setInsertionPointToStart(mlirModule.getBody());
+  mlir::Location loc = mlirModule.getLoc();
+  cir::VoidType voidTy = cir::VoidType::get(&getContext());
+  auto fnType = cir::FuncType::get({guardPtrTy}, voidTy);
+  return buildRuntimeFunction(builder, "__cxa_guard_release", loc, fnType);
+}
+
+cir::GlobalOp LoweringPreparePass::createGuardGlobalOp(
+    CIRBaseBuilderTy &builder, mlir::Location loc, llvm::StringRef name,
+    cir::IntType guardTy, cir::GlobalLinkageKind linkage) {
+  mlir::OpBuilder::InsertionGuard guard(builder);
+  builder.setInsertionPointToStart(mlirModule.getBody());
+  cir::GlobalOp g = cir::GlobalOp::create(builder, loc, name, guardTy);
+  g.setLinkageAttr(
+      cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+  mlir::SymbolTable::setSymbolVisibility(
+      g, mlir::SymbolTable::Visibility::Private);
+  return g;
+}
+
+void LoweringPreparePass::handleStaticLocal(cir::GlobalOp globalOp,
+                                            cir::GetGlobalOp getGlobalOp) {
+  CIRBaseBuilderTy builder(getContext());
+
+  std::optional<cir::ASTVarDeclInterface> astOption = globalOp.getAst();
+  assert(astOption.has_value());
+  cir::ASTVarDeclInterface varDecl = astOption.value();
+
+  builder.setInsertionPointAfter(getGlobalOp);
+  mlir::Block *getGlobalOpBlock = builder.getInsertionBlock();
+
+  // Remove the terminator temporarily - we'll add it back at the end.
+  mlir::Operation *ret = getGlobalOpBlock->getTerminator();
+  ret->remove();
+  builder.setInsertionPointAfter(getGlobalOp);
+
+  // Inline variables that weren't instantiated from variable templates have
+  // partially-ordered initialization within their translation unit.
+  bool nonTemplateInline =
+      varDecl.isInline() &&
+      !clang::isTemplateInstantiation(varDecl.getTemplateSpecializationKind());
+
+  // Inline namespace-scope variables require guarded initialization in a
+  // __cxx_global_var_init function. This is not yet implemented.
+  if (nonTemplateInline) {
+    globalOp->emitError(
+        "NYI: guarded initialization for inline namespace-scope variables");
+    return;
+  }
+
+  // We only need to use thread-safe statics for local non-TLS variables and
+  // inline variables; other global initialization is always single-threaded
+  // or (through lazy dynamic loading in multiple threads) unsequenced.
+  bool threadsafe = astCtx->getLangOpts().ThreadsafeStatics &&
+                    (varDecl.isLocalVarDecl() || nonTemplateInline) &&
+                    !varDecl.getTLSKind();
+
+  // TLS variables need special handling - the guard must also be thread-local.
+  if (varDecl.getTLSKind()) {
+    globalOp->emitError("NYI: guarded initialization for thread-local statics");
+    return;
+  }
+
+  // If we have a global variable with internal linkage and thread-safe statics
+  // are disabled, we can just let the guard variable be of type i8.
+  bool useInt8GuardVariable = !threadsafe && globalOp.hasInternalLinkage();
+  if (useInt8GuardVariable) {
+    globalOp->emitError("NYI: int8 guard variables for non-threadsafe statics");
+    return;
+  }
+
+  // Guard variables are 64 bits in the generic ABI and size width on ARM
+  // (i.e. 32-bit on AArch32, 64-bit on AArch64).
+  if (useARMGuardVarABI()) {
+    globalOp->emitError("NYI: ARM-style guard variables for static locals");
+    return;
+  }
+  cir::IntType guardTy =
+      cir::IntType::get(&getContext(), 64, /*isSigned=*/true);
+  cir::CIRDataLayout dataLayout(mlirModule);
+  clang::CharUnits guardAlignment =
+      clang::CharUnits::fromQuantity(dataLayout.getABITypeAlign(guardTy));
+  auto guardPtrTy = cir::PointerType::get(guardTy);
+
+  // Create the guard variable if we don't already have it.
+  cir::GlobalOp guard = getOrCreateStaticLocalDeclGuardAddress(
+      builder, globalOp, varDecl, guardTy, guardAlignment);
+
+  mlir::Value guardPtr = builder.createGetGlobal(guard, /*threadLocal*/ false);
+
+  // Test whether the variable has completed initialization.
+  //
+  // Itanium C++ ABI 3.3.2:
+  //   The following is pseudo-code showing how these functions can be used:
+  //     if (obj_guard.first_byte == 0) {
+  //       if ( __cxa_guard_acquire (&obj_guard) ) {
+  //         try {
+  //           ... initialize the object ...;
+  //         } catch (...) {
+  //            __cxa_guard_abort (&obj_guard);
+  //            throw;
+  //         }
+  //         ... queue object destructor with __cxa_atexit() ...;
+  //         __cxa_guard_release (&obj_guard);
+  //       }
+  //     }
+  //
+  // If threadsafe statics are enabled, but we don't have inline atomics, just
+  // call __cxa_guard_acquire unconditionally. The "inline" check isn't
+  // actually inline, and the user might not expect calls to __atomic libcalls.
+  unsigned maxInlineWidthInBits =
+      astCtx->getTargetInfo().getMaxAtomicInlineWidth();
+
+  if (!threadsafe || maxInlineWidthInBits) {
+    // Load the first byte of the guard variable.
+    auto bytePtrTy = cir::PointerType::get(builder.getSIntNTy(8));
+    mlir::Value bytePtr = builder.createBitcast(guardPtr, bytePtrTy);
+    mlir::Value guardLoad = builder.createAlignedLoad(
+        getGlobalOp.getLoc(), bytePtr, guardAlignment.getAsAlign().value());
+
+    // Itanium ABI:
+    //   An implementation supporting thread-safety on multiprocessor
+    //   systems must also guarantee that references to the initialized
+    //   object do not occur before the load of the initialization flag.
+    //
+    // In LLVM, we do this by marking the load Acquire.
+    if (threadsafe) {
+      auto loadOp = mlir::cast<cir::LoadOp>(guardLoad.getDefiningOp());
+      loadOp.setMemOrder(cir::MemOrder::Acquire);
+      loadOp.setSyncScope(cir::SyncScopeKind::System);
+    }
+
+    // For ARM, we should only check the first bit, rather than the entire byte:
+    //
+    // ARM C++ ABI 3.2.3.1:
+    //   To support the potential use of initialization guard variables
+    //   as semaphores that are the target of ARM SWP and LDREX/STREX
+    //   synchronizing instructions we define a static initialization
+    //   guard variable to be a 4-byte aligned, 4-byte word with the
+    //   following inline access protocol.
+    //     #define INITIALIZED 1
+    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
+    //       if (__cxa_guard_acquire(&obj_guard))
+    //         ...
+    //     }
+    //
+    // and similarly for ARM64:
+    //
+    // ARM64 C++ ABI 3.2.2:
+    //   This ABI instead only specifies the value bit 0 of the static guard
+    //   variable; all other bits are platform defined. Bit 0 shall be 0 when
+    //   the variable is not initialized and 1 when it is.
+    if (useARMGuardVarABI()) {
+      globalOp->emitError(
+          "NYI: ARM-style guard variable check (bit 0 only) for static locals");
+      return;
+    }
+
+    // Check if the first byte of the guard variable is zero.
+    auto zero = builder.getConstantInt(
+        getGlobalOp.getLoc(), mlir::cast<cir::IntType>(guardLoad.getType()), 0);
+    auto needsInit = builder.createCompare(getGlobalOp.getLoc(),
+                                           cir::CmpOpKind::eq, guardLoad, zero);
+
+    // Build the guarded initialization inside an if block.
+    cir::IfOp::create(builder, globalOp.getLoc(), needsInit,
+                      /*withElseRegion=*/false,
+                      [&](mlir::OpBuilder &, mlir::Location) {
+                        emitCXXGuardedInitIf(builder, globalOp, varDecl,
+                                             guardPtr, guardPtrTy, threadsafe);
+                      });
+  } else {
+    // Threadsafe statics without inline atomics - call __cxa_guard_acquire
+    // unconditionally without the initial guard byte check.
+    globalOp->emitError("NYI: guarded init without inline atomics support");
+    return;
+  }
+
+  // Insert the removed terminator back.
+  builder.getInsertionBlock()->push_back(ret);
+}
+
 void LoweringPreparePass::lowerGlobalOp(GlobalOp op) {
+  // Static locals are handled separately via handleStaticLocal.
+  if (op.getStaticLocal())
+    return;
+
   mlir::Region &ctorRegion = op.getCtorRegion();
   mlir::Region &dtorRegion = op.getDtorRegion();
 
@@ -1085,6 +1472,15 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
     lowerComplexMulOp(complexMul);
   } else if (auto glob = mlir::dyn_cast<cir::GlobalOp>(op)) {
     lowerGlobalOp(glob);
+  } else if (auto getGlobal = mlir::dyn_cast<cir::GetGlobalOp>(op)) {
+    // Handle static local variables with guard variables.
+    if (getGlobal.getStaticLocal()) {
+      auto globalOp = mlir::dyn_cast_or_null<cir::GlobalOp>(
+          mlir::SymbolTable::lookupNearestSymbolFrom(getGlobal,
+                                                     getGlobal.getNameAttr()));
+      if (globalOp && globalOp.getStaticLocal())
+        handleStaticLocal(globalOp, getGlobal);
+    }
   } else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op)) {
     lowerUnaryOp(unary);
   } else if (auto callOp = dyn_cast<cir::CallOp>(op)) {
@@ -1107,7 +1503,8 @@ void LoweringPreparePass::runOnOperation() {
   op->walk([&](mlir::Operation *op) {
     if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
                   cir::ComplexMulOp, cir::ComplexDivOp, cir::DynamicCastOp,
-                  cir::FuncOp, cir::CallOp, cir::GlobalOp, cir::UnaryOp>(op))
+                  cir::FuncOp, cir::CallOp, cir::GetGlobalOp, cir::GlobalOp,
+                  cir::UnaryOp>(op))
       opsToTransform.push_back(op);
   });
 
diff --git a/clang/test/CIR/CodeGen/static-local.cpp b/clang/test/CIR/CodeGen/static-local.cpp
index 0297028e96438..61b628fa680ae 100644
--- a/clang/test/CIR/CodeGen/static-local.cpp
+++ b/clang/test/CIR/CodeGen/static-local.cpp
@@ -1,4 +1,6 @@
 // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2>&1 | FileCheck %s --check-prefix=CIR-BEFORE-LPP
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s --check-prefix=CIR
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o - | FileCheck %s --check-prefix=LLVM
 // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=OGCG
 
 class A {
@@ -6,8 +8,10 @@ class A {
   A();
 };
 
+void use(A*);
 void f() {
   static A a;
+  use(&a);
 }
 
 // CIR-BEFORE-LPP: cir.global "private" internal dso_local static_local @_ZZ1fvE1a = ctor : !rec_A {
@@ -17,8 +21,36 @@ void f() {
 
 // CIR-BEFORE-LPP: cir.func no_inline dso_local @_Z1fv()
 // CIR-BEFORE-LPP:   %[[VAR:.*]] = cir.get_global static_local @_ZZ1fvE1a : !cir.ptr<!rec_A>
+// CIR-BEFORE-LPP:   cir.call @_Z3useP1A(%[[VAR]])
 // CIR-BEFORE-LPP:   cir.return
 
+// CIR: cir.global "private" internal dso_local @_ZGVZ1fvE1a = #cir.int<0> : !s64i
+// CIR: cir.func{{.*}}@_Z1fv()
+// CIR:   %[[ADDR:.*]] = cir.get_global static_local @_ZZ1fvE1a : !cir.ptr<!rec_A>
+// CIR:   %[[GUARD:.*]] = cir.get_global @_ZGVZ1fvE1a : !cir.ptr<!s64i>
+// CIR:   %[[GUARD_BYTE_PTR:.*]] = cir.cast bitcast %[[GUARD]] : !cir.ptr<!s64i> -> !cir.ptr<!s8i>
+// CIR:   %[[GUARD_LOAD:.*]] = cir.load{{.*}}%[[GUARD_BYTE_PTR]]
+// CIR:   %[[ZERO:.*]] = cir.const #cir.int<0>
+// CIR:   %[[IS_UNINIT:.*]] = cir.cmp(eq, %[[GUARD_LOAD]], %[[ZERO]])
+// CIR:   cir.if %[[IS_UNINIT]]
+// CIR:     cir.call @__cxa_guard_acquire
+// CIR:     cir.if
+// CIR:       cir.call @_ZN1AC1Ev
+// CIR:       cir.call @__cxa_guard_release
+// CIR:   cir.call @_Z3useP1A(%[[ADDR]])
+// CIR:   cir.return
+
+// LLVM: @_ZGVZ1fvE1a = internal global i64 0
+// LLVM: define{{.*}}void @_Z1fv()
+// LLVM:   %[[GUARD:.*]] = load atomic i8, ptr @_ZGVZ1fvE1a acquire
+// LLVM:   %[[IS_UNINIT:.*]] = icmp eq i8 %[[GUARD]], 0
+// LLVM:   br i1 %[[IS_UNINIT]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+// LLVM: call i32 @__cxa_guard_acquire
+// LLVM: call void @_ZN1AC1Ev
+// LLVM: call void @__cxa_guard_release
+// LLVM: call void @_Z3useP1A(ptr @_ZZ1fvE1a)
+// LLVM: ret void
+
 // OGCG: @_ZGVZ1fvE1a = internal global i64 0
 // OGCG: define{{.*}}void @_Z1fv()
 // OGCG:   %[[GUARD:.*]] = load atomic i8, ptr @_ZGVZ1fvE1a acquire
@@ -27,4 +59,5 @@ void f() {
 // OGCG: call i32 @__cxa_guard_acquire
 // OGCG: call void @_ZN1AC1Ev
 // OGCG: call void @__cxa_guard_release
+// OGCG: call void @_Z3useP1A(ptr {{.*}}@_ZZ1fvE1a)
 // OGCG: ret void

_______________________________________________
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits

Reply via email to