Author: Erich Keane
Date: 2026-01-15T11:28:23-08:00
New Revision: 41dd36b8f8c43682d48ffd387a53d318e899e236

URL: 
https://github.com/llvm/llvm-project/commit/41dd36b8f8c43682d48ffd387a53d318e899e236
DIFF: 
https://github.com/llvm/llvm-project/commit/41dd36b8f8c43682d48ffd387a53d318e899e236.diff

LOG: [CIR] Implement codegen for inline assembly with output operands (#176006)

Part of: #153267

This is a continuation of: #154014

This patch handles output operands for inline assembly. It takes the
original patch, adds some additional tests, responds to all of the
original comments, and does some of my personal cleanup (including
extracting addVariableConstraints into a separate patch).
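
For illustration only (this sketch is not taken from the patch, and the
function name is made up): a register output ("=r") becomes a result of
the cir.asm op and is stored back afterwards, while a memory output
("=m") is passed by-reference and gets an "=*m" constraint:

    int example(void) {
      int r, m;
      __asm__("movl $1, %0" : "=r"(r)); // register output, returned by-value
      __asm__("movl $2, %0" : "=m"(m)); // memory output, passed by-reference
      return r + m;
    }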

---------

Co-authored-by: Iris Shi <[email protected]>

Added: 
    

Modified: 
    clang/include/clang/CIR/MissingFeatures.h
    clang/lib/CIR/CodeGen/CIRGenAsm.cpp
    clang/lib/CIR/CodeGen/CIRGenModule.cpp
    clang/lib/CIR/CodeGen/CIRGenModule.h
    clang/lib/CIR/CodeGen/TargetInfo.h
    clang/test/CIR/CodeGen/inline-asm.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 2135f6cc7bbc9..39818417fc3d0 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -216,9 +216,10 @@ struct MissingFeatures {
   static bool asmGoto() { return false; }
   static bool asmInputOperands() { return false; }
   static bool asmLabelAttr() { return false; }
+  static bool asmLLVMAssume() { return false; }
   static bool asmMemoryEffects() { return false; }
-  static bool asmOutputOperands() { return false; }
   static bool asmUnwindClobber() { return false; }
+  static bool asmVectorType() { return false; }
   static bool assignMemcpyizer() { return false; }
   static bool astVarDeclInterface() { return false; }
   static bool attributeBuiltin() { return false; }

diff --git a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
index 88a7e85cb2a64..7032781dc97f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAsm.cpp
@@ -10,6 +10,8 @@
 //
 
//===----------------------------------------------------------------------===//
 
+#include "clang/Basic/DiagnosticSema.h"
+
 #include "CIRGenFunction.h"
 #include "clang/CIR/MissingFeatures.h"
 
@@ -83,16 +85,153 @@ static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &s,
   }
 }
 
+static void
+collectInOutConstraintInfos(const CIRGenFunction &cgf, const AsmStmt &s,
+                            SmallVectorImpl<TargetInfo::ConstraintInfo> &out,
+                            SmallVectorImpl<TargetInfo::ConstraintInfo> &in) {
+
+  for (unsigned i = 0, e = s.getNumOutputs(); i != e; ++i) {
+    StringRef name;
+    if (const GCCAsmStmt *gas = dyn_cast<GCCAsmStmt>(&s))
+      name = gas->getOutputName(i);
+    TargetInfo::ConstraintInfo info(s.getOutputConstraint(i), name);
+    // `validateOutputConstraint` modifies the `info` object by setting the
+    // read/write, clobber, allows-register, and allows-memory properties.
+    bool isValid = cgf.getTarget().validateOutputConstraint(info);
+    (void)isValid;
+    assert(isValid && "Failed to parse output constraint");
+    out.push_back(info);
+  }
+
+  for (unsigned i = 0, e = s.getNumInputs(); i != e; ++i) {
+    StringRef name;
+    if (const GCCAsmStmt *gas = dyn_cast<GCCAsmStmt>(&s))
+      name = gas->getInputName(i);
+    TargetInfo::ConstraintInfo info(s.getInputConstraint(i), name);
+    // `validateInputConstraint` modifies the `info` object by setting the
+    // read/write, clobber, allows-register, and allows-memory properties.
+    bool isValid = cgf.getTarget().validateInputConstraint(out, info);
+    assert(isValid && "Failed to parse input constraint");
+    (void)isValid;
+    in.push_back(info);
+  }
+}
+
+static void emitAsmStores(CIRGenFunction &cgf, const AsmStmt &s,
+                          const llvm::ArrayRef<mlir::Value> regResults,
+                          const llvm::ArrayRef<mlir::Type> resultRegTypes,
+                          const llvm::ArrayRef<mlir::Type> resultTruncRegTypes,
+                          const llvm::ArrayRef<LValue> resultRegDests,
+                          const llvm::ArrayRef<QualType> resultRegQualTys,
+                          const llvm::BitVector &resultTypeRequiresCast,
+                          const llvm::BitVector &resultRegIsFlagReg) {
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  CIRGenModule &cgm = cgf.cgm;
+  mlir::MLIRContext *ctx = builder.getContext();
+
+  assert(regResults.size() == resultRegTypes.size());
+  assert(regResults.size() == resultTruncRegTypes.size());
+  assert(regResults.size() == resultRegDests.size());
+
+  // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
+  // in which case its size may grow.
+  assert(resultTypeRequiresCast.size() <= resultRegDests.size());
+  assert(resultRegIsFlagReg.size() <= resultRegDests.size());
+
+  for (unsigned i = 0, e = regResults.size(); i != e; ++i) {
+    mlir::Value tmp = regResults[i];
+    mlir::Type truncTy = resultTruncRegTypes[i];
+
+    if (i < resultRegIsFlagReg.size() && resultRegIsFlagReg[i])
+      assert(!cir::MissingFeatures::asmLLVMAssume());
+
+    // If the result type of the LLVM IR asm doesn't match the result type of
+    // the expression, do the conversion.
+    if (resultRegTypes[i] != truncTy) {
+
+      // Truncate the integer result to the right size; note that truncTy
+      // can be a pointer.
+      if (mlir::isa<cir::FPTypeInterface>(truncTy)) {
+        tmp = builder.createFloatingCast(tmp, truncTy);
+      } else if (isa<cir::PointerType>(truncTy) &&
+                 isa<cir::IntType>(tmp.getType())) {
+        uint64_t resSize = cgm.getDataLayout().getTypeSizeInBits(truncTy);
+        tmp = builder.createIntCast(
+            tmp, cir::IntType::get(ctx, (unsigned)resSize, false));
+        tmp = builder.createIntToPtr(tmp, truncTy);
+      } else if (isa<cir::PointerType>(tmp.getType()) &&
+                 isa<cir::IntType>(truncTy)) {
+        uint64_t tmpSize = cgm.getDataLayout().getTypeSizeInBits(tmp.getType());
+        tmp = builder.createPtrToInt(
+            tmp, cir::IntType::get(ctx, (unsigned)tmpSize, false));
+        tmp = builder.createIntCast(tmp, truncTy);
+      } else if (isa<cir::IntType>(truncTy)) {
+        tmp = builder.createIntCast(tmp, truncTy);
+      } else if (isa<cir::VectorType>(truncTy)) {
+        assert(!cir::MissingFeatures::asmVectorType());
+      }
+    }
+
+    LValue dest = resultRegDests[i];
+    // ResultTypeRequiresCast elements correspond to the first
+    // ResultTypeRequiresCast.size() elements of RegResults.
+    if ((i < resultTypeRequiresCast.size()) && resultTypeRequiresCast[i]) {
+      unsigned size = cgf.getContext().getTypeSize(resultRegQualTys[i]);
+      Address addr =
+          dest.getAddress().withElementType(builder, resultRegTypes[i]);
+      if (cgm.getTargetCIRGenInfo().isScalarizableAsmOperand(cgf, truncTy)) {
+        builder.createStore(cgf.getLoc(s.getAsmLoc()), tmp, addr);
+        continue;
+      }
+
+      QualType ty =
+          cgf.getContext().getIntTypeForBitwidth(size, /*Signed=*/false);
+      if (ty.isNull()) {
+        const Expr *outExpr = s.getOutputExpr(i);
+        cgm.getDiags().Report(outExpr->getExprLoc(),
+                              diag::err_store_value_to_reg);
+        return;
+      }
+      dest = cgf.makeAddrLValue(addr, ty);
+    }
+
+    cgf.emitStoreThroughLValue(RValue::get(tmp), dest);
+  }
+}
+
 mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
   // Assemble the final asm string.
   std::string asmString = s.generateAsmString(getContext());
+  SourceLocation srcLoc = s.getAsmLoc();
+  mlir::Location loc = getLoc(srcLoc);
+
+  // Get all the output and input constraints together.
+  SmallVector<TargetInfo::ConstraintInfo> outputConstraintInfos;
+  SmallVector<TargetInfo::ConstraintInfo> inputConstraintInfos;
+  collectInOutConstraintInfos(*this, s, outputConstraintInfos,
+                              inputConstraintInfos);
 
   bool isGCCAsmGoto = false;
 
   std::string constraints;
-  std::vector<mlir::Value> outArgs;
-  std::vector<mlir::Value> inArgs;
-  std::vector<mlir::Value> inOutArgs;
+  SmallVector<LValue> resultRegDests;
+  SmallVector<QualType> resultRegQualTys;
+  SmallVector<mlir::Type> resultRegTypes;
+  SmallVector<mlir::Type> resultTruncRegTypes;
+  SmallVector<mlir::Type> argTypes;
+  SmallVector<mlir::Type> argElemTypes;
+  SmallVector<mlir::Value> args;
+  SmallVector<mlir::Value> outArgs;
+  SmallVector<mlir::Value> inArgs;
+  SmallVector<mlir::Value> inOutArgs;
+  llvm::BitVector resultTypeRequiresCast;
+  llvm::BitVector resultRegIsFlagReg;
+
+  // Keep track of out constraints for tied input operand.
+  SmallVector<std::string> outputConstraints;
+
+  // Keep track of defined physregs.
+  llvm::SmallSet<std::string, 8> physRegOutputs;
 
   // An inline asm can be marked readonly if it meets the following conditions:
   //  - it doesn't have any sideeffects
@@ -102,12 +241,113 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
   // in addition to meeting the conditions listed above.
   bool readOnly = true, readNone = true;
 
-  if (s.getNumInputs() != 0 || s.getNumOutputs() != 0) {
+  if (s.getNumInputs() != 0) {
     assert(!cir::MissingFeatures::asmInputOperands());
-    assert(!cir::MissingFeatures::asmOutputOperands());
-    cgm.errorNYI(s.getAsmLoc(), "asm with operands");
+    cgm.errorNYI(srcLoc, "asm with input operands");
   }
 
+  std::string outputConstraint;
+  for (unsigned i = 0, e = s.getNumOutputs(); i != e; ++i) {
+    TargetInfo::ConstraintInfo &info = outputConstraintInfos[i];
+
+    // Simplify the output constraint.
+    outputConstraint = s.getOutputConstraint(i);
+    outputConstraint = getTarget().simplifyConstraint(
+        StringRef(outputConstraint).drop_front());
+
+    const Expr *outExpr = s.getOutputExpr(i);
+    outExpr = outExpr->IgnoreParenNoopCasts(getContext());
+
+    std::string gccReg;
+    outputConstraint = s.addVariableConstraints(
+        outputConstraint, *outExpr, getTarget(), info.earlyClobber(),
+        [&](const Stmt *unspStmt, StringRef msg) {
+          cgm.errorUnsupported(unspStmt, msg);
+        },
+        &gccReg);
+
+    // Give an error on multiple outputs to the same physreg.
+    if (!gccReg.empty() && !physRegOutputs.insert(gccReg).second)
+      cgm.error(srcLoc, "multiple outputs to hard register: " + gccReg);
+
+    outputConstraints.push_back(outputConstraint);
+    LValue dest = emitLValue(outExpr);
+
+    if (!constraints.empty())
+      constraints += ',';
+
+    // If this is a register output, then make the inline asm return it
+    // by-value.  If this is a memory result, return the value by-reference.
+    QualType qty = outExpr->getType();
+    const bool isScalarOrAggregate =
+        hasScalarEvaluationKind(qty) || hasAggregateEvaluationKind(qty);
+    if (!info.allowsMemory() && isScalarOrAggregate) {
+      constraints += "=" + outputConstraint;
+      resultRegQualTys.push_back(qty);
+      resultRegDests.push_back(dest);
+
+      bool isFlagReg = llvm::StringRef(outputConstraint).starts_with("{@cc");
+      resultRegIsFlagReg.push_back(isFlagReg);
+
+      mlir::Type ty = convertTypeForMem(qty);
+      const bool requiresCast =
+          info.allowsRegister() &&
+          (cgm.getTargetCIRGenInfo().isScalarizableAsmOperand(*this, ty) ||
+           isa<cir::RecordType, cir::ArrayType>(ty));
+
+      resultTruncRegTypes.push_back(ty);
+      resultTypeRequiresCast.push_back(requiresCast);
+
+      if (requiresCast) {
+        unsigned size = getContext().getTypeSize(qty);
+        if (size == 0)
+          cgm.error(outExpr->getExprLoc(), "output size should not be zero");
+
+        ty = cir::IntType::get(&getMLIRContext(), size, false);
+      }
+
+      resultRegTypes.push_back(ty);
+      // If this output is tied to an input, and if the input is larger, then
+      // we need to set the actual result type of the inline asm node to be the
+      // same as the input type.
+      if (info.hasMatchingInput())
+        assert(!cir::MissingFeatures::asmInputOperands());
+
+      if (mlir::Type adjTy = cgm.getTargetCIRGenInfo().adjustInlineAsmType(
+              *this, outputConstraint, resultRegTypes.back()))
+        resultRegTypes.back() = adjTy;
+      else
+        cgm.getDiags().Report(srcLoc, diag::err_asm_invalid_type_in_input)
+            << outExpr->getType() << outputConstraint;
+
+      // Update largest vector width for any vector types.
+      assert(!cir::MissingFeatures::asmVectorType());
+    } else {
+      Address destAddr = dest.getAddress();
+
+      // Matrix types in memory are represented by arrays, but accessed through
+      // vector pointers, with the alignment specified on the access operation.
+      // For inline assembly, update pointer arguments to use vector pointers.
+      // Otherwise there will be a mismatch if the matrix is also an input
+      // argument, which is represented as a vector.
+      if (isa<MatrixType>(outExpr->getType().getCanonicalType()))
+        destAddr =
+            destAddr.withElementType(builder, convertType(outExpr->getType()));
+
+      argTypes.push_back(destAddr.getType());
+      argElemTypes.push_back(destAddr.getElementType());
+      outArgs.push_back(destAddr.getPointer());
+      args.push_back(destAddr.getPointer());
+      constraints += "=*";
+      constraints += outputConstraint;
+      readOnly = readNone = false;
+    }
+
+    if (info.isReadWrite())
+      assert(!cir::MissingFeatures::asmInputOperands());
+
+  } // iterate over output operands
+
   bool hasUnwindClobber = false;
   collectClobbers(*this, s, constraints, hasUnwindClobber, readOnly, readNone);
 
@@ -115,8 +355,15 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
 
   mlir::Type resultType;
 
+  if (resultRegTypes.size() == 1)
+    resultType = resultRegTypes[0];
+  else if (resultRegTypes.size() > 1)
+    resultType = builder.getAnonRecordTy(resultRegTypes, /*packed=*/false,
+                                         /*padded=*/false);
+
   bool hasSideEffect = s.isVolatile() || s.getNumOutputs() == 0;
 
+  std::vector<mlir::Value> regResults;
   cir::InlineAsmOp ia = cir::InlineAsmOp::create(
       builder, getLoc(s.getAsmLoc()), resultType, operands, asmString,
       constraints, hasSideEffect, inferFlavor(cgm, s), mlir::ArrayAttr());
@@ -127,10 +374,56 @@ mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {
     assert(!cir::MissingFeatures::asmUnwindClobber());
   } else {
     assert(!cir::MissingFeatures::asmMemoryEffects());
+
+    mlir::Value result;
+    if (ia.getNumResults())
+      result = ia.getResult(0);
+
+    llvm::SmallVector<mlir::Attribute> operandAttrs;
+
+    int i = 0;
+    for (auto typ : argElemTypes) {
+      if (typ) {
+        auto op = args[i++];
+        assert(mlir::isa<cir::PointerType>(op.getType()) &&
+               "pointer type expected");
+        assert(cast<cir::PointerType>(op.getType()).getPointee() == typ &&
+               "element type 
diff ers from pointee type!");
+
+        operandAttrs.push_back(mlir::UnitAttr::get(&getMLIRContext()));
+      } else {
+        // We need to add an attribute for every arg since later, during
+        // the lowering to LLVM IR, the attributes will be assigned to the
+        // CallInst arguments by index, i.e. we can't skip a null type here.
+        operandAttrs.push_back(mlir::Attribute());
+      }
+    }
+    assert(args.size() == operandAttrs.size() &&
+           "The number of attributes is not even with the number of operands");
+
+    ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+
+    if (resultRegTypes.size() == 1) {
+      regResults.push_back(result);
+    } else if (resultRegTypes.size() > 1) {
+      CharUnits alignment = CharUnits::One();
+      mlir::Value dest =
+          emitAlloca("__asm_result", resultType, loc, alignment, false);
+      Address addr = Address(dest, alignment);
+      builder.createStore(loc, result, addr);
+
+      for (unsigned i = 0, e = resultRegTypes.size(); i != e; ++i) {
+        cir::PointerType typ = builder.getPointerTo(resultRegTypes[i]);
+        cir::GetMemberOp ptr = builder.createGetMember(loc, typ, dest, "", i);
+        cir::LoadOp tmp = builder.createLoad(loc, Address(ptr, alignment));
+        regResults.push_back(tmp);
+      }
+    }
   }
 
-  llvm::SmallVector<mlir::Attribute> operandAttrs;
-  ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
+  emitAsmStores(*this, s, regResults, resultRegTypes, resultTruncRegTypes,
+                resultRegDests, resultRegQualTys, resultTypeRequiresCast,
+                resultRegIsFlagReg);
 
   return mlir::success();
 }

diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index fcd3f105807ca..44fe9cbd96879 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2709,6 +2709,26 @@ DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc,
   return errorNYI(loc.getBegin(), feature) << loc;
 }
 
+void CIRGenModule::error(SourceLocation loc, StringRef error) {
+  unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+  getDiags().Report(astContext.getFullLoc(loc), diagID) << error;
+}
+
+/// Print out an error that codegen doesn't support the specified stmt yet.
+void CIRGenModule::errorUnsupported(const Stmt *s, llvm::StringRef type) {
+  unsigned diagId = diags.getCustomDiagID(DiagnosticsEngine::Error,
+                                          "cannot compile this %0 yet");
+  diags.Report(astContext.getFullLoc(s->getBeginLoc()), diagId)
+      << type << s->getSourceRange();
+}
+
+/// Print out an error that codegen doesn't support the specified decl yet.
+void CIRGenModule::errorUnsupported(const Decl *d, llvm::StringRef type) {
+  unsigned diagId = diags.getCustomDiagID(DiagnosticsEngine::Error,
+                                          "cannot compile this %0 yet");
+  diags.Report(astContext.getFullLoc(d->getLocation()), diagId) << type;
+}
+
 void CIRGenModule::mapBlockAddress(cir::BlockAddrInfoAttr blockInfo,
                                    cir::LabelOp label) {
   [[maybe_unused]] auto result =

diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 63ad5e879cf30..db63a5d636373 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -650,6 +650,15 @@ class CIRGenModule : public CIRGenTypeCache {
     return errorNYI(loc.getBegin(), feature, name) << loc;
   }
 
+  /// Emit a general error that something can't be done.
+  void error(SourceLocation loc, llvm::StringRef error);
+
+  /// Print out an error that codegen doesn't support the specified stmt yet.
+  void errorUnsupported(const Stmt *s, llvm::StringRef type);
+
+  /// Print out an error that codegen doesn't support the specified decl yet.
+  void errorUnsupported(const Decl *d, llvm::StringRef type);
+
 private:
   // An ordered map of canonical GlobalDecls to their mangled names.
   llvm::MapVector<clang::GlobalDecl, llvm::StringRef> mangledDeclNames;

diff --git a/clang/lib/CIR/CodeGen/TargetInfo.h b/clang/lib/CIR/CodeGen/TargetInfo.h
index 72682641a460b..9535ba94fb08b 100644
--- a/clang/lib/CIR/CodeGen/TargetInfo.h
+++ b/clang/lib/CIR/CodeGen/TargetInfo.h
@@ -104,6 +104,22 @@ class TargetCIRGenInfo {
   /// may need to adjust the debugger-support code in Sema to do the
  /// right thing when calling a function with no known signature.
   virtual bool isNoProtoCallVariadic(const FunctionNoProtoType *fnType) const;
+
+  virtual bool isScalarizableAsmOperand(CIRGenFunction &cgf,
+                                        mlir::Type ty) const {
+    return false;
+  }
+
+  /// Corrects the MLIR type for a given constraint and "usual"
+  /// type.
+  ///
+  /// \returns A new MLIR type, possibly the same as the original
+  /// on success
+  virtual mlir::Type adjustInlineAsmType(CIRGenFunction &cgf,
+                                         llvm::StringRef constraint,
+                                         mlir::Type ty) const {
+    return ty;
+  }
 };
 
std::unique_ptr<TargetCIRGenInfo> createX8664TargetCIRGenInfo(CIRGenTypes &cgt);

diff --git a/clang/test/CIR/CodeGen/inline-asm.c b/clang/test/CIR/CodeGen/inline-asm.c
index fc959f9326876..adcd0c96905a0 100644
--- a/clang/test/CIR/CodeGen/inline-asm.c
+++ b/clang/test/CIR/CodeGen/inline-asm.c
@@ -1,24 +1,156 @@
-// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
 // RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
 // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
-
-void f1() {
-  // CIR: cir.asm(x86_att, 
-  // CIR:   out = [],
-  // CIR:   in = [],
-  // CIR:   in_out = [],
-  // CIR:   {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
-  // LLVM: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM
+
+__asm__ ("foo1");
+__asm__ ("foo2");
+__asm__ ("foo3");
+// CIR: module{{.*}} cir.module_asm = ["foo1", "foo2", "foo3"]
+// LLVM: module asm "foo1"
+// LLVM-NEXT: module asm "foo2"
+// LLVM-NEXT: module asm "foo3"
+
+//      CIR: cir.func{{.*}}@empty1
+//      CIR: cir.asm(x86_att, 
+// CIR-NEXT:   out = [],
+// CIR-NEXT:   in = [],
+// CIR-NEXT:   in_out = [],
+// CIR-NEXT:   {"" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+// LLVM: define{{.*}}@empty1
+// LLVM: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
+void empty1() {
   __asm__ volatile("" : : : );
 }
 
-void f2() {
-  // CIR: cir.asm(x86_att,
-  // CIR:   out = [],
-  // CIR:   in = [],
-  // CIR:   in_out = [],
-  // CIR:   {"nop" "~{dirflag},~{fpsr},~{flags}"}) side_effects
-  // LLVM: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
+//      CIR: cir.func{{.*}}@empty2
+//      CIR: cir.asm(x86_att,
+// CIR-NEXT:   out = [],
+// CIR-NEXT:   in = [],
+// CIR-NEXT:   in_out = [],
+// CIR-NEXT:   {"nop" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+// LLVM: define{{.*}}@empty2
+// LLVM: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
+void empty2() {
   __asm__ volatile("nop" : : : );
 }
+
+//      CIR: cir.func{{.*}}@empty5
+//      CIR: %[[X:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+//      CIR: cir.asm(x86_att, 
+// CIR-NEXT:   out = [%[[X]] : !cir.ptr<!s32i> (maybe_memory)],
+// CIR-NEXT:   in = [],
+// CIR-NEXT:   in_out = [],
+// CIR-NEXT:   {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) side_effects
+// LLVM: define{{.*}}@empty5
+// LLVM: %[[X:.*]] = alloca i32
+// LLVM: call void asm sideeffect "", "=*m,~{dirflag},~{fpsr},~{flags}"(ptr 
elementtype(i32) %[[X]])
+void empty5(int x) {
+  __asm__ volatile("" : "=m"(x));
+}
+
+//      CIR: cir.func{{.*}}@add4
+//      CIR: %[[X:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["x", init]
+//      CIR: %[[X_LOAD:.*]] = cir.load {{.*}}  %[[X]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: cir.asm(x86_att, 
+// CIR-NEXT:       out = [%[[X_LOAD]] : !cir.ptr<!s32i> (maybe_memory)],
+// CIR-NEXT:       in = [],
+// CIR-NEXT:       in_out = [],
+// CIR-NEXT:       {"addl $$42, $0" "=*m,~{dirflag},~{fpsr},~{flags}"})
+// CIR-NEXT: cir.return
+// LLVM: define {{.*}}add4
+// LLVM: %[[X:.*]] = alloca ptr
+// LLVM: %[[X_LOAD:.*]] = load ptr, ptr %[[X]]
+// LLVM: call void asm "addl $$42, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(ptr 
elementtype(i32) %[[X_LOAD]])
+void add4(int *x) {
+  __asm__("addl $42, %[addr]" : [addr] "=m" (*x));
+}
+
+//      CIR: cir.func{{.*}}@mov
+//      CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"] {alignment = 4 : i64}
+//      CIR: %[[RES:.*]] = cir.asm(x86_att, 
+// CIR-NEXT:   out = [],
+// CIR-NEXT:   in = [],
+// CIR-NEXT:   in_out = [],
+// CIR-NEXT:   {"movl $$42, $0" "=r,~{dirflag},~{fpsr},~{flags}"}) -> !s32i
+// CIR: cir.store align(4) %[[RES]], %[[A]] : !s32i, !cir.ptr<!s32i>
+// LLVM: define{{.*}}@mov
+// LLVM: call i32 asm "movl $$42, $0", "=r,~{dirflag},~{fpsr},~{flags}"()
+unsigned mov(unsigned x) {
+  int a;
+  __asm__("movl $42, %0" : "=r" (a) : );
+  return a;
+}
+
+// bitfield destination of an asm.
+struct S {
+  int a : 4;
+};
+
+//      CIR: cir.func{{.*}}@t14
+//      CIR: cir.asm(x86_att,
+// CIR-NEXT:         out = [],
+// CIR-NEXT:         in = [],
+// CIR-NEXT:         in_out = [],
+// CIR-NEXT:         {"abc $0" "=r,~{dirflag},~{fpsr},~{flags}"}) -> !s32i
+// LLVM: define{{.*}}@t14
+// LLVM: call i32 asm "abc $0", "=r,~{dirflag},~{fpsr},~{flags}"()
+void t14(struct S *P) {
+  __asm__("abc %0" : "=r"(P->a) );
+}
+
+struct large {
+  int x[1000];
+};
+
+//      CIR: cir.func{{.*}}@t17
+//      CIR: %[[I:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i"]
+//      CIR: cir.asm(x86_att,
+// CIR-NEXT:         out = [%[[I]] : !cir.ptr<!s32i> (maybe_memory)],
+// CIR-NEXT:         in = [],
+// CIR-NEXT:         in_out = [],
+// CIR-NEXT:         {"nop" "=*m,~{dirflag},~{fpsr},~{flags}"})
+// LLVM: define{{.*}}@t17
+// LLVM: %[[I:.*]] = alloca i32
+// LLVM: call void asm "nop", "=*m,~{dirflag},~{fpsr},~{flags}"(ptr 
elementtype(i32) %[[I]])
+void t17(void) {
+  int i;
+  __asm__ ( "nop": "=m"(i));
+}
+
+//      CIR: cir.func{{.*}}@t25
+//      CIR: cir.asm(x86_att,
+// CIR-NEXT:         out = [],
+// CIR-NEXT:         in = [],
+// CIR-NEXT:         in_out = [],
+// CIR-NEXT:         {"finit" 
"~{st},~{st(1)},~{st(2)},~{st(3)},~{st(4)},~{st(5)},~{st(6)},~{st(7)},~{fpsr},~{fpcr},~{dirflag},~{fpsr},~{flags}"})
 side_effects
+// LLVM: define{{.*}}@t25
+// LLVM: call void asm sideeffect "finit", 
"~{st},~{st(1)},~{st(2)},~{st(3)},~{st(4)},~{st(5)},~{st(6)},~{st(7)},~{fpsr},~{fpcr},~{dirflag},~{fpsr},~{flags}"()
+void t25(void)
+{
+  __asm__ __volatile__(                                           \
+                      "finit"                             \
+                      :                                   \
+                      :                                   \
+                      :"st","st(1)","st(2)","st(3)",      \
+                       "st(4)","st(5)","st(6)","st(7)",   \
+                       "fpsr","fpcr"                      \
+                                                          );
+}
+
+// t26 skipped: no vector type support
+
+//      CIR: cir.func{{.*}}@t27
+//      CIR: cir.asm(x86_att,
+// CIR-NEXT:         out = [],
+// CIR-NEXT:         in = [],
+// CIR-NEXT:         in_out = [],
+// CIR-NEXT:         {"nop" "~{dirflag},~{fpsr},~{flags}"}) side_effects
+// LLVM: define{{.*}}@t27
+// LLVM: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"()
+void t27(void) {
+  asm volatile("nop");
+}
+


        