https://github.com/alowqie updated https://github.com/llvm/llvm-project/pull/186693

>From 577d301fb0f826bbf0c2f2f429ab286de2972cd9 Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Sun, 15 Mar 2026 17:14:47 +0100
Subject: [PATCH 1/6] [CIR][AArch64] add vshr_* builtins

---
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |  18 ++
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  81 ++++++-
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 215 ++++++++++++++++++
 3 files changed, 312 insertions(+), 2 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index fb047919b003d..41e87e8c2005d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -461,6 +461,24 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     return getConstantInt(loc, getUInt64Ty(), c);
   }
 
+  /// Create constant nullptr for pointer-to-data-member type ty.
+  cir::ConstantOp getNullDataMemberPtr(cir::DataMemberType ty,
+                                       mlir::Location loc) {
+    return cir::ConstantOp::create(*this, loc, getNullDataMemberAttr(ty));
+  }
+
+  cir::ConstantOp getNullMethodPtr(cir::MethodType ty, mlir::Location loc) {
+    return cir::ConstantOp::create(*this, loc, getNullMethodAttr(ty));
+  }
+
+  cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) {
+    // TODO: dispatch creation for primitive types.
+    assert((mlir::isa<cir::RecordType>(ty) || mlir::isa<cir::ArrayType>(ty) ||
+            mlir::isa<cir::VectorType>(ty)) &&
+           "NYI for other types");
+    return cir::ConstantOp::create(*this, loc, cir::ZeroAttr::get(ty));
+  }
+
  //===--------------------------------------------------------------------===//
  // UnaryOp creation helpers
  //===--------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 3d1e11ab87354..ce74ac1aaf93c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -180,6 +180,63 @@ static mlir::Value emitNeonSplat(CIRGenBuilderTy &builder, mlir::Location loc,
   int64_t laneCst = getIntValueFromConstOp(lane);
   llvm::SmallVector<int64_t, 4> shuffleMask(resEltCnt, laneCst);
   return builder.createVecShuffle(loc, v, shuffleMask);
+  
+// Build a constant shift amount vector of `vecTy` to shift a vector.
+// Here `shiftVal` is a constant integer that will be splatted into a
+// constant vector of `vecTy`, which is the return value of this function.
+static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
+                                       mlir::Value shiftVal,
+                                       cir::VectorType vecTy,
+                                       mlir::Location loc, bool neg) {
+  int shiftAmt = getIntValueFromConstOp(shiftVal);
+  if (neg)
+    shiftAmt = -shiftAmt;
+  llvm::SmallVector<mlir::Attribute> vecAttr{
+      vecTy.getSize(),
+      // ConstVectorAttr requires cir::IntAttr
+      cir::IntAttr::get(vecTy.getElementType(), shiftAmt)};
+  cir::ConstVectorAttr constVecAttr = cir::ConstVectorAttr::get(
+      vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr));
+  return cir::ConstantOp::create(builder, loc, constVecAttr);
+}
+
+// Build ShiftOp of vector type whose shift amount is a vector built
+// from a constant integer using the `emitNeonShiftVector` function.
+static mlir::Value
+emitCommonNeonShift(CIRGenBuilderTy &builder, mlir::Location loc,
+                    cir::VectorType resTy, mlir::Value shifTgt,
+                    mlir::Value shiftAmt, bool shiftLeft, bool negAmt = false) {
+  shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt);
+  return cir::ShiftOp::create(builder, loc, resTy,
+                              builder.createBitcast(shifTgt, resTy), shiftAmt,
+                              shiftLeft);
+}
+
+// Right-shift a vector by a constant.
+static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec,
+                                     mlir::Value shiftVal,
+                                     cir::VectorType vecTy, bool usgn,
+                                     mlir::Location loc) {
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  int64_t shiftAmt = getIntValueFromConstOp(shiftVal);
+  int eltSize =
+      cgf.cgm.getDataLayout().getTypeSizeInBits(vecTy.getElementType());
+
+  shiftVec = builder.createBitcast(shiftVec, vecTy);
+  // lshr/ashr are undefined when the shift amount is equal to the vector
+  // element size.
+  if (shiftAmt == eltSize) {
+    if (usgn) {
+      // Right-shifting an unsigned value by its size yields 0.
+      return builder.getZero(loc, vecTy);
+    }
+    // Right-shifting a signed value by its size is equivalent
+    // to a shift of size-1.
+    --shiftAmt;
+    shiftVal = builder.getConstInt(loc, vecTy.getElementType(), shiftAmt);
+  }
+  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal,
+                             false /* right shift */);
 }
 
 /// Build a constant shift amount vector of `vecTy` to shift a vector
@@ -225,6 +282,7 @@ static mlir::Value emitCommonNeonBuiltinExpr(
 
   // Determine the type of this overloaded NEON intrinsic.
   NeonTypeFlags neonType(neonTypeConst->getZExtValue());
+  bool isUnsigned = neonType.isUnsigned();
   const bool hasLegalHalfType = cgf.getTarget().hasFastHalfType();
 
   // The value of allowBFloatArgsAndRet is true for AArch64, but it should
@@ -448,8 +506,13 @@ static mlir::Value emitCommonNeonBuiltinExpr(
                                /*shiftLeft=*/true);
   case NEON::BI__builtin_neon_vshll_n_v:
   case NEON::BI__builtin_neon_vshrn_n_v:
+    cgf.cgm.errorNYI(expr->getSourceRange(),
+                     std::string("unimplemented AArch64 builtin call: ") +
+                         ctx.BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vshr_n_v:
   case NEON::BI__builtin_neon_vshrq_n_v:
+    return emitNeonRShiftImm(cgf, ops[0], ops[1], vTy, isUnsigned, loc);
   case NEON::BI__builtin_neon_vst1_v:
   case NEON::BI__builtin_neon_vst1q_v:
   case NEON::BI__builtin_neon_vst2_v:
@@ -2197,8 +2260,22 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
     assert(amt && "Expected argument to be a constant");
     return builder.createShiftLeft(loc, ops[0], amt->getZExtValue());
   }
-  case NEON::BI__builtin_neon_vshrd_n_s64:
-  case NEON::BI__builtin_neon_vshrd_n_u64:
+  case NEON::BI__builtin_neon_vshrd_n_s64: {
+    std::optional<llvm::APSInt> amt =
+        expr->getArg(1)->getIntegerConstantExpr(getContext());
+    assert(amt && "Expected argument to be a constant");
+    uint64_t bits = std::min(static_cast<uint64_t>(63), amt->getZExtValue());
+    return builder.createShiftRight(loc, ops[0], bits);
+  }
+  case NEON::BI__builtin_neon_vshrd_n_u64: {
+    std::optional<llvm::APSInt> amt =
+        expr->getArg(1)->getIntegerConstantExpr(getContext());
+    assert(amt && "Expected argument to be a constant");
+    uint64_t shiftAmt = amt->getZExtValue();
+    if (shiftAmt == 64)
+      return builder.getConstInt(loc, builder.getUInt64Ty(), 0);
+    return builder.createShiftRight(loc, ops[0], shiftAmt);
+  }
   case NEON::BI__builtin_neon_vsrad_n_s64:
   case NEON::BI__builtin_neon_vsrad_n_u64:
   case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index e2708a846edc4..b2ba79416f2bb 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1704,4 +1704,219 @@ uint64x1_t test_vshl_n_u64(uint64x1_t a) {
 // LLVM:    [[VSHL_N:%.*]] = shl <1 x i64> [[TMP1]], splat (i64 1)
 // LLVM:    ret <1 x i64> [[VSHL_N]]
  return vshl_n_u64(a, 1);
+
+//===------------------------------------------------------===//
+// 2.1.3.2.1 Vector shift right
+//===------------------------------------------------------===//
+
+// LLVM-LABEL: @test_vshr_n_s8( 
+// CIR-LABEL: @test_vshr_n_s8( 
+int8x8_t test_vshr_n_s8(int8x8_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
+// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector<8 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s8i>, [[AMT]] : !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+
+// LLVM:   [[VSHR_N:%.*]] = ashr <8 x i8> {{.*}}, splat (i8 3)
+// LLVM:   ret <8 x i8> [[VSHR_N]]
+  return vshr_n_s8(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_s16( 
+// CIR-LABEL: @test_vshr_n_s16( 
+int16x4_t test_vshr_n_s16(int16x4_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i,
+// CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector<4 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s16i>, [[AMT]] : !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+
+// LLVM:   [[VSHR_N:%.*]] = ashr <4 x i16> {{.*}}, splat (i16 3)
+// LLVM:   ret <4 x i16> [[VSHR_N]]
+  return vshr_n_s16(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_s32( 
+// CIR-LABEL: @test_vshr_n_s32( 
+int32x2_t test_vshr_n_s32(int32x2_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<2 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, [[AMT]] : !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <2 x i32> {{.*}}, splat (i32 3)
+// LLVM: ret <2 x i32> [[VSHR_N]]
+  return vshr_n_s32(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_s64( 
+// CIR-LABEL: @test_vshr_n_s64( 
+int64x1_t test_vshr_n_s64(int64x1_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i]> : !cir.vector<1 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, [[AMT]] : !cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> {{.*}}, splat (i64 3)
+// LLVM: ret <1 x i64> [[VSHR_N]]
+  return vshr_n_s64(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_s8( 
+// CIR-LABEL: @test_vshrq_n_s8( 
+int8x16_t test_vshrq_n_s8(int8x16_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
+// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
+// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
+// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector<16 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !s8i>, [[AMT]] : !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <16 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <16 x i8> [[VSHR_N]]
+  return vshrq_n_s8(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_s16( 
+// CIR-LABEL: @test_vshrq_n_s16( 
+int16x8_t test_vshrq_n_s16(int16x8_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
+// CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
+// CIR-SAME: #cir.int<3> : !s16i]> : !cir.vector<8 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s16i>, [[AMT]] : !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> {{.*}}, splat (i16 3)
+// LLVM: ret <8 x i16> [[VSHR_N]]
+  return vshrq_n_s16(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_s32( 
+// CIR-LABEL: @test_vshrq_n_s32( 
+int32x4_t test_vshrq_n_s32(int32x4_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i,
+// CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<4 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s32i>, [[AMT]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> {{.*}}, splat (i32 3)
+// LLVM: ret <4 x i32> [[VSHR_N]]   
+  return vshrq_n_s32(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_s64( 
+// CIR-LABEL: @test_vshrq_n_s64( 
+int64x2_t test_vshrq_n_s64(int64x2_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector<2 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, [[AMT]] : !cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
+
+// LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> {{.*}}, splat (i64 3)
+// LLVM: ret <2 x i64> [[VSHR_N]]
+  return vshrq_n_s64(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_u8( 
+// CIR-LABEL: @test_vshr_n_u8( 
+uint8x8_t test_vshr_n_u8(uint8x8_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
+// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector<8 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u8i>, [[AMT]] : !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <8 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <8 x i8> [[VSHR_N]]
+  return vshr_n_u8(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_u16( 
+// CIR-LABEL: @test_vshr_n_u16( 
+uint16x4_t test_vshr_n_u16(uint16x4_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i,
+// CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector<4 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u16i>, [[AMT]] : !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> {{.*}}, splat (i16 3)
+// LLVM: ret <4 x i16> [[VSHR_N]] 
+  return vshr_n_u16(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_u32( 
+// CIR-LABEL: @test_vshr_n_u32( 
+uint32x2_t test_vshr_n_u32(uint32x2_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<2 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, [[AMT]] : !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> {{.*}}, splat (i32 3)
+// LLVM: ret <2 x i32> [[VSHR_N]]
+  return vshr_n_u32(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_u64( 
+// CIR-LABEL: @test_vshr_n_u64( 
+uint64x1_t test_vshr_n_u64(uint64x1_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i]> : !cir.vector<1 x !u64i>
+  // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, [[AMT]] : !cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> {{.*}}, splat (i64 3)
+// LLVM: ret <1 x i64> [[VSHR_N]]
+  return vshr_n_u64(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_u8( 
+// CIR-LABEL: @test_vshrq_n_u8( 
+uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
+// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
+// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
+// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector<16 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !u8i>, [[AMT]] : !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <16 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <16 x i8> [[VSHR_N]]
+  return vshrq_n_u8(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_u16( 
+// CIR-LABEL: @test_vshrq_n_u16( 
+uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
+// CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
+// CIR-SAME: #cir.int<3> : !u16i]> : !cir.vector<8 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u16i>, [[AMT]] : !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> {{.*}}, splat (i16 3)
+// LLVM: ret <8 x i16> [[VSHR_N]]
+  return vshrq_n_u16(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_u32( 
+// CIR-LABEL: @test_vshrq_n_u32( 
+uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i,
+// CIR-SAME: #cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<4 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u32i>, [[AMT]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> {{.*}}, splat (i32 3)
+// LLVM: ret <4 x i32> [[VSHR_N]]
+  return vshrq_n_u32(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrq_n_u64( 
+// CIR-LABEL: @test_vshrq_n_u64( 
+uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
+// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i, #cir.int<3> : !u64i]> : !cir.vector<2 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, [[AMT]] : !cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
+
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> {{.*}}, splat (i64 3)
+// LLVM: ret <2 x i64> [[VSHR_N]]
+  return vshrq_n_u64(a, 3);
+}
+
+// LLVM-LABEL: @test_vshrd_n_s64( 
+// CIR-LABEL: @test_vshrd_n_s64( 
+int64_t test_vshrd_n_s64(int64_t a) {
+  // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i
+
+  // LLVM:   [[SHRD_N:%.*]] = ashr i64 {{.*}}, 1
+  // LLVM:   ret i64 [[SHRD_N]]
+  return (int64_t)vshrd_n_s64(a, 1);
+}
+
+// LLVM-LABEL: @test_vshrd_n_u64( 
+// CIR-LABEL: @test_vshrd_n_u64( 
+uint64_t test_vshrd_n_u64(uint64_t a) {
+  // CIR: {{.*}} = cir.const #cir.int<0> : !u64i
+  // CIR: cir.return {{.*}} : !u64i
+
+  // LLVM:   ret i64 0
+  return (uint64_t)vshrd_n_u64(a, 64);
 }
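
For reference, the boundary cases handled by `emitNeonRShiftImm` and the
scalar `vshrd_n` lowering can be exercised directly from C. A minimal
sketch, assuming an AArch64 target with arm_neon.h (the function names
below are illustrative, not part of the patch):

    #include <arm_neon.h>
    #include <stdint.h>

    // ACLE allows vshr*_n shift amounts up to the element width. An
    // unsigned right shift by the full width must fold to 0, while a
    // signed one replicates the sign bit, i.e. acts like a shift by
    // width - 1. These are exactly the two special cases above.
    uint64_t shr_u64_by_width(uint64_t a) {
      return vshrd_n_u64(a, 64); // folds to the constant 0
    }

    int64_t shr_s64_by_width(int64_t a) {
      return vshrd_n_s64(a, 64); // lowers to an ashr by 63
    }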

>From d07786feac7511889f32670ab36ce78974d3003e Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Sun, 15 Mar 2026 18:26:49 +0100
Subject: [PATCH 2/6] Remove tests from neon-intrinsics.c, moved to
 neon/intrinsics.c

---
 clang/test/CodeGen/AArch64/neon-intrinsics.c | 191 -------------------
 1 file changed, 191 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 4d511e508430d..be6d8d301a05c 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -6342,166 +6342,6 @@ float64x2_t test_vmulxq_f64(float64x2_t a, float64x2_t b) {
   return vmulxq_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local <8 x i8> @test_vshr_n_s8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <8 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <8 x i8> [[VSHR_N]]
-//
-int8x8_t test_vshr_n_s8(int8x8_t a) {
-  return vshr_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vshr_n_s16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <4 x i16> [[VSHR_N]]
-//
-int16x4_t test_vshr_n_s16(int16x4_t a) {
-  return vshr_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vshr_n_s32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <2 x i32> [[VSHR_N]]
-//
-int32x2_t test_vshr_n_s32(int32x2_t a) {
-  return vshr_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vshrq_n_s8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <16 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <16 x i8> [[VSHR_N]]
-//
-int8x16_t test_vshrq_n_s8(int8x16_t a) {
-  return vshrq_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vshrq_n_s16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <8 x i16> [[VSHR_N]]
-//
-int16x8_t test_vshrq_n_s16(int16x8_t a) {
-  return vshrq_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vshrq_n_s32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <4 x i32> [[VSHR_N]]
-//
-int32x4_t test_vshrq_n_s32(int32x4_t a) {
-  return vshrq_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vshrq_n_s64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 3)
-// CHECK-NEXT:    ret <2 x i64> [[VSHR_N]]
-//
-int64x2_t test_vshrq_n_s64(int64x2_t a) {
-  return vshrq_n_s64(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vshr_n_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <8 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <8 x i8> [[VSHR_N]]
-//
-uint8x8_t test_vshr_n_u8(uint8x8_t a) {
-  return vshr_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vshr_n_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <4 x i16> [[VSHR_N]]
-//
-uint16x4_t test_vshr_n_u16(uint16x4_t a) {
-  return vshr_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vshr_n_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <2 x i32> [[VSHR_N]]
-//
-uint32x2_t test_vshr_n_u32(uint32x2_t a) {
-  return vshr_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vshrq_n_u8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <16 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <16 x i8> [[VSHR_N]]
-//
-uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
-  return vshrq_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vshrq_n_u16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <8 x i16> [[VSHR_N]]
-//
-uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
-  return vshrq_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vshrq_n_u32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <4 x i32> [[VSHR_N]]
-//
-uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
-  return vshrq_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vshrq_n_u64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 3)
-// CHECK-NEXT:    ret <2 x i64> [[VSHR_N]]
-//
-uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
-  return vshrq_n_u64(a, 3);
-}
-
 // CHECK-LABEL: define dso_local <8 x i8> @test_vsra_n_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -17043,37 +16883,6 @@ uint64_t test_vcaltd_f64(float64_t a, float64_t b) {
   return (uint64_t)vcaltd_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_s64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[SHRD_N:%.*]] = ashr i64 [[A]], 1
-// CHECK-NEXT:    ret i64 [[SHRD_N]]
-//
-int64_t test_vshrd_n_s64(int64_t a) {
-  return (int64_t)vshrd_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vshr_n_s64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], splat (i64 1)
-// CHECK-NEXT:    ret <1 x i64> [[VSHR_N]]
-//
-int64x1_t test_vshr_n_s64(int64x1_t a) {
-  return vshr_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_u64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    ret i64 0
-//
-uint64_t test_vshrd_n_u64(uint64_t a) {
-  return (uint64_t)vshrd_n_u64(a, 64);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vshrd_n_u64_2(
 // CHECK-SAME: ) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]

>From 49162027692e27a5ec907da7d3584085e72fd94d Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Wed, 18 Mar 2026 17:50:33 +0100
Subject: [PATCH 3/6] addressing comments

---
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         | 10 ---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  | 22 +++---
 clang/test/CodeGen/AArch64/neon-intrinsics.c  | 12 ----
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 68 +++++++------------
 4 files changed, 36 insertions(+), 76 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 41e87e8c2005d..f8d3d93e49075 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -461,16 +461,6 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     return getConstantInt(loc, getUInt64Ty(), c);
   }
 
-  /// Create constant nullptr for pointer-to-data-member type ty.
-  cir::ConstantOp getNullDataMemberPtr(cir::DataMemberType ty,
-                                       mlir::Location loc) {
-    return cir::ConstantOp::create(*this, loc, getNullDataMemberAttr(ty));
-  }
-
-  cir::ConstantOp getNullMethodPtr(cir::MethodType ty, mlir::Location loc) {
-    return cir::ConstantOp::create(*this, loc, getNullMethodAttr(ty));
-  }
-
   cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) {
     // TODO: dispatch creation for primitive types.
     assert((mlir::isa<cir::RecordType>(ty) || mlir::isa<cir::ArrayType>(ty) ||
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index ce74ac1aaf93c..32fe4f2ee26a0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -187,10 +187,8 @@ static mlir::Value emitNeonSplat(CIRGenBuilderTy &builder, mlir::Location loc,
 static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
                                        mlir::Value shiftVal,
                                        cir::VectorType vecTy,
-                                       mlir::Location loc, bool neg) {
+                                       mlir::Location loc) {
   int shiftAmt = getIntValueFromConstOp(shiftVal);
-  if (neg)
-    shiftAmt = -shiftAmt;
   llvm::SmallVector<mlir::Attribute> vecAttr{
       vecTy.getSize(),
       // ConstVectorAttr requires cir::IntAttr
@@ -202,11 +200,12 @@ static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
 
 // Build ShiftOp of vector type whose shift amount is a vector built
 // from a constant integer using the `emitNeonShiftVector` function.
-static mlir::Value
-emitCommonNeonShift(CIRGenBuilderTy &builder, mlir::Location loc,
-                    cir::VectorType resTy, mlir::Value shifTgt,
-                    mlir::Value shiftAmt, bool shiftLeft, bool negAmt = false) {
-  shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt);
+static mlir::Value emitCommonNeonShift(CIRGenBuilderTy &builder,
+                                       mlir::Location loc,
+                                       cir::VectorType resTy,
+                                       mlir::Value shifTgt,
+                                       mlir::Value shiftAmt, bool shiftLeft) {
+  shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc);
   return cir::ShiftOp::create(builder, loc, resTy,
                               builder.createBitcast(shifTgt, resTy), shiftAmt,
                               shiftLeft);
@@ -282,7 +281,7 @@ static mlir::Value emitCommonNeonBuiltinExpr(
 
   // Determine the type of this overloaded NEON intrinsic.
   NeonTypeFlags neonType(neonTypeConst->getZExtValue());
-  bool isUnsigned = neonType.isUnsigned();
+  const bool isUnsigned = neonType.isUnsigned();
   const bool hasLegalHalfType = cgf.getTarget().hasFastHalfType();
 
   // The value of allowBFloatArgsAndRet is true for AArch64, but it should
@@ -2264,14 +2263,15 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
     std::optional<llvm::APSInt> amt =
         expr->getArg(1)->getIntegerConstantExpr(getContext());
     assert(amt && "Expected argument to be a constant");
-    uint64_t bits = std::min(static_cast<uint64_t>(63), amt->getZExtValue());
-    return builder.createShiftRight(loc, ops[0], bits);
+    return builder.createShiftRight(
+        loc, ops[0], std::min(static_cast<uint64_t>(63), amt->getZExtValue()));
   }
   case NEON::BI__builtin_neon_vshrd_n_u64: {
     std::optional<llvm::APSInt> amt =
         expr->getArg(1)->getIntegerConstantExpr(getContext());
     assert(amt && "Expected argument to be a constant");
     uint64_t shiftAmt = amt->getZExtValue();
+    // Right-shifting an unsigned value by its size yields 0.
     if (shiftAmt == 64)
       return builder.getConstInt(loc, builder.getUInt64Ty(), 0);
     return builder.createShiftRight(loc, ops[0], shiftAmt);
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index be6d8d301a05c..6db14a6d77753 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -16893,18 +16893,6 @@ uint64_t test_vshrd_n_u64_2() {
   return vshrd_n_u64(a, 64);
 }
 
-// CHECK-LABEL: define dso_local <1 x i64> @test_vshr_n_u64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], splat (i64 1)
-// CHECK-NEXT:    ret <1 x i64> [[VSHR_N]]
-//
-uint64x1_t test_vshr_n_u64(uint64x1_t a) {
-  return vshr_n_u64(a, 1);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
 // CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index b2ba79416f2bb..ebb0d1f6be09a 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1709,8 +1709,7 @@ uint64x1_t test_vshl_n_u64(uint64x1_t a) {
 // 2.1.3.2.1 Vector shift right
 //===------------------------------------------------------===//
 
-// LLVM-LABEL: @test_vshr_n_s8( 
-// CIR-LABEL: @test_vshr_n_s8( 
+// ALL-LABEL: @test_vshr_n_s8( 
 int8x8_t test_vshr_n_s8(int8x8_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
 // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector<8 x !s8i>
@@ -1721,8 +1720,7 @@ int8x8_t test_vshr_n_s8(int8x8_t a) {
   return vshr_n_s8(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_s16( 
-// CIR-LABEL: @test_vshr_n_s16( 
+// ALL-LABEL: @test_vshr_n_s16( 
 int16x4_t test_vshr_n_s16(int16x4_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i,
 // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector<4 x !s16i>
@@ -1733,8 +1731,7 @@ int16x4_t test_vshr_n_s16(int16x4_t a) {
   return vshr_n_s16(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_s32( 
-// CIR-LABEL: @test_vshr_n_s32( 
+// ALL-LABEL: @test_vshr_n_s32( 
 int32x2_t test_vshr_n_s32(int32x2_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<2 x !s32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, [[AMT]] : !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
@@ -1744,8 +1741,7 @@ int32x2_t test_vshr_n_s32(int32x2_t a) {
   return vshr_n_s32(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_s64( 
-// CIR-LABEL: @test_vshr_n_s64( 
+// ALL-LABEL: @test_vshr_n_s64( 
 int64x1_t test_vshr_n_s64(int64x1_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i]> : !cir.vector<1 x !s64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, [[AMT]] : !cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
@@ -1755,8 +1751,7 @@ int64x1_t test_vshr_n_s64(int64x1_t a) {
   return vshr_n_s64(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_s8( 
-// CIR-LABEL: @test_vshrq_n_s8( 
+// ALL-LABEL: @test_vshrq_n_s8( 
 int8x16_t test_vshrq_n_s8(int8x16_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
 // CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
@@ -1769,8 +1764,7 @@ int8x16_t test_vshrq_n_s8(int8x16_t a) {
   return vshrq_n_s8(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_s16( 
-// CIR-LABEL: @test_vshrq_n_s16( 
+// ALL-LABEL: @test_vshrq_n_s16( 
 int16x8_t test_vshrq_n_s16(int16x8_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
 // CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
@@ -1782,8 +1776,7 @@ int16x8_t test_vshrq_n_s16(int16x8_t a) {
   return vshrq_n_s16(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_s32( 
-// CIR-LABEL: @test_vshrq_n_s32( 
+// ALL-LABEL: @test_vshrq_n_s32( 
 int32x4_t test_vshrq_n_s32(int32x4_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i,
 // CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<4 x !s32i>
@@ -1794,8 +1787,7 @@ int32x4_t test_vshrq_n_s32(int32x4_t a) {
   return vshrq_n_s32(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_s64( 
-// CIR-LABEL: @test_vshrq_n_s64( 
+// ALL-LABEL: @test_vshrq_n_s64( 
 int64x2_t test_vshrq_n_s64(int64x2_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector<2 x !s64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, [[AMT]] : !cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
@@ -1805,8 +1797,7 @@ int64x2_t test_vshrq_n_s64(int64x2_t a) {
   return vshrq_n_s64(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_u8( 
-// CIR-LABEL: @test_vshr_n_u8( 
+// ALL-LABEL: @test_vshr_n_u8( 
 uint8x8_t test_vshr_n_u8(uint8x8_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
 // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector<8 x !u8i>
@@ -1817,8 +1808,7 @@ uint8x8_t test_vshr_n_u8(uint8x8_t a) {
   return vshr_n_u8(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_u16( 
-// CIR-LABEL: @test_vshr_n_u16( 
+// ALL-LABEL: @test_vshr_n_u16( 
 uint16x4_t test_vshr_n_u16(uint16x4_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i,
 // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector<4 x !u16i>
@@ -1829,8 +1819,7 @@ uint16x4_t test_vshr_n_u16(uint16x4_t a) {
   return vshr_n_u16(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_u32( 
-// CIR-LABEL: @test_vshr_n_u32( 
+// ALL-LABEL: @test_vshr_n_u32( 
 uint32x2_t test_vshr_n_u32(uint32x2_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<2 x !u32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, [[AMT]] : !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
@@ -1840,19 +1829,17 @@ uint32x2_t test_vshr_n_u32(uint32x2_t a) {
   return vshr_n_u32(a, 3);
 }
 
-// LLVM-LABEL: @test_vshr_n_u64( 
-// CIR-LABEL: @test_vshr_n_u64( 
+// ALL-LABEL: @test_vshr_n_u64( 
 uint64x1_t test_vshr_n_u64(uint64x1_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i]> : !cir.vector<1 x !u64i>
-  // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, [[AMT]] : !cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, [[AMT]] : !cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> {{.*}}, splat (i64 3)
 // LLVM: ret <1 x i64> [[VSHR_N]]
   return vshr_n_u64(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_u8( 
-// CIR-LABEL: @test_vshrq_n_u8( 
+// ALL-LABEL: @test_vshrq_n_u8( 
 uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
 // CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
@@ -1865,8 +1852,7 @@ uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
   return vshrq_n_u8(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_u16( 
-// CIR-LABEL: @test_vshrq_n_u16( 
+// ALL-LABEL: @test_vshrq_n_u16( 
 uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
 // CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
@@ -1878,8 +1864,7 @@ uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
   return vshrq_n_u16(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_u32( 
-// CIR-LABEL: @test_vshrq_n_u32( 
+// ALL-LABEL: @test_vshrq_n_u32( 
 uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i,
 // CIR-SAME: #cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<4 x !u32i>
@@ -1890,8 +1875,7 @@ uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
   return vshrq_n_u32(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrq_n_u64( 
-// CIR-LABEL: @test_vshrq_n_u64( 
+// ALL-LABEL: @test_vshrq_n_u64( 
 uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
 // CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i, #cir.int<3> : !u64i]> : !cir.vector<2 x !u64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, [[AMT]] : !cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
@@ -1901,22 +1885,20 @@ uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
   return vshrq_n_u64(a, 3);
 }
 
-// LLVM-LABEL: @test_vshrd_n_s64( 
-// CIR-LABEL: @test_vshrd_n_s64( 
+// ALL-LABEL: @test_vshrd_n_s64( 
 int64_t test_vshrd_n_s64(int64_t a) {
-  // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i
 
-  // LLVM:   [[SHRD_N:%.*]] = ashr i64 {{.*}}, 1
-  // LLVM:   ret i64 [[SHRD_N]]
+// LLVM:   [[SHRD_N:%.*]] = ashr i64 {{.*}}, 1
+// LLVM:   ret i64 [[SHRD_N]]
   return (int64_t)vshrd_n_s64(a, 1);
 }
 
-// LLVM-LABEL: @test_vshrd_n_u64( 
-// CIR-LABEL: @test_vshrd_n_u64( 
+// ALL-LABEL: @test_vshrd_n_u64( 
 uint64_t test_vshrd_n_u64(uint64_t a) {
-  // CIR: {{.*}} = cir.const #cir.int<0> : !u64i
-  // CIR: cir.return {{.*}} : !u64i
+// CIR: {{.*}} = cir.const #cir.int<0> : !u64i
+// CIR: cir.return {{.*}} : !u64i
 
-  // LLVM:   ret i64 0
+// LLVM:   ret i64 0
   return (uint64_t)vshrd_n_u64(a, 64);
 }
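
The scalar cases above can be summarized as a plain C model. A sketch
only; the model_* names are illustrative and not part of the codegen:

    #include <stdint.h>

    // vshrd_n_s64: a signed right shift by 64 would be undefined in
    // C/LLVM, but is equivalent to a shift by 63, so the amount is
    // clamped.
    static int64_t model_vshrd_n_s64(int64_t a, unsigned n) {
      return a >> (n > 63 ? 63 : n);
    }

    // vshrd_n_u64: an unsigned right shift by the full width always
    // yields 0, so the builtin folds to the constant 0 instead of
    // emitting an out-of-range shift.
    static uint64_t model_vshrd_n_u64(uint64_t a, unsigned n) {
      return n == 64 ? 0 : a >> n;
    }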

>From 7eecad01add5467d5385326e95014129758986d9 Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Fri, 20 Mar 2026 22:25:57 +0100
Subject: [PATCH 4/6] switch to cir.vec.splat

---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  | 13 ++-
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 80 ++++++++-----------
 2 files changed, 37 insertions(+), 56 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 32fe4f2ee26a0..da76c82271789 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -188,14 +188,11 @@ static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
                                        mlir::Value shiftVal,
                                        cir::VectorType vecTy,
                                        mlir::Location loc) {
-  int shiftAmt = getIntValueFromConstOp(shiftVal);
-  llvm::SmallVector<mlir::Attribute> vecAttr{
-      vecTy.getSize(),
-      // ConstVectorAttr requires cir::IntAttr
-      cir::IntAttr::get(vecTy.getElementType(), shiftAmt)};
-  cir::ConstVectorAttr constVecAttr = cir::ConstVectorAttr::get(
-      vecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr));
-  return cir::ConstantOp::create(builder, loc, constVecAttr);
+  mlir::Type eltTy = vecTy.getElementType();
+  if (shiftVal.getType() != eltTy) {
+    shiftVal = builder.createIntCast(shiftVal, eltTy);
+  }
+  return cir::VecSplatOp::create(builder, loc, vecTy, shiftVal);
 }
 
 // Build ShiftOp of vector type whose shift amount is a vector built
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index ebb0d1f6be09a..a16f9465d3f7a 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1711,9 +1711,8 @@ uint64x1_t test_vshl_n_u64(uint64x1_t a) {
 
 // ALL-LABEL: @test_vshr_n_s8( 
 int8x8_t test_vshr_n_s8(int8x8_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
-// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector<8 x !s8i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s8i>, [[AMT]] : !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<8 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s8i>, {{%.*}} : !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
 
 // LLVM:   [[VSHR_N:%.*]] = ashr <8 x i8> {{.*}}, splat (i8 3)
 // LLVM:   ret <8 x i8> [[VSHR_N]]
@@ -1722,9 +1721,8 @@ int8x8_t test_vshr_n_s8(int8x8_t a) {
 
 // ALL-LABEL: @test_vshr_n_s16( 
 int16x4_t test_vshr_n_s16(int16x4_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i,
-// CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i]> : !cir.vector<4 x !s16i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s16i>, [[AMT]] : !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<4 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s16i>, {{%.*}} : !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
 
 // LLVM:   [[VSHR_N:%.*]] = ashr <4 x i16> {{.*}}, splat (i16 3)
 // LLVM:   ret <4 x i16> [[VSHR_N]]
@@ -1733,8 +1731,8 @@ int16x4_t test_vshr_n_s16(int16x4_t a) {
 
 // ALL-LABEL: @test_vshr_n_s32( 
 int32x2_t test_vshr_n_s32(int32x2_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<2 x !s32i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, [[AMT]] : !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<2 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, {{%.*}} : !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <2 x i32> {{.*}}, splat (i32 3)
 // LLVM: ret <2 x i32> [[VSHR_N]]
@@ -1743,8 +1741,8 @@ int32x2_t test_vshr_n_s32(int32x2_t a) {
 
 // ALL-LABEL: @test_vshr_n_s64( 
 int64x1_t test_vshr_n_s64(int64x1_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i]> : !cir.vector<1 x !s64i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, [[AMT]] : !cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<1 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, {{%.*}} : !cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> {{.*}}, splat (i64 3)
 // LLVM: ret <1 x i64> [[VSHR_N]]
@@ -1753,11 +1751,8 @@ int64x1_t test_vshr_n_s64(int64x1_t a) {
 
 // ALL-LABEL: @test_vshrq_n_s8( 
 int8x16_t test_vshrq_n_s8(int8x16_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
-// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
-// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i,
-// CIR-SAME: #cir.int<3> : !s8i, #cir.int<3> : !s8i, #cir.int<3> : !s8i]> : !cir.vector<16 x !s8i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !s8i>, [[AMT]] : !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<16 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !s8i>, {{%.*}} : !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <16 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <16 x i8> [[VSHR_N]]
@@ -1766,10 +1761,8 @@ int8x16_t test_vshrq_n_s8(int8x16_t a) {
 
 // ALL-LABEL: @test_vshrq_n_s16( 
 int16x8_t test_vshrq_n_s16(int16x8_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
-// CIR-SAME: #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i, #cir.int<3> : !s16i,
-// CIR-SAME: #cir.int<3> : !s16i]> : !cir.vector<8 x !s16i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s16i>, [[AMT]] : !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<8 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s16i>, {{%.*}} : !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> {{.*}}, splat (i16 3)
 // LLVM: ret <8 x i16> [[VSHR_N]]
@@ -1778,9 +1771,8 @@ int16x8_t test_vshrq_n_s16(int16x8_t a) {
 
 // ALL-LABEL: @test_vshrq_n_s32( 
 int32x4_t test_vshrq_n_s32(int32x4_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s32i, #cir.int<3> : !s32i,
-// CIR-SAME: #cir.int<3> : !s32i, #cir.int<3> : !s32i]> : !cir.vector<4 x !s32i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s32i>, [[AMT]] : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<4 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s32i>, {{%.*}} : !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> {{.*}}, splat (i32 3)
 // LLVM: ret <4 x i32> [[VSHR_N]]   
@@ -1789,8 +1781,8 @@ int32x4_t test_vshrq_n_s32(int32x4_t a) {
 
 // ALL-LABEL: @test_vshrq_n_s64( 
 int64x2_t test_vshrq_n_s64(int64x2_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !s64i, #cir.int<3> : !s64i]> : !cir.vector<2 x !s64i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, [[AMT]] : !cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<2 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, {{%.*}} : !cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
 
 // LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> {{.*}}, splat (i64 3)
 // LLVM: ret <2 x i64> [[VSHR_N]]
@@ -1799,9 +1791,8 @@ int64x2_t test_vshrq_n_s64(int64x2_t a) {
 
 // ALL-LABEL: @test_vshr_n_u8( 
 uint8x8_t test_vshr_n_u8(uint8x8_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
-// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector<8 x !u8i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u8i>, [[AMT]] : !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<8 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u8i>, {{%.*}} : !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <8 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <8 x i8> [[VSHR_N]]
@@ -1810,9 +1801,8 @@ uint8x8_t test_vshr_n_u8(uint8x8_t a) {
 
 // ALL-LABEL: @test_vshr_n_u16( 
 uint16x4_t test_vshr_n_u16(uint16x4_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i,
-// CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i]> : !cir.vector<4 x !u16i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u16i>, [[AMT]] : !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<4 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u16i>, {{%.*}} : !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> {{.*}}, splat (i16 3)
 // LLVM: ret <4 x i16> [[VSHR_N]] 
@@ -1821,8 +1811,8 @@ uint16x4_t test_vshr_n_u16(uint16x4_t a) {
 
 // ALL-LABEL: @test_vshr_n_u32( 
 uint32x2_t test_vshr_n_u32(uint32x2_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<2 x !u32i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, [[AMT]] : !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<2 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, {{%.*}} : !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> {{.*}}, splat (i32 3)
 // LLVM: ret <2 x i32> [[VSHR_N]]
@@ -1831,8 +1821,8 @@ uint32x2_t test_vshr_n_u32(uint32x2_t a) {
 
 // ALL-LABEL: @test_vshr_n_u64( 
 uint64x1_t test_vshr_n_u64(uint64x1_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i]> : !cir.vector<1 x !u64i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, [[AMT]] : !cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<1 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, {{%.*}} : !cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> {{.*}}, splat (i64 3)
 // LLVM: ret <1 x i64> [[VSHR_N]]
@@ -1841,11 +1831,8 @@ uint64x1_t test_vshr_n_u64(uint64x1_t a) {
 
 // ALL-LABEL: @test_vshrq_n_u8( 
 uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
-// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
-// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i,
-// CIR-SAME: #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i, #cir.int<3> : !u8i]> : !cir.vector<16 x !u8i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !u8i>, [[AMT]] : !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<16 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !u8i>, {{%.*}} : !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <16 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <16 x i8> [[VSHR_N]]
@@ -1854,10 +1841,8 @@ uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
 
 // ALL-LABEL: @test_vshrq_n_u16( 
 uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
-// CIR-SAME: #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i, #cir.int<3> : !u16i,
-// CIR-SAME: #cir.int<3> : !u16i]> : !cir.vector<8 x !u16i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u16i>, [[AMT]] : !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<8 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u16i>, {{%.*}} : !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> {{.*}}, splat (i16 3)
 // LLVM: ret <8 x i16> [[VSHR_N]]
@@ -1866,9 +1851,8 @@ uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
 
 // ALL-LABEL: @test_vshrq_n_u32( 
 uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u32i, #cir.int<3> : !u32i,
-// CIR-SAME: #cir.int<3> : !u32i, #cir.int<3> : !u32i]> : !cir.vector<4 x !u32i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u32i>, [[AMT]] : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<4 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u32i>, {{%.*}} : !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> {{.*}}, splat (i32 3)
 // LLVM: ret <4 x i32> [[VSHR_N]]
@@ -1877,8 +1861,8 @@ uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
 
 // ALL-LABEL: @test_vshrq_n_u64( 
 uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
-// CIR: [[AMT:%.*]] = cir.const #cir.const_vector<[#cir.int<3> : !u64i, #cir.int<3> : !u64i]> : !cir.vector<2 x !u64i>
-// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, [[AMT]] : !cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<2 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, {{%.*}} : !cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
 
 // LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> {{.*}}, splat (i64 3)
 // LLVM: ret <2 x i64> [[VSHR_N]]
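
The `cir.vec.splat` form has a direct source-level analogue, vdup_n_*,
which replicates one scalar across every lane. A short sketch, assuming
an AArch64 target (NEON expresses a right shift by a splatted amount as
a left shift by its negation):

    #include <arm_neon.h>

    // vdup_n_s8 splats a single scalar into all eight lanes, just as
    // cir.vec.splat builds the shift-amount vector from one constant;
    // shifting left by the splat of -3 is NEON's signed right shift
    // by 3.
    int8x8_t shr3_via_splat(int8x8_t a) {
      return vshl_s8(a, vdup_n_s8(-3));
    }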

>From 63bd703387750881b1a9b77faf5b28f451c71db1 Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Mon, 6 Apr 2026 20:34:38 +0200
Subject: [PATCH 5/6] addressing new comments

---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  6 +-
 clang/test/CodeGen/AArch64/neon-intrinsics.c  | 10 ---
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 85 +++++++++++++++----
 3 files changed, 73 insertions(+), 28 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index da76c82271789..3e6b1fed931f1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -180,7 +180,8 @@ static mlir::Value emitNeonSplat(CIRGenBuilderTy &builder, mlir::Location loc,
   int64_t laneCst = getIntValueFromConstOp(lane);
   llvm::SmallVector<int64_t, 4> shuffleMask(resEltCnt, laneCst);
   return builder.createVecShuffle(loc, v, shuffleMask);
-  
+}
+
 // Build a constant shift amount vector of `vecTy` to shift a vector
 // Here `shitfVal` is a constant integer that will be splated into a
 // a const vector of `vecTy` which is the return of this function
@@ -231,8 +232,7 @@ static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, 
mlir::Value shiftVec,
     --shiftAmt;
     shiftVal = builder.getConstInt(loc, vecTy.getElementType(), shiftAmt);
   }
-  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal,
-                             false /* right shift */);
+  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, 
/*shiftLeft=*/false);
 }
 
 /// Build a constant shift amount vector of `vecTy` to shift a vector
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c 
b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 6db14a6d77753..b67a795e237ee 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -16883,16 +16883,6 @@ uint64_t test_vcaltd_f64(float64_t a, float64_t b) {
   return (uint64_t)vcaltd_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_u64_2(
-// CHECK-SAME: ) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    ret i64 0
-//
-uint64_t test_vshrd_n_u64_2() {
-  uint64_t a = UINT64_C(0xf000000000000000);
-  return vshrd_n_u64(a, 64);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
 // CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index a16f9465d3f7a..fac9baba1944d 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1704,6 +1704,7 @@ uint64x1_t test_vshl_n_u64(uint64x1_t a) {
 // LLVM:    [[VSHL_N:%.*]] = shl <1 x i64> [[TMP1]], splat (i64 1)
 // LLVM:    ret <1 x i64> [[VSHL_N]]
  return vshl_n_u64(a, 1);
+}
 
 //===------------------------------------------------------===//
 // 2.1.3.2.1 Vector shift right
@@ -1714,17 +1715,22 @@ int8x8_t test_vshr_n_s8(int8x8_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<8 x !s8i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s8i>, {{%.*}} : 
!cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
 
+// LLVM-SAME: <8 x i8> {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM:   [[VSHR_N:%.*]] = ashr <8 x i8> {{.*}}, splat (i8 3)
 // LLVM:   ret <8 x i8> [[VSHR_N]]
   return vshr_n_s8(a, 3);
 }
 
-// ALL-LABEL: @test_vshr_n_s16( 
+// LLVM-LABEL: @test_vshr_n_s16( 
+// CIR-LABEL: @test_vshr_n_s16( 
 int16x4_t test_vshr_n_s16(int16x4_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<4 x !s16i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s16i>, {{%.*}} : 
!cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
 
-// LLVM:   [[VSHR_N:%.*]] = ashr <4 x i16> {{.*}}, splat (i16 3)
+// LLVM-SAME: <4 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:   [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+// LLVM:   [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// LLVM:   [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], splat (i16 3)
 // LLVM:   ret <4 x i16> [[VSHR_N]]
   return vshr_n_s16(a, 3);
 }
@@ -1734,8 +1740,11 @@ int32x2_t test_vshr_n_s32(int32x2_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<2 x !s32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, {{%.*}} : 
!cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
 
-// LLVM: [[VSHR_N:%.*]] = ashr <2 x i32> {{.*}}, splat (i32 3)
-// LLVM: ret <2 x i32> [[VSHR_N]]
+// LLVM-SAME: <2 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:  [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+// LLVM:  [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// LLVM:  [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], splat (i32 3)
+// LLVM:  ret <2 x i32> [[VSHR_N]]
   return vshr_n_s32(a, 3);
 }
 
@@ -1744,9 +1753,12 @@ int64x1_t test_vshr_n_s64(int64x1_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<1 x !s64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, {{%.*}} : 
!cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
 
-// LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> {{.*}}, splat (i64 3)
+// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], splat (i64 1)
 // LLVM: ret <1 x i64> [[VSHR_N]]
-  return vshr_n_s64(a, 3);
+  return vshr_n_s64(a, 1);
 }
 
 // ALL-LABEL: @test_vshrq_n_s8( 
@@ -1754,6 +1766,7 @@ int8x16_t test_vshrq_n_s8(int8x16_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<16 x !s8i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !s8i>, {{%.*}} : 
!cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
 
+// LLVM-SAME: <16 x i8> {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM: [[VSHR_N:%.*]] = ashr <16 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <16 x i8> [[VSHR_N]]
   return vshrq_n_s8(a, 3);
@@ -1764,7 +1777,10 @@ int16x8_t test_vshrq_n_s16(int16x8_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<8 x !s16i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s16i>, {{%.*}} : 
!cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
 
-// LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> {{.*}}, splat (i16 3)
+// LLVM-SAME: <8 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3)
 // LLVM: ret <8 x i16> [[VSHR_N]]
   return vshrq_n_s16(a, 3);
 }
@@ -1774,7 +1790,10 @@ int32x4_t test_vshrq_n_s32(int32x4_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<4 x !s32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s32i>, {{%.*}} : 
!cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
 
-// LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> {{.*}}, splat (i32 3)
+// LLVM-SAME: <4 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 3)
 // LLVM: ret <4 x i32> [[VSHR_N]]   
   return vshrq_n_s32(a, 3);
 }
@@ -1784,7 +1803,10 @@ int64x2_t test_vshrq_n_s64(int64x2_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<2 x !s64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, {{%.*}} : 
!cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
 
-// LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> {{.*}}, splat (i64 3)
+// LLVM-SAME: <2 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 3)
 // LLVM: ret <2 x i64> [[VSHR_N]]
   return vshrq_n_s64(a, 3);
 }
@@ -1794,6 +1816,7 @@ uint8x8_t test_vshr_n_u8(uint8x8_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<8 x !u8i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u8i>, {{%.*}} : 
!cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
 
+// LLVM-SAME: <8 x i8> {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM: [[VSHR_N:%.*]] = lshr <8 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <8 x i8> [[VSHR_N]]
   return vshr_n_u8(a, 3);
@@ -1804,7 +1827,10 @@ uint16x4_t test_vshr_n_u16(uint16x4_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<4 x !u16i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u16i>, {{%.*}} : 
!cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> {{.*}}, splat (i16 3)
+// LLVM-SAME: <4 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], splat (i16 3)
 // LLVM: ret <4 x i16> [[VSHR_N]] 
   return vshr_n_u16(a, 3);
 }
@@ -1814,7 +1840,10 @@ uint32x2_t test_vshr_n_u32(uint32x2_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<2 x !u32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, {{%.*}} : 
!cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> {{.*}}, splat (i32 3)
+// LLVM-SAME: <2 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3)
 // LLVM: ret <2 x i32> [[VSHR_N]]
   return vshr_n_u32(a, 3);
 }
@@ -1824,7 +1853,10 @@ uint64x1_t test_vshr_n_u64(uint64x1_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<1 x !u64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, {{%.*}} : 
!cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> {{.*}}, splat (i64 3)
+// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], splat (i64 3)
 // LLVM: ret <1 x i64> [[VSHR_N]]
   return vshr_n_u64(a, 3);
 }
@@ -1834,6 +1866,7 @@ uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<16 x !u8i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !u8i>, {{%.*}} : 
!cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
 
+// LLVM-SAME: <16 x i8> {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM: [[VSHR_N:%.*]] = lshr <16 x i8> {{.*}}, splat (i8 3)
 // LLVM: ret <16 x i8> [[VSHR_N]]
   return vshrq_n_u8(a, 3);
@@ -1844,7 +1877,10 @@ uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<8 x !u16i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u16i>, {{%.*}} : 
!cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> {{.*}}, splat (i16 3)
+// LLVM-SAME: <8 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3)
 // LLVM: ret <8 x i16> [[VSHR_N]]
   return vshrq_n_u16(a, 3);
 }
@@ -1854,7 +1890,10 @@ uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<4 x !u32i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u32i>, {{%.*}} : 
!cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> {{.*}}, splat (i32 3)
+// LLVM-SAME: <4 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 3)
 // LLVM: ret <4 x i32> [[VSHR_N]]
   return vshrq_n_u32(a, 3);
 }
@@ -1864,7 +1903,10 @@ uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
 // CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<2 x !u64i>
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, {{%.*}} : 
!cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
 
-// LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> {{.*}}, splat (i64 3)
+// LLVM-SAME: <2 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 3)
 // LLVM: ret <2 x i64> [[VSHR_N]]
   return vshrq_n_u64(a, 3);
 }
@@ -1873,6 +1915,7 @@ uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
 int64_t test_vshrd_n_s64(int64_t a) {
 // CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i
 
+// LLVM-SAME: i64 {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM:   [[SHRD_N:%.*]] = ashr i64 {{.*}}, 1
 // LLVM:   ret i64 [[SHRD_N]]
   return (int64_t)vshrd_n_s64(a, 1);
@@ -1883,6 +1926,18 @@ uint64_t test_vshrd_n_u64(uint64_t a) {
 // CIR: {{.*}} = cir.const #cir.int<0> : !u64i
 // CIR: cir.return {{.*}} : !u64i
 
+// LLVM-SAME: i64 {{.*}} [[A:%.*]]) {{.*}} {
 // LLVM:   ret i64 0
   return (uint64_t)vshrd_n_u64(a, 64);
 }
+
+// ALL-LABEL: @test_vshrd_n_u64_2( 
+uint64_t test_vshrd_n_u64_2() {
+// CIR: {{.*}} = cir.const #cir.int<0> : !u64i
+// CIR: cir.return {{.*}} : !u64i
+
+// LLVM-SAME: {{.*}} {
+// LLVM: ret i64 0
+  uint64_t a = UINT64_C(0xf000000000000000);
+  return vshrd_n_u64(a, 64);
+}
\ No newline at end of file

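Note the two scalar tests at the end of this hunk: `vshrd_n_u64` with a
shift amount of 64 constant-folds to `ret i64 0` in both the CIR and LLVM
output. That is the intrinsic's defined behavior (the ACLE allows
right-shift amounts of 1..64, and an unsigned shift by the full width
yields 0), whereas a plain C `>>` by 64 would be undefined. A
self-contained version of the same check, restating the test above rather
than adding anything new (the function name here is illustrative):

  #include <arm_neon.h>
  #include <stdint.h>

  uint64_t shift_all_bits_out(void) {
    uint64_t a = UINT64_C(0xf000000000000000);
    // vshrd_n_u64 accepts shift amounts 1..64; shifting right by 64
    // yields 0, so this folds to `ret i64 0`.
    return vshrd_n_u64(a, 64);
  }
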
>From 00048d46a673a8544268208211e368cc9b492431 Mon Sep 17 00:00:00 2001
From: alowqie <[email protected]>
Date: Mon, 6 Apr 2026 23:01:21 +0200
Subject: [PATCH 6/6] Fix incorrect merge

---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  | 36 +++----------------
 1 file changed, 5 insertions(+), 31 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 3e6b1fed931f1..a1afa02ac10f1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -182,9 +182,9 @@ static mlir::Value emitNeonSplat(CIRGenBuilderTy &builder, 
mlir::Location loc,
   return builder.createVecShuffle(loc, v, shuffleMask);
 }
 
-// Build a constant shift amount vector of `vecTy` to shift a vector
-// Here `shitfVal` is a constant integer that will be splated into a
-// a const vector of `vecTy` which is the return of this function
+/// Build a constant shift amount vector of `vecTy` to shift a vector.
+/// Here `shiftVal` is a constant integer that will be broadcast into a
+/// const vector of `vecTy`, which is the return value of this function.
 static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
                                        mlir::Value shiftVal,
                                        cir::VectorType vecTy,
@@ -196,8 +196,6 @@ static mlir::Value emitNeonShiftVector(CIRGenBuilderTy 
&builder,
   return cir::VecSplatOp::create(builder, loc, vecTy, shiftVal);
 }
 
-// Build ShiftOp of vector type whose shift amount is a vector built
-// from a constant integer using `emitNeonShiftVector` function
 static mlir::Value emitCommonNeonShift(CIRGenBuilderTy &builder,
                                        mlir::Location loc,
                                        cir::VectorType resTy,
@@ -232,32 +230,8 @@ static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, 
mlir::Value shiftVec,
     --shiftAmt;
     shiftVal = builder.getConstInt(loc, vecTy.getElementType(), shiftAmt);
   }
-  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, 
/*shiftLeft=*/false);
-}
-
-/// Build a constant shift amount vector of `vecTy` to shift a vector
-/// Here `shitfVal` is a constant integer that will be broadcast into a
-/// a const vector of `vecTy` which is the return value of this function
-static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
-                                       mlir::Value shiftVal,
-                                       cir::VectorType vecTy,
-                                       mlir::Location loc) {
-  mlir::Type eltTy = vecTy.getElementType();
-  if (shiftVal.getType() != eltTy) {
-    shiftVal = builder.createIntCast(shiftVal, eltTy);
-  }
-  return cir::VecSplatOp::create(builder, loc, vecTy, shiftVal);
-}
-
-static mlir::Value emitCommonNeonShift(CIRGenBuilderTy &builder,
-                                       mlir::Location loc,
-                                       cir::VectorType resTy,
-                                       mlir::Value shifTgt,
-                                       mlir::Value shiftAmt, bool shiftLeft) {
-  shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc);
-  return cir::ShiftOp::create(builder, loc, resTy,
-                              builder.createBitcast(shifTgt, resTy), shiftAmt,
-                              shiftLeft);
+  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal,
+                             /*shiftLeft=*/false);
 }
 
 static mlir::Value emitCommonNeonBuiltinExpr(

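With the duplicates gone, a single copy of each shift helper remains. For
reference, the surviving common shift path, as quoted verbatim in the
removed duplicate above, splats the constant amount, bitcasts the shifted
operand to the result vector type, and emits one cir.shift in the
requested direction:

  static mlir::Value emitCommonNeonShift(CIRGenBuilderTy &builder,
                                         mlir::Location loc,
                                         cir::VectorType resTy,
                                         mlir::Value shifTgt,
                                         mlir::Value shiftAmt, bool shiftLeft) {
    // Turn the scalar constant into a splat vector of resTy, then shift
    // the bitcast operand left or right depending on shiftLeft.
    shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc);
    return cir::ShiftOp::create(builder, loc, resTy,
                                builder.createBitcast(shifTgt, resTy), shiftAmt,
                                shiftLeft);
  }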