https://github.com/ArfiH updated 
https://github.com/llvm/llvm-project/pull/185992

>From 6ca92fc78cbbce1c299913bb31e3307b1d15ef95 Mon Sep 17 00:00:00 2001
From: ArfiH <[email protected]>
Date: Wed, 11 Mar 2026 19:28:35 +0530
Subject: [PATCH] [CIR][AArch64] Implement vrshr_n_v and vrshrd_n_s64/u64 NEON
 builtins

Implement the following AArch64 NEON rounding right shift builtins in CIR:
- vrshr_n_v / vrshrq_n_v (vector variants, all element types)
- vrshrd_n_s64 / vrshrd_n_u64 (scalar 64-bit variants)

Part of issue #185382.
---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  99 +++++++++-
 clang/test/CodeGen/AArch64/neon-intrinsics.c  | 157 ++++++++--------
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 172 ++++++++++++++++++
 3 files changed, 342 insertions(+), 86 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5534e69b5f8bc..fd54abc50a849 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -831,6 +831,55 @@ static cir::VectorType getNeonType(CIRGenFunction *cgf, 
NeonTypeFlags typeFlags,
   llvm_unreachable("Unknown vector element type!");
 }
 
+/// Get integer from a mlir::Value that is a constant op.
+static int64_t getIntValueFromConstOp(mlir::Value val) {
+  auto constOp = val.getDefiningOp<cir::ConstantOp>();
+  assert(constOp && "Expected constant op for shift amount");
+  return constOp.getIntValue().getSExtValue();
+}
+
+/// Build a constant shift amount vector to shift a vector of `vecTy`.
+/// The shift amount is always stored as a signed integer vector since
+/// srshl/urshl both require a signed shift vector even for unsigned data.
+/// If `neg` is true, the shift amount is negated (used for right shift).
+static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder,
+                                       mlir::Value shiftVal,
+                                       cir::VectorType vecTy,
+                                       mlir::Location loc, bool neg) {
+  int64_t shiftAmt = getIntValueFromConstOp(shiftVal);
+  if (neg)
+    shiftAmt = -shiftAmt;
+  auto eltTy = mlir::cast<cir::IntType>(vecTy.getElementType());
+  // Always use signed element type: srshl/urshl require a signed shift vector.
+  cir::IntType signedEltTy = builder.getSIntNTy(eltTy.getWidth());
+  cir::VectorType shiftVecTy =
+      cir::VectorType::get(signedEltTy, vecTy.getSize());
+  llvm::SmallVector<mlir::Attribute> vecAttr(
+      shiftVecTy.getSize(), cir::IntAttr::get(signedEltTy, shiftAmt));
+  cir::ConstVectorAttr constVecAttr = cir::ConstVectorAttr::get(
+      shiftVecTy, mlir::ArrayAttr::get(builder.getContext(), vecAttr));
+  return cir::ConstantOp::create(builder, loc, constVecAttr);
+}
+
+// Forward declarations — defined after hasExtraNeonArgument below.
+template <typename Operation>
+static mlir::Value
+emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy &builder,
+                 llvm::SmallVector<mlir::Type> argTypes,
+                 llvm::SmallVectorImpl<mlir::Value> &args,
+                 std::optional<llvm::StringRef> intrinsicName,
+                 mlir::Type funcResTy, mlir::Location loc,
+                 bool isConstrainedFPIntrinsic = false, unsigned shift = 0,
+                 bool rightshift = false);
+
+static mlir::Value emitNeonCall(CIRGenModule &cgm, CIRGenBuilderTy &builder,
+                                llvm::SmallVector<mlir::Type> argTypes,
+                                llvm::SmallVectorImpl<mlir::Value> &args,
+                                llvm::StringRef intrinsicName,
+                                mlir::Type funcResTy, mlir::Location loc,
+                                bool isConstrainedFPIntrinsic = false,
+                                unsigned shift = 0, bool rightshift = false);
+
 static mlir::Value emitCommonNeonBuiltinExpr(
     CIRGenFunction &cgf, unsigned builtinID, unsigned llvmIntrinsic,
     unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier,
@@ -1047,8 +1096,21 @@ static mlir::Value emitCommonNeonBuiltinExpr(
   case NEON::BI__builtin_neon_vrsqrteq_v:
   case NEON::BI__builtin_neon_vrndi_v:
   case NEON::BI__builtin_neon_vrndiq_v:
+    cgf.cgm.errorNYI(expr->getSourceRange(),
+                     std::string("unimplemented AArch64 builtin call: ") +
+                         ctx.BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vrshr_n_v:
-  case NEON::BI__builtin_neon_vrshrq_n_v:
+  case NEON::BI__builtin_neon_vrshrq_n_v: {
+    // srshl/urshl are left-shift intrinsics; a negative shift performs a
+    // rounding right-shift. The shift amount is negated via rightshift=true.
+    bool isUnsigned = neonType.isUnsigned();
+    llvm::StringRef intrName =
+        isUnsigned ? "aarch64.neon.urshl" : "aarch64.neon.srshl";
+    return emitNeonCall(cgf.cgm, cgf.getBuilder(), {ty, ty}, ops, intrName, ty,
+                        loc, /*isConstrainedFP=*/false, /*shift=*/1,
+                        /*rightshift=*/true);
+  }
   case NEON::BI__builtin_neon_vsha512hq_u64:
   case NEON::BI__builtin_neon_vsha512h2q_u64:
   case NEON::BI__builtin_neon_vsha512su0q_u64:
@@ -1306,8 +1368,8 @@ emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy 
&builder,
                  llvm::SmallVectorImpl<mlir::Value> &args,
                  std::optional<llvm::StringRef> intrinsicName,
                  mlir::Type funcResTy, mlir::Location loc,
-                 bool isConstrainedFPIntrinsic = false, unsigned shift = 0,
-                 bool rightshift = false) {
+                 bool isConstrainedFPIntrinsic, unsigned shift,
+                 bool rightshift) {
   // TODO(cir): Consider removing the following unreachable when we have
   // emitConstrainedFPCall feature implemented
   assert(!cir::MissingFeatures::emitConstrainedFPCall());
@@ -1319,7 +1381,9 @@ emitNeonCallToOp(CIRGenModule &cgm, CIRGenBuilderTy 
&builder,
       assert(!cir::MissingFeatures::emitConstrainedFPCall());
     }
     if (shift > 0 && shift == j) {
-      cgm.errorNYI(loc, std::string("intrinsic requiring a shift Op"));
+      args[j] = emitNeonShiftVector(builder, args[j],
+                                    mlir::cast<cir::VectorType>(argTypes[j]),
+                                    loc, rightshift);
     } else {
       args[j] = builder.createBitcast(args[j], argTypes[j]);
     }
@@ -1344,8 +1408,8 @@ static mlir::Value emitNeonCall(CIRGenModule &cgm, 
CIRGenBuilderTy &builder,
                                 llvm::SmallVectorImpl<mlir::Value> &args,
                                 llvm::StringRef intrinsicName,
                                 mlir::Type funcResTy, mlir::Location loc,
-                                bool isConstrainedFPIntrinsic = false,
-                                unsigned shift = 0, bool rightshift = false) {
+                                bool isConstrainedFPIntrinsic,
+                                unsigned shift, bool rightshift) {
   return emitNeonCallToOp<cir::LLVMIntrinsicCallOp>(
       cgm, builder, std::move(argTypes), args, intrinsicName, funcResTy, loc,
       isConstrainedFPIntrinsic, shift, rightshift);
@@ -2781,8 +2845,27 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vqshlud_n_s64:
   case NEON::BI__builtin_neon_vqshld_n_u64:
   case NEON::BI__builtin_neon_vqshld_n_s64:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vrshrd_n_u64:
-  case NEON::BI__builtin_neon_vrshrd_n_s64:
+  case NEON::BI__builtin_neon_vrshrd_n_s64: {
+    // srshl/urshl are left-shift intrinsics; passing -n performs a rounding
+    // right-shift by n.
+    llvm::StringRef intrName = builtinID == NEON::BI__builtin_neon_vrshrd_n_s64
+                                   ? "aarch64.neon.srshl"
+                                   : "aarch64.neon.urshl";
+    cir::IntType intType = builtinID == NEON::BI__builtin_neon_vrshrd_n_u64
+                               ? builder.getUInt64Ty()
+                               : builder.getSInt64Ty();
+    int64_t sv = -cast<cir::IntAttr>(
+                      cast<cir::ConstantOp>(ops[1].getDefiningOp()).getValue())
+                      .getSInt();
+    ops[1] = builder.getSInt64(sv, loc);
+    return emitNeonCall(cgm, builder, {intType, builder.getSInt64Ty()}, ops,
+                        intrName, intType, loc);
+  }
   case NEON::BI__builtin_neon_vrsrad_n_u64:
   case NEON::BI__builtin_neon_vrsrad_n_s64:
   case NEON::BI__builtin_neon_vshld_n_s64:
@@ -3208,4 +3291,4 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
   }
 
   // Unreachable: All cases in the switch above return.
-}
+}
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c 
b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index bfaea2b8ae909..0b4bdde13d857 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -7124,6 +7124,7 @@ uint64x2_t test_vsraq_n_u64(uint64x2_t a, uint64x2_t b) {
   return vsraq_n_u64(a, b, 3);
 }
 
+
 // CHECK-LABEL: define dso_local <8 x i8> @test_vrshr_n_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -7158,6 +7159,64 @@ int32x2_t test_vrshr_n_s32(int32x2_t a) {
   return vrshr_n_s32(a, 3);
 }
 
+// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_s64(
+// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
+// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
+//
+int64x1_t test_vrshr_n_s64(int64x1_t a) {
+  return vrshr_n_s64(a, 1);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vrshr_n_u8(
+// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.urshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3))
+// CHECK-NEXT:    ret <8 x i8> [[VRSHR_N]]
+//
+uint8x8_t test_vrshr_n_u8(uint8x8_t a) {
+  return vrshr_n_u8(a, 3);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vrshr_n_u16(
+// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3))
+// CHECK-NEXT:    ret <4 x i16> [[VRSHR_N1]]
+//
+uint16x4_t test_vrshr_n_u16(uint16x4_t a) {
+  return vrshr_n_u16(a, 3);
+}
+
+// CHECK-LABEL: define dso_local <2 x i32> @test_vrshr_n_u32(
+// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3))
+// CHECK-NEXT:    ret <2 x i32> [[VRSHR_N1]]
+//
+uint32x2_t test_vrshr_n_u32(uint32x2_t a) {
+  return vrshr_n_u32(a, 3);
+}
+
+// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_u64(
+// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
+// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
+//
+uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
+  return vrshr_n_u64(a, 1);
+}
+
 // CHECK-LABEL: define dso_local <16 x i8> @test_vrshrq_n_s8(
 // CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -7204,40 +7263,6 @@ int64x2_t test_vrshrq_n_s64(int64x2_t a) {
   return vrshrq_n_s64(a, 3);
 }
 
-// CHECK-LABEL: define dso_local <8 x i8> @test_vrshr_n_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.urshl.v8i8(<8 x i8> [[A]], <8 x i8> splat (i8 -3))
-// CHECK-NEXT:    ret <8 x i8> [[VRSHR_N]]
-//
-uint8x8_t test_vrshr_n_u8(uint8x8_t a) {
-  return vrshr_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vrshr_n_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.urshl.v4i16(<4 x i16> [[VRSHR_N]], <4 x i16> splat (i16 -3))
-// CHECK-NEXT:    ret <4 x i16> [[VRSHR_N1]]
-//
-uint16x4_t test_vrshr_n_u16(uint16x4_t a) {
-  return vrshr_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vrshr_n_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.urshl.v2i32(<2 x i32> [[VRSHR_N]], <2 x i32> splat (i32 -3))
-// CHECK-NEXT:    ret <2 x i32> [[VRSHR_N1]]
-//
-uint32x2_t test_vrshr_n_u32(uint32x2_t a) {
-  return vrshr_n_u32(a, 3);
-}
-
 // CHECK-LABEL: define dso_local <16 x i8> @test_vrshrq_n_u8(
 // CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -7284,6 +7309,26 @@ uint64x2_t test_vrshrq_n_u64(uint64x2_t a) {
   return vrshrq_n_u64(a, 3);
 }
 
+// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
+// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 
[[A]], i64 -63)
+// CHECK-NEXT:    ret i64 [[VRSHR_N]]
+//
+int64_t test_vrshrd_n_s64(int64_t a) {
+  return (int64_t)vrshrd_n_s64(a, 63);
+}
+
+// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_u64(
+// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 
[[A]], i64 -63)
+// CHECK-NEXT:    ret i64 [[VRSHR_N]]
+//
+uint64_t test_vrshrd_n_u64(uint64_t a) {
+  return (uint64_t)vrshrd_n_u64(a, 63);
+}
+
 // CHECK-LABEL: define dso_local <8 x i8> @test_vrsra_n_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -17730,50 +17775,6 @@ uint64x1_t test_vshr_n_u64(uint64x1_t a) {
   return vshr_n_u64(a, 1);
 }
 
-// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 
[[A]], i64 -63)
-// CHECK-NEXT:    ret i64 [[VRSHR_N]]
-//
-int64_t test_vrshrd_n_s64(int64_t a) {
-  return (int64_t)vrshrd_n_s64(a, 63);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_s64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.srshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
-// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
-//
-int64x1_t test_vrshr_n_s64(int64x1_t a) {
-  return vrshr_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local i64 @test_vrshrd_n_u64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 
[[A]], i64 -63)
-// CHECK-NEXT:    ret i64 [[VRSHR_N]]
-//
-uint64_t test_vrshrd_n_u64(uint64_t a) {
-  return (uint64_t)vrshrd_n_u64(a, 63);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vrshr_n_u64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[VRSHR_N:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VRSHR_N1:%.*]] = call <1 x i64> 
@llvm.aarch64.neon.urshl.v1i64(<1 x i64> [[VRSHR_N]], <1 x i64> splat (i64 -1))
-// CHECK-NEXT:    ret <1 x i64> [[VRSHR_N1]]
-//
-uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
-  return vrshr_n_u64(a, 1);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vsrad_n_s64(
 // CHECK-SAME: i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index b740c3b5b2310..0f76bc389cce9 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -936,3 +936,175 @@ uint32x4_t test_vabaq_u32(uint32x4_t v1, uint32x4_t v2, 
uint32x4_t v3) {
 // LLVM-NEXT:    ret <4 x i32> [[ADD_I]]
   return vabaq_u32(v1, v2, v3);
 }
+
+//===----------------------------------------------------------------------===//
+// 2.x. Vector rounding shift right
+// 
https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#vector-rounding-shift-right
+//===----------------------------------------------------------------------===//
+
+// vrshr_n_* (64-bit vectors)
+// ALL-LABEL: @test_vrshr_n_s8(
+int8x8_t test_vrshr_n_s8(int8x8_t a) {
+// CIR: cir.const #cir.const_vector
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x 
i8> {{.*}}, <8 x i8> splat (i8 -3))
+// LLVM: ret <8 x i8> [[VRSHR_N]]
+  return vrshr_n_s8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_s16(
+int16x4_t test_vrshr_n_s16(int16x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x 
i16> {{.*}}, <4 x i16> splat (i16 -3))
+// LLVM: ret <4 x i16> [[VRSHR_N1]]
+  return vrshr_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_s32(
+int32x2_t test_vrshr_n_s32(int32x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x 
i32> {{.*}}, <2 x i32> splat (i32 -3))
+// LLVM: ret <2 x i32> [[VRSHR_N1]]
+  return vrshr_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_s64(
+int64x1_t test_vrshr_n_s64(int64x1_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x 
i64> {{.*}}, <1 x i64> splat (i64 -1))
+// LLVM: ret <1 x i64> [[VRSHR_N1]]
+  return vrshr_n_s64(a, 1);
+}
+
+// ALL-LABEL: @test_vrshr_n_u8(
+uint8x8_t test_vrshr_n_u8(uint8x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x 
i8> {{.*}}, <8 x i8> splat (i8 -3))
+// LLVM: ret <8 x i8> [[VRSHR_N]]
+  return vrshr_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u16(
+uint16x4_t test_vrshr_n_u16(uint16x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x 
i16> {{.*}}, <4 x i16> splat (i16 -3))
+// LLVM: ret <4 x i16> [[VRSHR_N1]]
+  return vrshr_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u32(
+uint32x2_t test_vrshr_n_u32(uint32x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x 
i32> {{.*}}, <2 x i32> splat (i32 -3))
+// LLVM: ret <2 x i32> [[VRSHR_N1]]
+  return vrshr_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshr_n_u64(
+uint64x1_t test_vrshr_n_u64(uint64x1_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x 
i64> {{.*}}, <1 x i64> splat (i64 -1))
+// LLVM: ret <1 x i64> [[VRSHR_N1]]
+  return vrshr_n_u64(a, 1);
+}
+
+// vrshrq_n_* (128-bit vectors)
+// ALL-LABEL: @test_vrshrq_n_s8(
+int8x16_t test_vrshrq_n_s8(int8x16_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x 
i8> {{.*}}, <16 x i8> splat (i8 -3))
+// LLVM: ret <16 x i8> [[VRSHR_N]]
+  return vrshrq_n_s8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s16(
+int16x8_t test_vrshrq_n_s16(int16x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x 
i16> {{.*}}, <8 x i16> splat (i16 -3))
+// LLVM: ret <8 x i16> [[VRSHR_N1]]
+  return vrshrq_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s32(
+int32x4_t test_vrshrq_n_s32(int32x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x 
i32> {{.*}}, <4 x i32> splat (i32 -3))
+// LLVM: ret <4 x i32> [[VRSHR_N1]]
+  return vrshrq_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_s64(
+int64x2_t test_vrshrq_n_s64(int64x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x 
i64> {{.*}}, <2 x i64> splat (i64 -3))
+// LLVM: ret <2 x i64> [[VRSHR_N1]]
+  return vrshrq_n_s64(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u8(
+uint8x16_t test_vrshrq_n_u8(uint8x16_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x 
i8> {{.*}}, <16 x i8> splat (i8 -3))
+// LLVM: ret <16 x i8> [[VRSHR_N]]
+  return vrshrq_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u16(
+uint16x8_t test_vrshrq_n_u16(uint16x8_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x 
i16> {{.*}}, <8 x i16> splat (i16 -3))
+// LLVM: ret <8 x i16> [[VRSHR_N1]]
+  return vrshrq_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u32(
+uint32x4_t test_vrshrq_n_u32(uint32x4_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N1:%.*]] = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x 
i32> {{.*}}, <4 x i32> splat (i32 -3))
+// LLVM: ret <4 x i32> [[VRSHR_N1]]
+  return vrshrq_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vrshrq_n_u64(
+uint64x2_t test_vrshrq_n_u64(uint64x2_t a) {
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+// LLVM: [[VRSHR_N1:%.*]] = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x 
i64> {{.*}}, <2 x i64> splat (i64 -3))
+// LLVM: ret <2 x i64> [[VRSHR_N1]]
+  return vrshrq_n_u64(a, 3);
+}
+
+// vrshrd_n_* (scalar 64-bit)
+// ALL-LABEL: @test_vrshrd_n_s64(
+int64_t test_vrshrd_n_s64(int64_t a) {
+// CIR: cir.const #cir.int<{{.*}}> : !s64i
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.srshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.srshl.i64(i64 {{.*}}, 
i64 -63)
+// LLVM: ret i64 [[VRSHR_N]]
+  return (int64_t)vrshrd_n_s64(a, 63);
+}
+
+// ALL-LABEL: @test_vrshrd_n_u64(
+uint64_t test_vrshrd_n_u64(uint64_t a) {
+// CIR: cir.const #cir.int<{{.*}}> : !s64i
+// CIR: cir.call_llvm_intrinsic "aarch64.neon.urshl" {{%.*}}, {{%.*}}
+
+// LLVM: [[VRSHR_N:%.*]] = call i64 @llvm.aarch64.neon.urshl.i64(i64 {{.*}}, 
i64 -63)
+// LLVM: ret i64 [[VRSHR_N]]
+  return (uint64_t)vrshrd_n_u64(a, 63);
+}

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to