https://github.com/kcloudy0717 created 
https://github.com/llvm/llvm-project/pull/184360

This PR adds QuadReadAcrossX intrinsic support in HLSL with codegen for both 
DirectX and SPIRV backends. Resolves 
https://github.com/llvm/llvm-project/issues/99175.

- [x] Implement QuadReadAcrossX clang builtin
- [x] Link QuadReadAcrossX clang builtin with hlsl_intrinsics.h
- [x] Add sema checks for QuadReadAcrossX to CheckHLSLBuiltinFunctionCall in 
SemaChecking.cpp
- [x] Add codegen for QuadReadAcrossX to EmitHLSLBuiltinExpr in CGBuiltin.cpp
- [x] Add codegen tests to clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
- [x] Add sema tests to 
clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
- [x] Create the int_dx_QuadReadAcrossX intrinsic in IntrinsicsDirectX.td
- [x] Create the DXILOpMapping of int_dx_QuadReadAcrossX to 123 in DXIL.td
- [x] Create the QuadReadAcrossX.ll and QuadReadAcrossX_errors.ll tests in 
llvm/test/CodeGen/DirectX/
- [x] Create the int_spv_QuadReadAcrossX intrinsic in IntrinsicsSPIRV.td
- [x] In SPIRVInstructionSelector.cpp create the QuadReadAcrossX lowering and 
map it to int_spv_QuadReadAcrossX in SPIRVInstructionSelector::selectIntrinsic.
- [x] Create SPIR-V backend test case in 
llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll

>From bd5643f6e82aeef300fd3ff51d07f84fde00c7a9 Mon Sep 17 00:00:00 2001
From: Kai Huang <[email protected]>
Date: Tue, 3 Mar 2026 19:28:09 +0800
Subject: [PATCH] [HLSL][DXIL][SPIRV] QuadReadAcrossX intrinsic support

---
 clang/include/clang/Basic/Builtins.td         |  6 ++
 clang/lib/CodeGen/CGHLSLBuiltins.cpp          |  7 ++
 clang/lib/CodeGen/CGHLSLRuntime.h             |  1 +
 .../lib/Headers/hlsl/hlsl_alias_intrinsics.h  | 99 +++++++++++++++++++
 clang/lib/Sema/SemaHLSL.cpp                   | 14 +++
 .../CodeGenHLSL/builtins/QuadReadAcrossX.hlsl | 46 +++++++++
 .../BuiltIns/QuadReadAcrossX-errors.hlsl      | 28 ++++++
 llvm/include/llvm/IR/IntrinsicsDirectX.td     |  1 +
 llvm/include/llvm/IR/IntrinsicsSPIRV.td       |  1 +
 llvm/lib/Target/DirectX/DXIL.td               | 22 +++++
 llvm/lib/Target/DirectX/DXILShaderFlags.cpp   |  3 +-
 .../DirectX/DirectXTargetTransformInfo.cpp    |  1 +
 llvm/lib/Target/SPIRV/SPIRVInstrInfo.td       |  1 +
 .../Target/SPIRV/SPIRVInstructionSelector.cpp | 34 +++++++
 llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp |  6 +-
 llvm/test/CodeGen/DirectX/QuadReadAcrossX.ll  | 87 ++++++++++++++++
 .../CodeGen/DirectX/ShaderFlags/wave-ops.ll   |  7 ++
 .../SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll  | 44 +++++++++
 18 files changed, 406 insertions(+), 2 deletions(-)
 create mode 100644 clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
 create mode 100644 clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
 create mode 100644 llvm/test/CodeGen/DirectX/QuadReadAcrossX.ll
 create mode 100644 llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll

diff --git a/clang/include/clang/Basic/Builtins.td 
b/clang/include/clang/Basic/Builtins.td
index 531c3702161f2..a4acc58cf2049 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -5228,6 +5228,12 @@ def HLSLWavePrefixProduct : LangBuiltin<"HLSL_LANG"> {
   let Prototype = "void(...)";
 }
 
+def HLSLQuadReadAcrossX : LangBuiltin<"HLSL_LANG"> {
+  let Spellings = ["__builtin_hlsl_quad_read_across_x"];
+  let Attributes = [NoThrow, Const];
+  let Prototype = "void(...)";
+}
+
 def HLSLClamp : LangBuiltin<"HLSL_LANG"> {
   let Spellings = ["__builtin_hlsl_elementwise_clamp"];
   let Attributes = [NoThrow, Const, CustomTypeChecking];
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp 
b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 70891eac39425..395c9039a06d6 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -1213,6 +1213,13 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned 
BuiltinID,
                                &CGM.getModule(), IID, {OpExpr->getType()}),
                            ArrayRef{OpExpr}, "hlsl.wave.prefix.product");
   }
+  case Builtin::BI__builtin_hlsl_quad_read_across_x: {
+    Value *OpExpr = EmitScalarExpr(E->getArg(0));
+    Intrinsic::ID ID = CGM.getHLSLRuntime().getQuadReadAcrossXIntrinsic();
+    return EmitRuntimeCall(Intrinsic::getOrInsertDeclaration(
+                               &CGM.getModule(), ID, {OpExpr->getType()}),
+                           ArrayRef{OpExpr}, "hlsl.quad.read.across.x");
+  }
   case Builtin::BI__builtin_hlsl_elementwise_sign: {
     auto *Arg0 = E->getArg(0);
     Value *Op0 = EmitScalarExpr(Arg0);
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h 
b/clang/lib/CodeGen/CGHLSLRuntime.h
index dbbc887353cec..fd5d4811c3ba2 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -154,6 +154,7 @@ class CGHLSLRuntime {
   GENERATE_HLSL_INTRINSIC_FUNCTION(WaveIsFirstLane, wave_is_first_lane)
   GENERATE_HLSL_INTRINSIC_FUNCTION(WaveGetLaneCount, wave_get_lane_count)
   GENERATE_HLSL_INTRINSIC_FUNCTION(WaveReadLaneAt, wave_readlane)
+  GENERATE_HLSL_INTRINSIC_FUNCTION(QuadReadAcrossX, quad_read_across_x)
   GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitUHigh, firstbituhigh)
   GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitSHigh, firstbitshigh)
   GENERATE_HLSL_INTRINSIC_FUNCTION(FirstBitLow, firstbitlow)
diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h 
b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
index 2543401bdfbf9..ba122b7abd7b2 100644
--- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h
@@ -3106,6 +3106,105 @@ __attribute__((convergent)) double3 
WavePrefixProduct(double3);
 _HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_prefix_product)
 __attribute__((convergent)) double4 WavePrefixProduct(double4);
 
+//===----------------------------------------------------------------------===//
+// QuadReadAcrossX builtins
+//===----------------------------------------------------------------------===//
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) half QuadReadAcrossX(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) half2 QuadReadAcrossX(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) half3 QuadReadAcrossX(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) half4 QuadReadAcrossX(half4);
+
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int16_t QuadReadAcrossX(int16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int16_t2 QuadReadAcrossX(int16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int16_t3 QuadReadAcrossX(int16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int16_t4 QuadReadAcrossX(int16_t4);
+
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint16_t QuadReadAcrossX(uint16_t);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint16_t2 QuadReadAcrossX(uint16_t2);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint16_t3 QuadReadAcrossX(uint16_t3);
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint16_t4 QuadReadAcrossX(uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int QuadReadAcrossX(int);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int2 QuadReadAcrossX(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int3 QuadReadAcrossX(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int4 QuadReadAcrossX(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint QuadReadAcrossX(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint2 QuadReadAcrossX(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint3 QuadReadAcrossX(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint4 QuadReadAcrossX(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int64_t QuadReadAcrossX(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int64_t2 QuadReadAcrossX(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int64_t3 QuadReadAcrossX(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) int64_t4 QuadReadAcrossX(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint64_t QuadReadAcrossX(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint64_t2 QuadReadAcrossX(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint64_t3 QuadReadAcrossX(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) uint64_t4 QuadReadAcrossX(uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) float QuadReadAcrossX(float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) float2 QuadReadAcrossX(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) float3 QuadReadAcrossX(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) float4 QuadReadAcrossX(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) double QuadReadAcrossX(double);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) double2 QuadReadAcrossX(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) double3 QuadReadAcrossX(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_quad_read_across_x)
+__attribute__((convergent)) double4 QuadReadAcrossX(double4);
+
 
//===----------------------------------------------------------------------===//
 // sign builtins
 
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 804ea70aaddce..d11496bab7893 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -3900,6 +3900,20 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned 
BuiltinID, CallExpr *TheCall) {
     TheCall->setType(ArgTyExpr);
     break;
   }
+  case Builtin::BI__builtin_hlsl_quad_read_across_x: {
+    if (SemaRef.checkArgCount(TheCall, 1))
+      return true;
+
+    // Ensure input expr type is a scalar/vector and the same as the return 
type
+    if (CheckAnyScalarOrVector(&SemaRef, TheCall, 0))
+      return true;
+    if (CheckNotBoolScalarOrVector(&SemaRef, TheCall, 0))
+      return true;
+    ExprResult Expr = TheCall->getArg(0);
+    QualType ArgTyExpr = Expr.get()->getType();
+    TheCall->setType(ArgTyExpr);
+    break;
+  }
   case Builtin::BI__builtin_hlsl_elementwise_splitdouble: {
     if (SemaRef.checkArgCount(TheCall, 3))
       return true;
diff --git a/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl 
b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
new file mode 100644
index 0000000000000..e0f14cd8be9b5
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/QuadReadAcrossX.hlsl
@@ -0,0 +1,46 @@
+// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \
+// RUN:   dxil-pc-shadermodel6.3-compute %s -emit-llvm -disable-llvm-passes -o 
- | \
+// RUN:   FileCheck %s --check-prefixes=CHECK,CHECK-DXIL
+// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -triple \
+// RUN:   spirv-pc-vulkan-compute %s -emit-llvm -disable-llvm-passes -o - | \
+// RUN:   FileCheck %s --check-prefixes=CHECK,CHECK-SPIRV
+
+// Test basic lowering to runtime function call.
+
+// CHECK-LABEL: test_int
+int test_int(int expr) {
+  // CHECK-SPIRV:  %[[RET:.*]] = call spir_func [[TY:.*]] 
@llvm.spv.quad.read.across.x.i32([[TY]] %[[#]])
+  // CHECK-DXIL:  %[[RET:.*]] = call [[TY:.*]] 
@llvm.dx.quad.read.across.x.i32([[TY]] %[[#]])
+  // CHECK:  ret [[TY]] %[[RET]]
+  return QuadReadAcrossX(expr);
+}
+
+// CHECK-DXIL: declare [[TY]] @llvm.dx.quad.read.across.x.i32([[TY]]) 
#[[#attr:]]
+// CHECK-SPIRV: declare [[TY]] @llvm.spv.quad.read.across.x.i32([[TY]]) 
#[[#attr:]]
+
+// CHECK-LABEL: test_uint64_t
+uint64_t test_uint64_t(uint64_t expr) {
+  // CHECK-SPIRV:  %[[RET:.*]] = call spir_func [[TY:.*]] 
@llvm.spv.quad.read.across.x.i64([[TY]] %[[#]])
+  // CHECK-DXIL:  %[[RET:.*]] = call [[TY:.*]] 
@llvm.dx.quad.read.across.x.i64([[TY]] %[[#]])
+  // CHECK:  ret [[TY]] %[[RET]]
+  return QuadReadAcrossX(expr);
+}
+
+// CHECK-DXIL: declare [[TY]] @llvm.dx.quad.read.across.x.i64([[TY]]) 
#[[#attr:]]
+// CHECK-SPIRV: declare [[TY]] @llvm.spv.quad.read.across.x.i64([[TY]]) 
#[[#attr:]]
+
+// Test basic lowering to runtime function call with array and float value.
+
+// CHECK-LABEL: test_floatv4
+float4 test_floatv4(float4 expr) {
+  // CHECK-SPIRV:  %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn 
spir_func [[TY1:.*]] @llvm.spv.quad.read.across.x.v4f32([[TY1]] %[[#]]
+  // CHECK-DXIL:  %[[RET1:.*]] = call reassoc nnan ninf nsz arcp afn 
[[TY1:.*]] @llvm.dx.quad.read.across.x.v4f32([[TY1]] %[[#]])
+  // CHECK:  ret [[TY1]] %[[RET1]]
+  return QuadReadAcrossX(expr);
+}
+
+// CHECK-DXIL: declare [[TY1]] @llvm.dx.quad.read.across.x.v4f32([[TY1]]) 
#[[#attr]]
+// CHECK-SPIRV: declare [[TY1]] @llvm.spv.quad.read.across.x.v4f32([[TY1]]) 
#[[#attr]]
+
+// CHECK: attributes #[[#attr]] = {{{.*}} convergent {{.*}}}
+
diff --git a/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl 
b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
new file mode 100644
index 0000000000000..a9dcc162bbbb5
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/QuadReadAcrossX-errors.hlsl
@@ -0,0 +1,28 @@
+// RUN: %clang_cc1 -finclude-default-header -triple 
dxil-pc-shadermodel6.6-library %s -emit-llvm-only -disable-llvm-passes -verify
+
+int test_too_few_arg() {
+  return __builtin_hlsl_quad_read_across_x();
+  // expected-error@-1 {{too few arguments to function call, expected 1, have 
0}}
+}
+
+float2 test_too_many_arg(float2 p0) {
+  return __builtin_hlsl_quad_read_across_x(p0, p0);
+  // expected-error@-1 {{too many arguments to function call, expected 1, have 
2}}
+}
+
+bool test_expr_bool_type_check(bool p0) {
+  return __builtin_hlsl_quad_read_across_x(p0);
+  // expected-error@-1 {{invalid operand of type 'bool'}}
+}
+
+bool2 test_expr_bool_vec_type_check(bool2 p0) {
+  return __builtin_hlsl_quad_read_across_x(p0);
+  // expected-error@-1 {{invalid operand of type 'bool2' (aka 'vector<bool, 
2>')}}
+}
+
+struct S { float f; };
+
+S test_expr_struct_type_check(S p0) {
+  return __builtin_hlsl_quad_read_across_x(p0);
+  // expected-error@-1 {{invalid operand of type 'S' where a scalar or vector 
is required}}
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td 
b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 909482d72aa88..f8e36decbcb87 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -231,6 +231,7 @@ def int_dx_wave_prefix_sum : 
DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType
 def int_dx_wave_prefix_usum : DefaultAttrsIntrinsic<[llvm_anyint_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
 def int_dx_wave_prefix_product : DefaultAttrsIntrinsic<[llvm_any_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
 def int_dx_wave_prefix_uproduct : DefaultAttrsIntrinsic<[llvm_anyint_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
+def int_dx_quad_read_across_x : DefaultAttrsIntrinsic<[llvm_any_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
 def int_dx_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, 
llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>;
 def int_dx_step : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, 
LLVMMatchType<0>], [IntrNoMem]>;
 def int_dx_splitdouble : DefaultAttrsIntrinsic<[llvm_anyint_ty, 
LLVMMatchType<0>],
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td 
b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index 9819f881b5c30..67dc4ce93d142 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -135,6 +135,7 @@ def int_spv_rsqrt : 
DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]
       : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrConvergent]>;
   def int_spv_wave_prefix_sum : DefaultAttrsIntrinsic<[llvm_any_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
   def int_spv_wave_prefix_product : DefaultAttrsIntrinsic<[llvm_any_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
+  def int_spv_quad_read_across_x : DefaultAttrsIntrinsic<[llvm_any_ty], 
[LLVMMatchType<0>], [IntrConvergent, IntrNoMem]>;
   def int_spv_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, 
llvm_i32_ty>], [llvm_any_ty], [IntrNoMem]>;
   def int_spv_radians : DefaultAttrsIntrinsic<[LLVMMatchType<0>], 
[llvm_anyfloat_ty], [IntrNoMem]>;
   def int_spv_group_memory_barrier_with_group_sync : 
ClangBuiltin<"__builtin_spirv_group_barrier">,
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 59a5b7fe4d508..590b11fcb6bb4 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -320,6 +320,10 @@ defvar WaveOpKind_Max = 3;
 defvar SignedOpKind_Signed = 0;
 defvar SignedOpKind_Unsigned = 1;
 
+defvar QuadOpKind_ReadAcrossX = 0;
+defvar QuadOpKind_ReadAcrossY = 1;
+defvar QuadOpKind_ReadAcrossDiagonal = 2;
+
 // Intrinsic arg selection
 class IntrinArgSelectType;
 def IntrinArgSelect_Index : IntrinArgSelectType;
@@ -1158,6 +1162,24 @@ def WavePrefixOp : DXILOp<121, wavePrefixOp> {
   let attributes = [Attributes<DXIL1_0, []>];
 }
 
+def QuadOp : DXILOp<123, quadOp> {
+  let Doc = "returns the value from another lane within the quad by swapping 
values in a direction";
+  let intrinsics = [
+    IntrinSelect<int_dx_quad_read_across_x,
+                 [
+                   IntrinArgIndex<0>, IntrinArgI8<QuadOpKind_ReadAcrossX>
+                 ]>,
+  ];
+
+  let arguments = [OverloadTy, Int8Ty];
+  let result = OverloadTy;
+  let overloads = [
+    Overloads<DXIL1_0, [HalfTy, FloatTy, DoubleTy, Int16Ty, Int32Ty, Int64Ty]>
+  ];
+  let stages = [Stages<DXIL1_0, [all_stages]>];
+  let attributes = [Attributes<DXIL1_0, []>];
+}
+
 def WavePrefixBitCount : DXILOp<136, wavePrefixOp> {
   let Doc = "returns the count of bits of Expr set to 1 on prior lanes";
   let intrinsics = [IntrinSelect<int_dx_wave_prefix_bit_count>];
diff --git a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp 
b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
index 7e16dcda87a57..c5e48a911c848 100644
--- a/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
+++ b/llvm/lib/Target/DirectX/DXILShaderFlags.cpp
@@ -71,7 +71,6 @@ static bool checkWaveOps(Intrinsic::ID IID) {
   // case Intrinsic::dx_wave_reduce.xor:
   // case Intrinsic::dx_wave_prefixop:
   // case Intrinsic::dx_quad.readat:
-  // case Intrinsic::dx_quad.readacrossx:
   // case Intrinsic::dx_quad.readacrossy:
   // case Intrinsic::dx_quad.readacrossdiagonal:
   // case Intrinsic::dx_wave_prefixballot:
@@ -102,6 +101,8 @@ static bool checkWaveOps(Intrinsic::ID IID) {
   case Intrinsic::dx_wave_prefix_usum:
   case Intrinsic::dx_wave_prefix_product:
   case Intrinsic::dx_wave_prefix_uproduct:
+    // Quad Op Variants
+  case Intrinsic::dx_quad_read_across_x:
     return true;
   }
 }
diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp 
b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
index 8018b09c9f248..be7fdbef2b097 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
@@ -68,6 +68,7 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable(
   case Intrinsic::dx_wave_reduce_usum:
   case Intrinsic::dx_wave_prefix_usum:
   case Intrinsic::dx_wave_prefix_uproduct:
+  case Intrinsic::dx_quad_read_across_x:
   case Intrinsic::dx_imad:
   case Intrinsic::dx_umad:
   case Intrinsic::dx_ddx_coarse:
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td 
b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
index d2f81bc30e949..b3e6512aac56c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
@@ -849,6 +849,7 @@ def OpGroupNonUniformBitwiseXor: 
OpGroupNUGroup<"BitwiseXor", 361>;
 def OpGroupNonUniformLogicalAnd: OpGroupNUGroup<"LogicalAnd", 362>;
 def OpGroupNonUniformLogicalOr: OpGroupNUGroup<"LogicalOr", 363>;
 def OpGroupNonUniformLogicalXor: OpGroupNUGroup<"LogicalXor", 364>;
+def OpGroupNonUniformQuadSwap: OpGroupNU4<"QuadSwap", 366>;
 
 // SPV_KHR_subgroup_rotate
 def OpGroupNonUniformRotateKHR: Op<4431, (outs ID:$res),
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp 
b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 344628f258229..4cb577e8915bc 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -261,6 +261,9 @@ class SPIRVInstructionSelector : public InstructionSelector 
{
   bool selectWaveExclusiveScanProduct(Register ResVReg, SPIRVTypeInst ResType,
                                       MachineInstr &I) const;
 
+  bool selectQuadSwap(Register ResVReg, SPIRVTypeInst ResType, MachineInstr &I,
+                      unsigned Direction) const;
+
   bool selectConst(Register ResVReg, SPIRVTypeInst ResType,
                    MachineInstr &I) const;
 
@@ -2986,6 +2989,34 @@ bool SPIRVInstructionSelector::selectWaveExclusiveScan(
   return true;
 }
 
+bool SPIRVInstructionSelector::selectQuadSwap(Register ResVReg,
+                                              SPIRVTypeInst ResType,
+                                              MachineInstr &I,
+                                              unsigned Direction) const {
+  assert(I.getNumOperands() == 3);
+  assert(I.getOperand(2).isReg());
+  MachineBasicBlock &BB = *I.getParent();
+  Register InputRegister = I.getOperand(2).getReg();
+  SPIRVTypeInst InputType = GR.getSPIRVTypeForVReg(InputRegister);
+
+  if (!InputType)
+    report_fatal_error("Input Type could not be determined.");
+
+  SPIRVTypeInst IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
+  bool ZeroAsNull = !STI.isShader();
+  Register DirectionReg =
+      GR.getOrCreateConstInt(Direction, I, IntTy, TII, ZeroAsNull);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpGroupNonUniformQuadSwap))
+      .addDef(ResVReg)
+      .addUse(GR.getSPIRVTypeID(ResType))
+      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII,
+                                     ZeroAsNull))
+      .addUse(InputRegister)
+      .addUse(DirectionReg)
+      .constrainAllUses(TII, TRI, RBI);
+  return true;
+}
+
 bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                 SPIRVTypeInst ResType,
                                                 MachineInstr &I) const {
@@ -4143,6 +4174,9 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register 
ResVReg,
     return selectWaveExclusiveScanSum(ResVReg, ResType, I);
   case Intrinsic::spv_wave_prefix_product:
     return selectWaveExclusiveScanProduct(ResVReg, ResType, I);
+  case Intrinsic::spv_quad_read_across_x: {
+    return selectQuadSwap(ResVReg, ResType, I, /*Direction*/ 0);
+  }
   case Intrinsic::spv_step:
     return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
   case Intrinsic::spv_radians:
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp 
b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 86659c623ebf2..7445c91f56441 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -869,7 +869,8 @@ void RequirementHandler::initAvailableCapabilities(const 
SPIRVSubtarget &ST) {
                       Capability::GroupNonUniformBallot,
                       Capability::GroupNonUniformClustered,
                       Capability::GroupNonUniformShuffle,
-                      Capability::GroupNonUniformShuffleRelative});
+                      Capability::GroupNonUniformShuffleRelative,
+                      Capability::GroupNonUniformQuad});
 
   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
     addAvailableCaps({Capability::DotProduct, Capability::DotProductInputAll,
@@ -1664,6 +1665,9 @@ void addInstrRequirements(const MachineInstr &MI,
     }
     break;
   }
+  case SPIRV::OpGroupNonUniformQuadSwap:
+    Reqs.addCapability(SPIRV::Capability::GroupNonUniformQuad);
+    break;
   case SPIRV::OpImageQueryFormat: {
     Register ResultReg = MI.getOperand(0).getReg();
     const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
diff --git a/llvm/test/CodeGen/DirectX/QuadReadAcrossX.ll 
b/llvm/test/CodeGen/DirectX/QuadReadAcrossX.ll
new file mode 100644
index 0000000000000..fd4fd165a52c1
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/QuadReadAcrossX.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -scalarizer -dxil-op-lower 
-mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s
+
+; Test that for scalar values, QuadReadAcrossX maps down to the DirectX op
+
+define noundef half @quad_read_across_x_half(half noundef %expr) {
+entry:
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr, i8 0)
+  %ret = call half @llvm.dx.quad.read.across.x.f16(half %expr)
+  ret half %ret
+}
+
+define noundef float @quad_read_across_x_float(float noundef %expr) {
+entry:
+; CHECK: call float @dx.op.quadOp.f32(i32 123, float %expr, i8 0)
+  %ret = call float @llvm.dx.quad.read.across.x.f32(float %expr)
+  ret float %ret
+}
+
+define noundef double @quad_read_across_x_double(double noundef %expr) {
+entry:
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr, i8 0)
+  %ret = call double @llvm.dx.quad.read.across.x.f64(double %expr)
+  ret double %ret
+}
+
+define noundef i16 @quad_read_across_x_i16(i16 noundef %expr) {
+entry:
+; CHECK: call i16 @dx.op.quadOp.i16(i32 123, i16 %expr, i8 0)
+  %ret = call i16 @llvm.dx.quad.read.across.x.i16(i16 %expr)
+  ret i16 %ret
+}
+
+define noundef i32 @quad_read_across_x_i32(i32 noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr, i8 0)
+  %ret = call i32 @llvm.dx.quad.read.across.x.i32(i32 %expr)
+  ret i32 %ret
+}
+
+define noundef i64 @quad_read_across_x_i64(i64 noundef %expr) {
+entry:
+; CHECK: call i64 @dx.op.quadOp.i64(i32 123, i64 %expr, i8 0)
+  %ret = call i64 @llvm.dx.quad.read.across.x.i64(i64 %expr)
+  ret i64 %ret
+}
+
+declare half @llvm.dx.quad.read.across.x.f16(half)
+declare float @llvm.dx.quad.read.across.x.f32(float)
+declare double @llvm.dx.quad.read.across.x.f64(double)
+
+declare i16 @llvm.dx.quad.read.across.x.i16(i16)
+declare i32 @llvm.dx.quad.read.across.x.i32(i32)
+declare i64 @llvm.dx.quad.read.across.x.i64(i64)
+
+; Test that for vector values, QuadReadAcrossX scalarizes and maps down to the
+; DirectX op
+
+define noundef <2 x half> @quad_read_across_x_v2half(<2 x half> noundef %expr) 
{
+entry:
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr.i0, i8 0)
+; CHECK: call half @dx.op.quadOp.f16(i32 123, half %expr.i1, i8 0)
+  %ret = call <2 x half> @llvm.dx.quad.read.across.x.v2f16(<2 x half> %expr)
+  ret <2 x half> %ret
+}
+
+define noundef <3 x i32> @quad_read_across_x_v3i32(<3 x i32> noundef %expr) {
+entry:
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i0, i8 0)
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i1, i8 0)
+; CHECK: call i32 @dx.op.quadOp.i32(i32 123, i32 %expr.i2, i8 0)
+  %ret = call <3 x i32> @llvm.dx.quad.read.across.x.v3i32(<3 x i32> %expr)
+  ret <3 x i32> %ret
+}
+
+define noundef <4 x double> @quad_read_across_x_v4f64(<4 x double> noundef 
%expr) {
+entry:
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i0, i8 0)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i1, i8 0)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i2, i8 0)
+; CHECK: call double @dx.op.quadOp.f64(i32 123, double %expr.i3, i8 0)
+  %ret = call <4 x double> @llvm.dx.quad.read.across.x.v4f64(<4 x double> %expr)
+  ret <4 x double> %ret
+}
+
+declare <2 x half> @llvm.dx.quad.read.across.x.v2f16(<2 x half>)
+declare <3 x i32> @llvm.dx.quad.read.across.x.v3i32(<3 x i32>)
+declare <4 x double> @llvm.dx.quad.read.across.x.v4f64(<4 x double>)
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll 
b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
index be53d19aca8f2..6323cd8f5d249 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/wave-ops.ll
@@ -139,3 +139,10 @@ entry:
   %ret = call i32 @llvm.dx.wave.prefix.uproduct.i32(i32 %x)
   ret i32 %ret
 }
+
+define noundef i32 @quad_read_across_x_i32(i32 noundef %expr) {
+entry:
+  ; CHECK: Function quad_read_across_x_i32 : [[WAVE_FLAG]]
+  %ret = call i32 @llvm.dx.quad.read.across.x.i32(i32 %expr)
+  ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll 
b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll
new file mode 100644
index 0000000000000..688ee1bce4ef4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/QuadReadAcrossX.ll
@@ -0,0 +1,44 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-vulkan-unknown %s -o - | 
FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - 
-filetype=obj | spirv-val %}
+
+; Test lowering to spir-v backend for various types and scalar/vector
+
+; CHECK: OpCapability GroupNonUniformQuad
+
+; CHECK-DAG:   %[[#f16:]] = OpTypeFloat 16
+; CHECK-DAG:   %[[#f32:]] = OpTypeFloat 32
+; CHECK-DAG:   %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG:   %[[#v4_half:]] = OpTypeVector %[[#f16]] 4
+; CHECK-DAG:   %[[#scope:]] = OpConstant %[[#uint]] 3
+; CHECK-DAG:   %[[#direction:]] = OpConstant %[[#uint]] 0
+
+; CHECK-LABEL: Begin function test_float
+; CHECK:   %[[#fexpr:]] = OpFunctionParameter %[[#f32]]
+define float @test_float(float %fexpr) {
+entry:
+; CHECK:   %[[#fret:]] = OpGroupNonUniformQuadSwap %[[#f32]] %[[#scope]] 
%[[#fexpr]] %[[#direction]]
+  %0 = call float @llvm.spv.quad.read.across.x.f32(float %fexpr)
+  ret float %0
+}
+
+; CHECK-LABEL: Begin function test_int
+; CHECK:   %[[#iexpr:]] = OpFunctionParameter %[[#uint]]
+define i32 @test_int(i32 %iexpr) {
+entry:
+; CHECK:   %[[#iret:]] = OpGroupNonUniformQuadSwap %[[#uint]] %[[#scope]] 
%[[#iexpr]] %[[#direction]]
+  %0 = call i32 @llvm.spv.quad.read.across.x.i32(i32 %iexpr)
+  ret i32 %0
+}
+
+; CHECK-LABEL: Begin function test_vhalf
+; CHECK:   %[[#vbexpr:]] = OpFunctionParameter %[[#v4_half]]
+define <4 x half> @test_vhalf(<4 x half> %vbexpr) {
+entry:
+; CHECK:   %[[#vhalfret:]] = OpGroupNonUniformQuadSwap %[[#v4_half]] 
%[[#scope]] %[[#vbexpr]] %[[#direction]]
+  %0 = call <4 x half> @llvm.spv.quad.read.across.x.v4f16(<4 x half> %vbexpr)
+  ret <4 x half> %0
+}
+
+declare float @llvm.spv.quad.read.across.x.f32(float)
+declare i32 @llvm.spv.quad.read.across.x.i32(i32)
+declare <4 x half> @llvm.spv.quad.read.across.x.v4f16(<4 x half>)

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to