From: Prathap Kumar Valsan <prathap.kumar.val...@intel.com>

Add routines to interface with GuC firmware for selective TLB invalidation
supported on XeHP.
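
For illustration only (editor's sketch, not part of this patch): a hypothetical
caller could pair the selective routine with a full-invalidation fallback. The
function example_tlb_inval() and the INTEL_GUC_TLB_INVAL_MODE_HEAVY mode value
are assumptions here; the intel_guc_tlb_inval_mode enum is introduced elsewhere
in this series.

    /* Hypothetical usage sketch; the names above are assumptions. */
    static void example_tlb_inval(struct intel_guc *guc, u64 start)
    {
            /* Invalidate a 64 KiB, 4 KiB-aligned range across all engines. */
            if (intel_guc_invalidate_tlb_page_selective(guc,
                                                        INTEL_GUC_TLB_INVAL_MODE_HEAVY,
                                                        start, SZ_64K))
                    /* On error, fall back to invalidating everything. */
                    intel_guc_invalidate_tlb_full(guc,
                                                  INTEL_GUC_TLB_INVAL_MODE_HEAVY);
    }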

Signed-off-by: Prathap Kumar Valsan <prathap.kumar.val...@intel.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Signed-off-by: Mauro Carvalho Chehab <mche...@kernel.org>
---

To avoid mailbombing a large number of people, only mailing lists were Cc'd
on the cover letter.
See [PATCH v2 00/21] at:
https://lore.kernel.org/all/cover.1657800199.git.mche...@kernel.org/
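
A note on the encoding used by the selective variants below: the range length
is passed to the firmware as an address mask expressed in 4 KiB pages,
computed as ilog2(length) - ilog2(4K), while a whole-VM request is signalled
by a full_range flag instead. A small worked sketch (editor's illustration,
not part of the patch; example_address_mask() is a hypothetical name):

    #include <linux/log2.h>
    #include <linux/sizes.h>
    #include <linux/types.h>

    /*
     * For a 64 KiB range: ilog2(SZ_64K) - ilog2(SZ_4K) = 16 - 12 = 4,
     * i.e. the firmware sees 2^4 pages of 4 KiB. When length equals the
     * whole address space, the code below instead sends full_range = 1
     * and zeroes the upper address bits and the mask.
     */
    static u32 example_address_mask(u64 length)
    {
            return ilog2(length) - ilog2(SZ_4K);
    }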

 .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h  |  3 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.c        | 90 +++++++++++++++++++
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        | 10 +++
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h   |  3 +
 4 files changed, 106 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index fb0af33e43cc..5c019856a269 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -188,6 +188,9 @@ enum intel_guc_state_capture_event_status {
 #define INTEL_GUC_TLB_INVAL_FLUSH_CACHE (1 << 31)
 
 enum intel_guc_tlb_invalidation_type {
+       INTEL_GUC_TLB_INVAL_FULL = 0x0,
+       INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE = 0x1,
+       INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX = 0x2,
        INTEL_GUC_TLB_INVAL_GUC = 0x3,
 };
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 8a104a292598..98260a7bc90b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -923,6 +923,96 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 *action, u32 size)
        return err;
 }
 
+/* Full TLB invalidation */
+int intel_guc_invalidate_tlb_full(struct intel_guc *guc,
+                                 enum intel_guc_tlb_inval_mode mode)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_TLB_INVALIDATION,
+               0,
+               INTEL_GUC_TLB_INVAL_FULL << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
+                       mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
+                       INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+       };
+
+       if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
+               DRM_ERROR("TLB invalidation: operation not supported on this platform!\n");
+               return 0;
+       }
+
+       return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
+}
+
+/*
+ * Selective TLB invalidation for an address range:
+ * TLB entries within the address range are invalidated across all engines.
+ */
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+                                           enum intel_guc_tlb_inval_mode mode,
+                                           u64 start, u64 length)
+{
+       u64 vm_total = BIT_ULL(INTEL_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
+       u32 address_mask = (ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K));
+       u32 full_range = vm_total == length;
+       u32 action[] = {
+               INTEL_GUC_ACTION_TLB_INVALIDATION,
+               0,
+               INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
+                       mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
+                       INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+               0,
+               full_range ? full_range : lower_32_bits(start),
+               full_range ? 0 : upper_32_bits(start),
+               full_range ? 0 : address_mask,
+       };
+
+       if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION_SELECTIVE(guc)) {
+               DRM_ERROR("TLB invalidation: operation not supported on this platform!\n");
+               return 0;
+       }
+
+       GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE_4K));
+       GEM_BUG_ON(!IS_ALIGNED(length, I915_GTT_PAGE_SIZE_4K));
+       GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+       return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
+}
+
+/*
+ * Selective TLB invalidation for a context:
+ * Invalidates all TLB entries for a specific context across all engines.
+ */
+int intel_guc_invalidate_tlb_page_selective_ctx(struct intel_guc *guc,
+                                               enum intel_guc_tlb_inval_mode mode,
+                                               u64 start, u64 length, u32 ctxid)
+{
+       u64 vm_total = BIT_ULL(INTEL_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
+       u32 address_mask = (ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K));
+       u32 full_range = vm_total == length;
+       u32 action[] = {
+               INTEL_GUC_ACTION_TLB_INVALIDATION,
+               0,
+               INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
+                       mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
+                       INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+               ctxid,
+               full_range ? full_range : lower_32_bits(start),
+               full_range ? 0 : upper_32_bits(start),
+               full_range ? 0 : address_mask,
+       };
+
+       if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION_SELECTIVE(guc)) {
+               DRM_ERROR("TLB invalidation: operation not supported on this platform!\n");
+               return 0;
+       }
+
+       GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE_4K));
+       GEM_BUG_ON(!IS_ALIGNED(length, I915_GTT_PAGE_SIZE_4K));
+       GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+       return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
+}
+
 /*
  * Guc TLB Invalidation: Invalidate the TLB's of GuC itself.
  */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 01c6478451cc..df6ba1c32808 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -384,6 +384,16 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
 int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
 
+int intel_guc_g2g_register(struct intel_guc *guc);
+
+int intel_guc_invalidate_tlb_full(struct intel_guc *guc,
+                                 enum intel_guc_tlb_inval_mode mode);
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+                                           enum intel_guc_tlb_inval_mode mode,
+                                           u64 start, u64 length);
+int intel_guc_invalidate_tlb_page_selective_ctx(struct intel_guc *guc,
+                                                 enum intel_guc_tlb_inval_mode mode,
+                                                 u64 start, u64 length, u32 ctxid);
 int intel_guc_invalidate_tlb_guc(struct intel_guc *guc,
                                 enum intel_guc_tlb_inval_mode mode);
 int intel_guc_invalidate_tlb_all(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 3edf567b3f65..29e402f70a94 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -436,5 +436,8 @@ enum intel_guc_recv_message {
        ((intel_guc_ct_enabled(&(guc)->ct)) && \
         (intel_guc_submission_is_used(guc)) && \
         (GRAPHICS_VER(guc_to_gt((guc))->i915) >= 12))
+#define INTEL_GUC_SUPPORTS_TLB_INVALIDATION_SELECTIVE(guc) \
+       (INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc) && \
+       HAS_SELECTIVE_TLB_INVALIDATION(guc_to_gt(guc)->i915))
 
 #endif
-- 
2.36.1