Re: [PATCH v2 16/21] drm/i915: Define GuC Based TLB invalidation routines

2022-07-14 Thread Michal Wajdeczko



On 14.07.2022 14:06, Mauro Carvalho Chehab wrote:
> From: Prathap Kumar Valsan 
> 
> Add routines to interface with GuC firmware for selective TLB invalidation
> supported on XeHP.
> 
> Signed-off-by: Prathap Kumar Valsan 
> Cc: Matthew Brost 
> Signed-off-by: Mauro Carvalho Chehab 
> ---
> 
> To avoid mailbombing a large number of people, only mailing lists were Cc'd
> on the cover.
> See [PATCH v2 00/21] at: 
> https://lore.kernel.org/all/cover.1657800199.git.mche...@kernel.org/
> 
>  .../gpu/drm/i915/gt/uc/abi/guc_actions_abi.h |  3 +
>  drivers/gpu/drm/i915/gt/uc/intel_guc.c       | 90 +++
>  drivers/gpu/drm/i915/gt/uc/intel_guc.h       | 10 +++
>  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h  |  3 +
>  4 files changed, 106 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> index fb0af33e43cc..5c019856a269 100644
> --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
> @@ -188,6 +188,9 @@ enum intel_guc_state_capture_event_status {
>  #define INTEL_GUC_TLB_INVAL_FLUSH_CACHE (1 << 31)
>  
>  enum intel_guc_tlb_invalidation_type {
> + INTEL_GUC_TLB_INVAL_FULL = 0x0,
> + INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE = 0x1,
> + INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX = 0x2,
>   INTEL_GUC_TLB_INVAL_GUC = 0x3,
>  };
>  
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> index 8a104a292598..98260a7bc90b 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
> @@ -923,6 +923,96 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 *action, u32 size)
>   return err;
>  }
>  
> + /* Full TLB invalidation */
> +int intel_guc_invalidate_tlb_full(struct intel_guc *guc,
> +   enum intel_guc_tlb_inval_mode mode)
> +{
> + u32 action[] = {
> + INTEL_GUC_ACTION_TLB_INVALIDATION,
> + 0,
> + INTEL_GUC_TLB_INVAL_FULL << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
> + mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
> + };
> +
> + if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
> + DRM_ERROR("Tlb invalidation: Operation not supported in this 
> platform!\n");

s/Tlb/TLB/

and use drm_err() or even consider GEM_BUG_ON(), as this looks more like
a coding mistake if we ever get here, no?
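
e.g. something along these lines (just a sketch; I'm assuming the
device pointer is reachable via guc_to_gt(guc)->i915 here):

	if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc)) {
		drm_err(&guc_to_gt(guc)->i915->drm,
			"TLB invalidation: operation not supported on this platform!\n");
		return 0;
	}

or, if getting here really is a coding mistake, simply:

	GEM_BUG_ON(!INTEL_GUC_SUPPORTS_TLB_INVALIDATION(guc));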

> + return 0;
> + }
> +
> + return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
> +}
> +
> +/*
> + * Selective TLB Invalidation for Address Range:
> + * TLB's in the Address Range is Invalidated across all engines.
> + */
> +int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
> + enum intel_guc_tlb_inval_mode mode,
> + u64 start, u64 length)
> +{
> + u64 vm_total = BIT_ULL(INTEL_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
> + u32 address_mask = (ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K));

drop extra ( )

> + u32 full_range = vm_total == length;

bool ?
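
i.e., spelling out both nits above, something like (sketch only):

	u32 address_mask = ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K);
	bool full_range = vm_total == length;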

> + u32 action[] = {
> + INTEL_GUC_ACTION_TLB_INVALIDATION,
> + 0,
> + INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
> + mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
> + 0,
> + full_range ? full_range : lower_32_bits(start),
> + full_range ? 0 : upper_32_bits(start),
> + full_range ? 0 : address_mask,
> + };
> +
> + if (!INTEL_GUC_SUPPORTS_TLB_INVALIDATION_SELECTIVE(guc)) {
> + DRM_ERROR("Tlb invalidation: Operation not supported in this 
> platform!\n");

as above

> + return 0;
> + }
> +
> + GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE_4K));
> + GEM_BUG_ON(!IS_ALIGNED(length, I915_GTT_PAGE_SIZE_4K));
> + GEM_BUG_ON(range_overflows(start, length, vm_total));
> +
> + return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
> +}
> +
> +/*
> + * Selective TLB Invalidation for Context:
> + * Invalidates all TLB's for a specific context across all engines.
> + */
> +int intel_guc_invalidate_tlb_page_selective_ctx(struct intel_guc *guc,
> + enum intel_guc_tlb_inval_mode mode,
> + u64 start, u64 length, u32 ctxid)
> +{
> + u64 vm_total = BIT_ULL(INTEL_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
> + u32 address_mask = (ilog2(length) - ilog2(I915_GTT_PAGE_SIZE_4K));

drop ( )

> + u32 full_range = vm_total == length;

bool

> + u32 action[] = {
> + INTEL_GUC_ACTION_TLB_INVALIDATION,
> + 0,
> + INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX << INTEL_GUC_TLB_INVAL_TYPE_SHIFT |
> + mode << INTEL_GUC_TLB_INVAL_MODE_SHIFT |
> + INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
> + ctxid,
> + full_range ? full_range : lower_32_bits(start),
> + full_range ? 0 : upper_32_bits(start),
> + full_range ? 0 : address_mask,
> + };
