On Fri, Sep 09, 2022 at 04:47:36PM -0700, Dixit, Ashutosh wrote:
On Tue, 23 Aug 2022 13:41:37 -0700, Umesh Nerlige Ramappa wrote:


Hi Umesh,

With GuC mode of submission, GuC is in control of defining the context id field
that is part of the OA reports. To filter reports, the UMD and KMD must know
what sw context id was chosen by GuC. There is no interface between the KMD and
GuC to determine this, so read the upper dword of EXECLIST_STATUS to
filter/squash OA reports for the specific context.
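
For reference, the filtering this id feeds into is roughly the following
(a sketch based on gen8_append_oa_reports() in i915_perf.c; the
surrounding report-handling logic is elided):

	/* the ctx id lives in the third dword of each OA report */
	u32 ctx_id = report32[2] & stream->specific_ctx_id_mask;

	/* squash/skip reports that do not belong to our context */
	if (stream->ctx && stream->specific_ctx_id != ctx_id)
		...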

Do you think it is worth defining an interface for GuC to return the sw
ctx_id it will be using for a ctx, say at ctx registration time?
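
Something along these lines, say (purely hypothetical: no such GuC
action or KMD helper exists today; names invented for illustration):

	/* hypothetical helper: ask GuC which sw ctx_id it assigned */
	int intel_guc_get_sw_ctx_id(struct intel_guc *guc,
				    struct intel_context *ce,
				    u32 *sw_ctx_id);

The KMD could then cache the returned id at context registration time
instead of reading it back through EXECLIST_STATUS.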

The scheme implemented in this patch to read the ctx_id is certainly very
clever, at least to me. But as Lionel was saying, is it an agreed-upon,
immutable interface? If it is, we can go with this patch.

(Though even then we will need to maintain this code, even if in the future
the GuC FW is changed to return the ctx_id, in order to preserve backwards
compatibility with previous GuC versions. So maybe it is better to have a
real interface between GuC and KMD earlier rather than later?)

Agreed, ideally this should be obtained from GuC and properly synchronized
with the KMD. Or GuC should provide a way to pin the context id for such
cases so that the id is not stolen/unpinned. Either way, we need to follow
this up as a JIRA.

I may drop this patch and add a message that OA buffer filtering may be broken if a gem context is passed.


Also a couple of general comments below.


Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.rama...@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.h |   2 +
 drivers/gpu/drm/i915/i915_perf.c    | 141 ++++++++++++++++++++++++----
 2 files changed, 124 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index a390f0813c8b..7111bae759f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -110,6 +110,8 @@ enum {
 #define XEHP_SW_CTX_ID_WIDTH                   16
 #define XEHP_SW_COUNTER_SHIFT                  58
 #define XEHP_SW_COUNTER_WIDTH                  6
+#define GEN12_GUC_SW_CTX_ID_SHIFT              39
+#define GEN12_GUC_SW_CTX_ID_WIDTH              16

 static inline void lrc_runtime_start(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f3c23fe9ad9c..735244a3aedd 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1233,6 +1233,125 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
        return stream->pinned_ctx;
 }

+static int
+__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
+{
+       u32 *cs, cmd;
+
+       cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+       if (GRAPHICS_VER(rq->engine->i915) >= 8)
+               cmd++;
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       *cs++ = cmd;
+       *cs++ = i915_mmio_reg_offset(reg);
+       *cs++ = ggtt_offset;
+       *cs++ = 0;
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static int
+__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
+{
+       struct i915_request *rq;
+       int err;
+
+       rq = i915_request_create(ce);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+
+       i915_request_get(rq);
+
+       err = __store_reg_to_mem(rq, reg, ggtt_offset);
+
+       i915_request_add(rq);
+       if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
+               err = -ETIME;
+
+       i915_request_put(rq);
+
+       return err;
+}
+
+static int
+gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
+{
+       struct i915_vma *scratch;
+       u32 *val;
+       int err;
+
+       scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
+       if (IS_ERR(scratch))
+               return PTR_ERR(scratch);
+
+       err = i915_vma_sync(scratch);
+       if (err)
+               goto err_scratch;
+
+       err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
+                        i915_ggtt_offset(scratch));

Actually RING_EXECLIST_STATUS_HI is MMIO, so it can be read using, say,
ENGINE_READ/intel_uncore_read. The only issue is how to read it while this
ctx is scheduled in, which is cleverly solved by the scheme above. But I am
not sure if there is any simpler way to do it.
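
That is, something like the line below (hypothetical: only meaningful
while the context is actually running on the engine, which is exactly
what the SRM-from-within-the-context trick above guarantees):

	/* racy unless the context is currently scheduled in */
	u32 ctx_id = ENGINE_READ(ce->engine, RING_EXECLIST_STATUS_HI);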

+       if (err)
+               goto err_scratch;
+
+       val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
+       if (IS_ERR(val)) {
+               err = PTR_ERR(val);
+               goto err_scratch;
+       }
+
+       *ctx_id = *val;
+       i915_gem_object_unpin_map(scratch->obj);
+
+err_scratch:
+       i915_vma_unpin_and_release(&scratch, 0);
+       return err;
+}
+
+/*
+ * For execlist mode of submission, pick an unused context id
+ * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+ * XXX_MAX_CONTEXT_HW_ID is used by idle context
+ *
+ * For GuC mode of submission read context id from the upper dword of the
+ * EXECLIST_STATUS register.
+ */
+static int gen12_get_render_context_id(struct i915_perf_stream *stream)
+{
+       u32 ctx_id, mask;
+       int ret;
+
+       if (intel_engine_uses_guc(stream->engine)) {
+               ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
+               if (ret)
+                       return ret;
+
+               mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
+                       (GEN12_GUC_SW_CTX_ID_SHIFT - 32);
+       } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
+               ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
+                       (XEHP_SW_CTX_ID_SHIFT - 32);
+
+               mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
+                       (XEHP_SW_CTX_ID_SHIFT - 32);
+       } else {
+               ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
+                        (GEN11_SW_CTX_ID_SHIFT - 32);
+
+               mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
+                       (GEN11_SW_CTX_ID_SHIFT - 32);

Previously I missed that these ctx_ids for the non-GuC cases are just
constants. How does it work in these cases?

In those cases we use a fixed id for the OA use case:

in gen12_get_render_context_id()
stream->specific_ctx_id = ctx_id & mask

in oa_get_render_ctx_id()
ce->tag = stream->specific_ctx_id;

in __execlists_schedule_in()
ce->lrc.ccid = ce->tag;
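
For completeness, the schedule-in side looks roughly like this (condensed
from __execlists_schedule_in() in intel_execlists_submission.c; the
dynamic-tag branch is elided):

	if (ce->tag) {
		/* Use a fixed tag for OA and friends */
		ce->lrc.ccid = ce->tag;
	} else {
		/* otherwise pick a free tag from the engine's pool */
		...
	}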

Thanks,
Umesh


Thanks.
--
Ashutosh
