With the introduction of asymmetric slices in CNL, we cannot rely on
the previous SUBSLICE_MASK getparam. Here we introduce a more detailed
way of querying the Gen's GPU topology that doesn't aggregate numbers.

This is essential for monitoring parts of the GPU with the OA unit,
because signals need to be accounted properly based on whether part of
the GPU has been fused off. The current aggregated numbers like
EU_TOTAL do not give us sufficient information.

Here is the sysfs layout on a Skylake GT4 :

/sys/devices/pci0000:00/0000:00:02.0/drm/card0/topology/
├── enabled_mask
├── slice0
│   ├── enabled_mask
│   ├── subslice0
│   │   └── enabled_mask
│   ├── subslice1
│   │   └── enabled_mask
│   ├── subslice2
│   │   └── enabled_mask
│   └── subslice3
│       └── enabled_mask
├── slice1
│   ├── enabled_mask
│   ├── subslice0
│   │   └── enabled_mask
│   ├── subslice1
│   │   └── enabled_mask
│   ├── subslice2
│   │   └── enabled_mask
│   └── subslice3
│       └── enabled_mask
└── slice2
    ├── enabled_mask
    ├── subslice0
    │   └── enabled_mask
    ├── subslice1
    │   └── enabled_mask
    ├── subslice2
    │   └── enabled_mask
    └── subslice3
        └── enabled_mask

Each enabled_mask file gives us a mask of the enabled units :

$ cat /sys/devices/pci0000\:00/0000\:00\:02.0/drm/card0/topology/enabled_mask
0x7

$ cat /sys/devices/pci0000\:00/0000\:00\:02.0/drm/card0/topology/slice0/subslice2/enabled_mask
0xff

Signed-off-by: Lionel Landwerlin <lionel.g.landwer...@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h   |  30 +++++++
 drivers/gpu/drm/i915/i915_sysfs.c | 159 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 189 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f8d239d8bfab..44b807421af8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2259,6 +2259,24 @@ struct intel_cdclk_state {
        u8 voltage_level;
 };
 
+/*
+ * Wrapper embedding a kobject for the drm/cardX/topology/ directory;
+ * the show callbacks use container_of() on the embedded kobj to get
+ * back to dev_priv.
+ */
+struct intel_topology_kobject {
+       struct kobject kobj;
+       struct drm_i915_private *dev_priv;
+};
+
+/* Wrapper for a topology/sliceN/ directory; remembers which slice. */
+struct intel_slice_kobject {
+       struct kobject kobj;
+       struct drm_i915_private *dev_priv;
+       u8 slice_index;
+};
+
+/* Wrapper for a topology/sliceN/subsliceM/ directory. */
+struct intel_subslice_kobject {
+       struct kobject kobj;
+       struct drm_i915_private *dev_priv;
+       u8 slice_index;
+       u8 subslice_index;
+};
+
 struct drm_i915_private {
        struct drm_device drm;
 
@@ -2729,6 +2747,18 @@ struct drm_i915_private {
                } oa;
        } perf;
 
+       struct {
+               struct intel_topology_kobject kobj;
+
+               struct sysfs_slice {
+                       struct intel_slice_kobject kobj;
+
+                       struct sysfs_subslice {
+                               struct intel_subslice_kobject kobj;
+                       } subslices[GEN_MAX_SUBSLICES];
+               } slices[GEN_MAX_SLICES];
+       } topology;
+
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                void (*resume)(struct drm_i915_private *);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 791759f632e1..1d835f164d80 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -559,6 +559,159 @@ static void i915_setup_error_capture(struct device *kdev) {}
 static void i915_teardown_error_capture(struct device *kdev) {}
 #endif
 
+/*
+ * Single read-only "enabled_mask" attribute shared by the topology,
+ * slice and subslice directories; each ktype's show callback decides
+ * which mask to print for it.
+ */
+static struct attribute mask_attr = {
+       .name = "enabled_mask",
+       .mode = 0444,
+};
+
+/* topology/enabled_mask: print the mask of enabled slices. */
+static ssize_t
+show_topology_attr(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+       struct intel_topology_kobject *kobj_wrapper =
+               container_of(kobj, struct intel_topology_kobject, kobj);
+       struct drm_i915_private *dev_priv = kobj_wrapper->dev_priv;
+       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+
+       if (attr == &mask_attr)
+               return sprintf(buf, "0x%hhx\n", sseu->slice_mask);
+       return sprintf(buf, "0x0\n");
+}
+
+static const struct sysfs_ops topology_ops = {
+       .show = show_topology_attr,
+};
+
+/*
+ * The kobject is embedded in dev_priv and lives for the whole driver
+ * lifetime, but a ktype without a release() function makes the kobject
+ * core warn that the kobject "is broken", so provide an empty one.
+ */
+static void topology_kobj_release(struct kobject *kobj)
+{
+}
+
+static struct kobj_type topology_type = {
+       .release = topology_kobj_release,
+       .sysfs_ops = &topology_ops,
+};
+
+/* topology/sliceN/enabled_mask: mask of enabled subslices in slice N. */
+static ssize_t
+show_slice_attr(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+       struct intel_slice_kobject *kobj_wrapper =
+               container_of(kobj, struct intel_slice_kobject, kobj);
+       struct drm_i915_private *dev_priv = kobj_wrapper->dev_priv;
+       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+
+       if (attr == &mask_attr) {
+               return sprintf(buf, "0x%hhx\n",
+                              sseu->subslices_mask[kobj_wrapper->slice_index]);
+       }
+
+       return sprintf(buf, "0x0\n");
+}
+
+static const struct sysfs_ops slice_ops = {
+       .show = show_slice_attr,
+};
+
+/*
+ * Empty release: the kobject is embedded in dev_priv, but the kobject
+ * core warns if a ktype has no release() function at all.
+ */
+static void slice_kobj_release(struct kobject *kobj)
+{
+}
+
+static struct kobj_type slice_type = {
+       .release = slice_kobj_release,
+       .sysfs_ops = &slice_ops,
+};
+
+/*
+ * topology/sliceN/subsliceM/enabled_mask: mask of enabled EUs in the
+ * subslice.
+ *
+ * eu_mask is laid out as one stride of bytes per subslice
+ * (ALIGN(max_eus_per_subslice, 8) / 8), slices stored consecutively.
+ * NOTE(review): only the first byte of the subslice's mask is printed;
+ * fine as long as max_eus_per_subslice <= 8 — confirm for future gens.
+ */
+static ssize_t
+show_subslice_attr(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+       struct intel_subslice_kobject *kobj_wrapper =
+               container_of(kobj, struct intel_subslice_kobject, kobj);
+       struct drm_i915_private *dev_priv = kobj_wrapper->dev_priv;
+       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       int subslice_stride = ALIGN(sseu->max_eus_per_subslice, 8) / 8;
+       int slice_stride = sseu->max_subslices * subslice_stride;
+       int offset = kobj_wrapper->slice_index * slice_stride +
+                    kobj_wrapper->subslice_index * subslice_stride;
+
+       if (attr == &mask_attr)
+               return sprintf(buf, "0x%hhx\n", sseu->eu_mask[offset]);
+       return sprintf(buf, "0x0\n");
+}
+
+static const struct sysfs_ops subslice_ops = {
+       .show = show_subslice_attr,
+};
+
+/*
+ * Empty release: the kobject is embedded in dev_priv, but the kobject
+ * core warns if a ktype has no release() function at all.
+ */
+static void subslice_kobj_release(struct kobject *kobj)
+{
+}
+
+static struct kobj_type subslice_type = {
+       .release = subslice_kobj_release,
+       .sysfs_ops = &subslice_ops,
+};
+
+/*
+ * Build the .../drm/cardX/topology/ sysfs hierarchy exposing the
+ * slice/subslice/EU enabled masks.
+ *
+ * Returns 0 on success or a negative error code. kobject_init_and_add()
+ * leaves a reference behind even when it fails, so every failure path
+ * drops it with kobject_put(); a partially constructed hierarchy is
+ * unwound by i915_teardown_topology_sysfs() at driver teardown.
+ */
+static int i915_setup_topology_sysfs(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       struct intel_topology_kobject *topology_kobj = &dev_priv->topology.kobj;
+       int ret, s, ss;
+
+       topology_kobj->dev_priv = dev_priv;
+       ret = kobject_init_and_add(&topology_kobj->kobj,
+                                  &topology_type,
+                                  &dev_priv->drm.primary->kdev->kobj,
+                                  "topology");
+       if (ret) {
+               kobject_put(&topology_kobj->kobj);
+               return ret;
+       }
+
+       ret = sysfs_create_file(&topology_kobj->kobj, &mask_attr);
+       if (ret)
+               return ret;
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               struct intel_slice_kobject *slice_kobj =
+                       &dev_priv->topology.slices[s].kobj;
+
+               slice_kobj->dev_priv = dev_priv;
+               slice_kobj->slice_index = s;
+               ret = kobject_init_and_add(&slice_kobj->kobj,
+                                          &slice_type,
+                                          &topology_kobj->kobj,
+                                          "slice%i", s);
+               if (ret) {
+                       kobject_put(&slice_kobj->kobj);
+                       return ret;
+               }
+
+               ret = sysfs_create_file(&slice_kobj->kobj, &mask_attr);
+               if (ret)
+                       return ret;
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       struct intel_subslice_kobject *subslice_kobj =
+                               &dev_priv->topology.slices[s].subslices[ss].kobj;
+
+                       subslice_kobj->dev_priv = dev_priv;
+                       subslice_kobj->slice_index = s;
+                       subslice_kobj->subslice_index = ss;
+                       ret = kobject_init_and_add(&subslice_kobj->kobj,
+                                                  &subslice_type,
+                                                  &slice_kobj->kobj,
+                                                  "subslice%i", ss);
+                       if (ret) {
+                               kobject_put(&subslice_kobj->kobj);
+                               return ret;
+                       }
+
+                       ret = sysfs_create_file(&subslice_kobj->kobj,
+                                               &mask_attr);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Tear down the topology/ sysfs hierarchy: remove each enabled_mask
+ * file and drop the reference on every kobject created in
+ * i915_setup_topology_sysfs(). kobject_put() also removes the kobject
+ * from sysfs once the last reference is gone, so no explicit
+ * kobject_del() is needed. Note it must be kobject_put(), not
+ * kobject_get() — taking a reference here would pin the kobjects
+ * forever; likewise the slice/subslice kobjects must be released too,
+ * not just the top-level one.
+ */
+static void i915_teardown_topology_sysfs(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+       struct intel_topology_kobject *topology_kobj = &dev_priv->topology.kobj;
+       int s, ss;
+
+       for (s = 0; s < sseu->max_slices; s++) {
+               struct intel_slice_kobject *slice_kobj =
+                       &dev_priv->topology.slices[s].kobj;
+
+               for (ss = 0; ss < sseu->max_subslices; ss++) {
+                       struct intel_subslice_kobject *subslice_kobj =
+                               &dev_priv->topology.slices[s].subslices[ss].kobj;
+
+                       sysfs_remove_file(&subslice_kobj->kobj, &mask_attr);
+                       kobject_put(&subslice_kobj->kobj);
+               }
+
+               sysfs_remove_file(&slice_kobj->kobj, &mask_attr);
+               kobject_put(&slice_kobj->kobj);
+       }
+       sysfs_remove_file(&topology_kobj->kobj, &mask_attr);
+       kobject_put(&topology_kobj->kobj);
+}
+
 void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
        struct device *kdev = dev_priv->drm.primary->kdev;
@@ -605,6 +758,10 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");
 
+       ret = i915_setup_topology_sysfs(dev_priv);
+       if (ret)
+               DRM_ERROR("Topology sysfs setup failed\n");
+
        i915_setup_error_capture(kdev);
 }
 
@@ -614,6 +771,8 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 
        i915_teardown_error_capture(kdev);
 
+       i915_teardown_topology_sysfs(dev_priv);
+
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sysfs_remove_files(&kdev->kobj, vlv_attrs);
        else
-- 
2.15.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to