Each enabled_mask file gives us a bitmask of the enabled units:
$ cat /sys/devices/pci0000\:00/0000\:00\:02.0/drm/card0/gt/rcs/0/slices_enabled_mask
0x7
$ cat /sys/devices/pci0000\:00/0000\:00\:02.0/drm/card0/gt/rcs/0/slice0/subslice2/eus_enabled_mask
0xff
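
For illustration, the whole topology can be dumped with a small shell
loop (a sketch only, reusing the rcs sysfs directory from the examples
above):

$ cd /sys/devices/pci0000\:00/0000\:00\:02.0/drm/card0/gt/rcs/0
$ for f in max_slices max_subslices_per_slice max_eus_per_subslice \
           slices_enabled_mask slice*/subslices_enabled_mask \
           slice*/subslice*/eus_enabled_mask; do
      echo "$f: $(cat $f)"
  done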
v2: Move topology below rcs engine (Chris)
    Add max_eus_per_subslice/max_slices/max_subslices_per_slice (Lionel)
v3: Rename enabled_mask (Lionel)
Signed-off-by: Lionel Landwerlin <lionel.g.landwer...@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h   |  26 ++++++
 drivers/gpu/drm/i915/i915_sysfs.c | 188 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 214 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db550322207c..1ac0a191e8fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2259,6 +2259,24 @@ struct intel_cdclk_state {
u8 voltage_level;
};
+struct intel_topology_kobject {
+ struct kobject kobj;
+ struct drm_i915_private *dev_priv;
+};
+
+struct intel_slice_kobject {
+ struct kobject kobj;
+ struct drm_i915_private *dev_priv;
+ u8 slice_index;
+};
+
+struct intel_subslice_kobject {
+ struct kobject kobj;
+ struct drm_i915_private *dev_priv;
+ u8 slice_index;
+ u8 subslice_index;
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -2732,6 +2750,14 @@ struct drm_i915_private {
struct {
struct kobject kobj;
struct kobject classes_kobjs[MAX_ENGINE_CLASS];
+
+ struct sysfs_slice {
+ struct intel_slice_kobject kobj;
+
+ struct sysfs_subslice {
+ struct intel_subslice_kobject kobj;
+ } subslices[GEN_MAX_SUBSLICES];
+ } slices[GEN_MAX_SLICES];
} gt_topology;
/* Abstract the submission mechanism (legacy ringbuffer or
execlists) away */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index fd04d0b93eaf..df9d8fdbcb0a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -559,6 +559,174 @@ static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
+static struct attribute slices_enabled_mask_attr = {
+ .name = "slices_enabled_mask",
+ .mode = 0444,
+};
+
+static struct attribute subslices_enabled_mask_attr = {
+ .name = "subslices_enabled_mask",
+ .mode = 0444,
+};
+
+static struct attribute eus_enabled_mask_attr = {
+ .name = "eus_enabled_mask",
+ .mode = 0444,
+};
+
+static struct attribute max_slices_attr = {
+ .name = "max_slices",
+ .mode = 0444,
+};
+
+static struct attribute max_subslices_per_slice_attr = {
+ .name = "max_subslices_per_slice",
+ .mode = 0444,
+};
+
+static struct attribute max_eus_per_subslice_attr = {
+ .name = "max_eus_per_subslice",
+ .mode = 0444,
+};
+
+static ssize_t
+show_slice_attr(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct intel_slice_kobject *kobj_wrapper =
+ container_of(kobj, struct intel_slice_kobject, kobj);
+ struct drm_i915_private *dev_priv = kobj_wrapper->dev_priv;
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+
+ if (attr == &subslices_enabled_mask_attr) {
+ return sprintf(buf, "0x%hhx\n",
+ sseu->subslices_mask[kobj_wrapper->slice_index]);
+ }
+
+ return sprintf(buf, "0x0\n");
+}
+
+static const struct sysfs_ops slice_ops = {
+ .show = show_slice_attr,
+};
+
+static struct kobj_type slice_type = {
+ .sysfs_ops = &slice_ops,
+};
+
+static ssize_t
+show_subslice_attr(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct intel_subslice_kobject *kobj_wrapper =
+ container_of(kobj, struct intel_subslice_kobject, kobj);
+ struct drm_i915_private *dev_priv = kobj_wrapper->dev_priv;
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ int subslice_stride = ALIGN(sseu->max_eus_per_subslice, 8) / 8;
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ if (attr == &eus_enabled_mask_attr)
+ return sprintf(buf, "0x%hhx\n",
+ sseu->eu_mask[kobj_wrapper->slice_index * slice_stride +
+ kobj_wrapper->subslice_index * subslice_stride]);
+ return sprintf(buf, "0x0\n");
+}
+
+static const struct sysfs_ops subslice_ops = {
+ .show = show_subslice_attr,
+};
+
+static struct kobj_type subslice_type = {
+ .sysfs_ops = &subslice_ops,
+};
+
+static int i915_setup_rcs_topology_sysfs(struct drm_i915_private *dev_priv,
+ struct kobject *engine_kobj)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ int ret, s, ss;
+
+ ret = sysfs_create_file(engine_kobj, &slices_enabled_mask_attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(engine_kobj, &max_slices_attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(engine_kobj, &max_subslices_per_slice_attr);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(engine_kobj, &max_eus_per_subslice_attr);
+ if (ret)
+ return ret;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ struct intel_slice_kobject *slice_kobj =
+ &dev_priv->gt_topology.slices[s].kobj;
+
+ slice_kobj->dev_priv = dev_priv;
+ slice_kobj->slice_index = s;
+ ret = kobject_init_and_add(&slice_kobj->kobj, &slice_type,
+ engine_kobj, "slice%i", s);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(&slice_kobj->kobj,
+ &subslices_enabled_mask_attr);
+ if (ret)
+ return ret;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ struct intel_subslice_kobject *subslice_kobj =
+ &dev_priv->gt_topology.slices[s].subslices[ss].kobj;
+
+ subslice_kobj->dev_priv = dev_priv;
+ subslice_kobj->slice_index = s;
+ subslice_kobj->subslice_index = ss;
+ ret = kobject_init_and_add(&subslice_kobj->kobj,
+ &subslice_type,
+ &slice_kobj->kobj,
+ "subslice%i", ss);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_file(&subslice_kobj->kobj,
+ &eus_enabled_mask_attr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void i915_teardown_rcs_topology_sysfs(struct drm_i915_private *dev_priv,
+ struct kobject *engine_kobj)
+{
+ const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ int s, ss;
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ struct intel_slice_kobject *slice_kobj =
+ &dev_priv->gt_topology.slices[s].kobj;
+
+ for (ss = 0; ss < sseu->max_subslices; ss++) {
+ struct intel_subslice_kobject *subslice_kobj =
+ &dev_priv->gt_topology.slices[s].subslices[ss].kobj;
+
+ sysfs_remove_file(&subslice_kobj->kobj,
+ &eus_enabled_mask_attr);
+ }
+
+ sysfs_remove_file(&slice_kobj->kobj,
+ &subslices_enabled_mask_attr);
+ }
+ sysfs_remove_file(engine_kobj, &slices_enabled_mask_attr);
+ sysfs_remove_file(engine_kobj, &max_eus_per_subslice_attr);
+ sysfs_remove_file(engine_kobj, &max_subslices_per_slice_attr);
+ sysfs_remove_file(engine_kobj, &max_slices_attr);
+}
+
static struct attribute engine_id_attr = {
.name = "id",
.mode = 0444,
@@ -574,11 +742,20 @@ show_engine_attr(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct intel_engine_cs *engine =
container_of(kobj, struct intel_engine_cs, instance_kobj);
+ const struct sseu_dev_info *sseu = &INTEL_INFO(engine->i915)->sseu;
if (attr == &engine_id_attr)
return sprintf(buf, "%hhu\n", engine->uabi_id);
if (attr == &engine_class_attr)
return sprintf(buf, "%hhu\n", engine->uabi_class);
+ if (attr == &slices_enabled_mask_attr)
+ return sprintf(buf, "0x%hhx\n", sseu->slice_mask);
+ if (attr == &max_eus_per_subslice_attr)
+ return sprintf(buf, "%hhd\n", sseu->max_eus_per_subslice);
+ if (attr == &max_subslices_per_slice_attr)
+ return sprintf(buf, "%hhd\n", sseu->max_subslices);
+ if (attr == &max_slices_attr)
+ return sprintf(buf, "%hhd\n", sseu->max_slices);
return sprintf(buf, "\n");
}
@@ -671,6 +848,12 @@ static int i915_setup_engines_sysfs(struct drm_i915_private *dev_priv,
if (ret)
return ret;
}
+ if (engine->id == RCS) {
+ ret = i915_setup_rcs_topology_sysfs(dev_priv,
+ &engine->instance_kobj);
+ if (ret)
+ return ret;
+ }
}
}
@@ -683,6 +866,11 @@ static void i915_teardown_engines_sysfs(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
+ if (id == RCS) {
+ i915_teardown_rcs_topology_sysfs(dev_priv,
+ &engine->instance_kobj);
+ }
+
sysfs_remove_file(&engine->instance_kobj, &engine_id_attr);
sysfs_remove_file(&engine->instance_kobj, &engine_class_attr);
sysfs_remove_file(&engine->capabilities_kobj,