From: Rob Clark <robdcl...@chromium.org>

In normal cases the gem obj lock is acquired first before mm_lock. The
exception is iterating the various object lists. In the shrinker path,
deadlock is avoided by using msm_gem_trylock() and skipping over objects
that cannot be locked. But for debugfs the straightforward thing is to
split things out into a separate list of all objects, protected by its
own lock.
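
For context, the shrinker-side pattern referred to above looks roughly
like the sketch below (simplified for illustration, not the actual
msm_gem_shrinker code; the purge step and iterator details are elided,
and msm_gem_trylock()/msm_gem_unlock() are the per-object lock helpers):

	/*
	 * The inactive list is walked under mm_lock, so blocking on the
	 * per-object lock here would invert the usual obj lock -> mm_lock
	 * order.  Instead, trylock and skip objects that are already locked.
	 */
	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
		if (!msm_gem_trylock(&msm_obj->base))
			continue;	/* locked elsewhere, just skip it */
		/* ... purge/evict the object ... */
		msm_gem_unlock(&msm_obj->base);
	}
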
Fixes: d984457b31c4 ("drm/msm: Add priv->mm_lock to protect active/inactive lists")
Signed-off-by: Rob Clark <robdcl...@chromium.org>
Tested-by: Douglas Anderson <diand...@chromium.org>
---
 drivers/gpu/drm/msm/msm_debugfs.c | 14 +++-----------
 drivers/gpu/drm/msm/msm_drv.c     |  3 +++
 drivers/gpu/drm/msm/msm_drv.h     |  9 ++++++++-
 drivers/gpu/drm/msm/msm_gem.c     | 14 +++++++++++++-
 drivers/gpu/drm/msm/msm_gem.h     | 10 ++++++++--
 5 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 85ad0babc326..d611cc8e54a4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gpu *gpu = priv->gpu;
 	int ret;
 
-	ret = mutex_lock_interruptible(&priv->mm_lock);
+	ret = mutex_lock_interruptible(&priv->obj_lock);
 	if (ret)
 		return ret;
 
-	if (gpu) {
-		seq_printf(m, "Active Objects (%s):\n", gpu->name);
-		msm_gem_describe_objects(&gpu->active_list, m);
-	}
-
-	seq_printf(m, "Inactive Objects:\n");
-	msm_gem_describe_objects(&priv->inactive_dontneed, m);
-	msm_gem_describe_objects(&priv->inactive_willneed, m);
+	msm_gem_describe_objects(&priv->objects, m);
 
-	mutex_unlock(&priv->mm_lock);
+	mutex_unlock(&priv->obj_lock);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 3462b0ea14c6..1ef1cd0cc714 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -474,6 +474,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 
+	INIT_LIST_HEAD(&priv->objects);
+	mutex_init(&priv->obj_lock);
+
 	INIT_LIST_HEAD(&priv->inactive_willneed);
 	INIT_LIST_HEAD(&priv->inactive_dontneed);
 	INIT_LIST_HEAD(&priv->inactive_purged);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 503168817e24..c84e6f84cb6d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -174,7 +174,14 @@ struct msm_drm_private {
 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
 	struct msm_perf_state *perf;
 
-	/*
+	/**
+	 * List of all GEM objects (mainly for debugfs, protected by obj_lock
+	 * (acquire before per GEM object lock)
+	 */
+	struct list_head objects;
+	struct mutex obj_lock;
+
+	/**
 	 * Lists of inactive GEM objects.  Every bo is either in one of the
 	 * inactive lists (depending on whether or not it is shrinkable) or
 	 * gpu->active_list (for the gpu it is active on[1])
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bec01bb48fce..7ca30e362222 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -961,7 +961,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 	size_t size = 0;
 
 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
-	list_for_each_entry(msm_obj, list, mm_list) {
+	list_for_each_entry(msm_obj, list, node) {
 		struct drm_gem_object *obj = &msm_obj->base;
 		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
@@ -980,6 +980,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 
+	mutex_lock(&priv->obj_lock);
+	list_del(&msm_obj->node);
+	mutex_unlock(&priv->obj_lock);
+
 	mutex_lock(&priv->mm_lock);
 	if (msm_obj->dontneed)
 		mark_unpurgable(msm_obj);
@@ -1170,6 +1174,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
+	mutex_lock(&priv->obj_lock);
+	list_add_tail(&msm_obj->node, &priv->objects);
+	mutex_unlock(&priv->obj_lock);
+
 	return obj;
 
 fail:
@@ -1240,6 +1248,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
 	mutex_unlock(&priv->mm_lock);
 
+	mutex_lock(&priv->obj_lock);
+	list_add_tail(&msm_obj->node, &priv->objects);
+	mutex_unlock(&priv->obj_lock);
+
 	return obj;
 
 fail:
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 13aabfe92dac..e6b28edb1db9 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -60,10 +60,16 @@ struct msm_gem_object {
 	 */
 	uint8_t vmap_count;
 
+	/**
+	 * Node in list of all objects (mainly for debugfs, protected by
+	 * priv->obj_lock
+	 */
+	struct list_head node;
+
 	/**
 	 * An object is either:
-	 *  inactive - on priv->inactive_dontneed/willneed/purged depending
-	 *    on status
+	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
+	 *    (depending on purgability status)
 	 *  active   - on one one of the gpu's active_list..  well, at
 	 *    least for now we don't have (I don't think) hw sync between
 	 *    2d and 3d one devices which have both, meaning we need to
-- 
2.30.2

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel