Currently, a delayed destroy work item still pending during module
removal is canceled without ever running, leaking the MST ports and
branch devices (mstbs) queued on it for destruction. Fix this by
queuing the work on a dedicated workqueue and destroying that workqueue
on teardown, which drains all work items, including any that get
requeued in the process.
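
For illustration, here is a minimal sketch of the hazard, using
hypothetical helper names that are not part of this patch: the destroy
work can requeue itself while tearing down the topology, so a plain
cancel_work_sync() can skip queued destroy requests entirely, whereas
destroying a dedicated workqueue drains it first:

    #include <linux/workqueue.h>

    static struct workqueue_struct *destroy_wq;
    static struct work_struct destroy_work;

    static void destroy_work_fn(struct work_struct *work)
    {
            /* Destroying an mstb can drop the last reference on
             * further ports/mstbs, putting them on the destroy lists
             * and requeuing this work item.
             * destroy_lists_not_empty() is a hypothetical stand-in. */
            if (destroy_lists_not_empty())
                    queue_work(destroy_wq, &destroy_work);
    }

    static int setup(void)
    {
            destroy_wq = alloc_ordered_workqueue("destroy_wq", 0);
            if (!destroy_wq)
                    return -ENOMEM;
            INIT_WORK(&destroy_work, destroy_work_fn);
            return 0;
    }

    /*
     * On teardown, cancel_work_sync(&destroy_work) only cancels the
     * pending item and waits for a running one, so entries still on
     * the destroy lists are never freed. destroy_workqueue(destroy_wq)
     * instead drains the queue first, executing pending items and any
     * they requeue, before freeing it.
     */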

v2:
- Check if wq is NULL before calling destroy_workqueue().

Cc: Lyude Paul <ly...@redhat.com>
Cc: Stanislav Lisovskiy <stanislav.lisovs...@intel.com>
Reviewed-by: Stanislav Lisovskiy <stanislav.lisovs...@intel.com>
Signed-off-by: Imre Deak <imre.d...@intel.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 19 ++++++++++++++++---
 include/drm/drm_dp_mst_helper.h       |  8 ++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index eff8d6ac0273..a5f67b9db7fa 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1604,7 +1604,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
        mutex_lock(&mgr->delayed_destroy_lock);
        list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
        mutex_unlock(&mgr->delayed_destroy_lock);
-       schedule_work(&mgr->delayed_destroy_work);
+       queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
 /**
@@ -1721,7 +1721,7 @@ static void drm_dp_destroy_port(struct kref *kref)
        mutex_lock(&mgr->delayed_destroy_lock);
        list_add(&port->next, &mgr->destroy_port_list);
        mutex_unlock(&mgr->delayed_destroy_lock);
-       schedule_work(&mgr->delayed_destroy_work);
+       queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
 /**
@@ -5182,6 +5182,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        INIT_LIST_HEAD(&mgr->destroy_port_list);
        INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
        INIT_LIST_HEAD(&mgr->up_req_list);
+
+       /*
+        * delayed_destroy_work will be queued on a dedicated WQ, so that any
+        * requeuing will also be flushed when deiniting the topology manager.
+        */
+       mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
+       if (mgr->delayed_destroy_wq == NULL)
+               return -ENOMEM;
+
        INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
        INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
        INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
@@ -5226,7 +5235,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
        drm_dp_mst_topology_mgr_set_mst(mgr, false);
        flush_work(&mgr->work);
-       cancel_work_sync(&mgr->delayed_destroy_work);
+       /* The following will also drain any requeued work on the WQ. */
+       if (mgr->delayed_destroy_wq) {
+               destroy_workqueue(mgr->delayed_destroy_wq);
+               mgr->delayed_destroy_wq = NULL;
+       }
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
        mgr->payloads = NULL;
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 9e1ffcd7cb68..17b568c6f4f8 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -672,6 +672,14 @@ struct drm_dp_mst_topology_mgr {
         * @destroy_branch_device_list.
         */
        struct mutex delayed_destroy_lock;
+
+       /**
+        * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
+        * A dedicated WQ makes it possible to drain any requeued work items
+        * on it.
+        */
+       struct workqueue_struct *delayed_destroy_wq;
+
        /**
         * @delayed_destroy_work: Work item to destroy MST port and branch
         * devices, needed to avoid locking inversion.
-- 
2.23.1
