[PATCH 2/7] drm/amd/display: Fix multi-display support for idle opt workqueue

2021-08-13 Thread Wayne Lin
From: Nicholas Kazlauskas 

[Why]
The current implementation for idle optimization support has only a
single work item, which is requeued onto the system workqueue
whenever we receive an enable or disable event.

We can have mismatched events if the work hasn't been processed or if
we're getting control events from multiple displays at once.

This change fixes the issue and also makes the implementation usable for
PSR control, which will be addressed in another patch.

[How]
We need to be able to flush remaining work out on demand for driver stop
and PSR disable, so create a driver-specific workqueue instead of using
the system one. The workqueue is single threaded to guarantee the
ordering of enable/disable events.

Refactor the queue to allocate the control work per event and free it
once it has been processed.

Pass the acrtc directly to make it easier to handle PSR enable/disable
in a later patch.

Rename things to indicate that it's not just MALL specific.
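
For reference, the overall pattern (a single-threaded workqueue plus a
heap-allocated work item per event that the worker frees) looks roughly
like the sketch below. Names such as vblank_ctl_work, vblank_ctl_wq and
the helper functions are illustrative only, not the driver's; the real
definitions live in amdgpu_dm.h/amdgpu_dm.c.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* One control event in flight; freed by the worker once handled. */
struct vblank_ctl_work {
	struct work_struct work;	/* embedded, recovered via container_of() */
	bool enable;			/* snapshot of the enable/disable event */
};

/* Single threaded, so events are processed strictly in queueing order. */
static struct workqueue_struct *vblank_ctl_wq;

static void vblank_ctl_worker(struct work_struct *work)
{
	struct vblank_ctl_work *w =
		container_of(work, struct vblank_ctl_work, work);

	/* ... act on w->enable under the appropriate lock ... */

	kfree(w);	/* each queued event owns its own allocation */
}

static int vblank_ctl_init(void)
{
	vblank_ctl_wq = create_singlethread_workqueue("vblank_ctl_wq");
	return vblank_ctl_wq ? 0 : -ENOMEM;
}

/* One allocation per enable/disable event; no shared, requeued item. */
static int vblank_ctl_queue(bool enable)
{
	struct vblank_ctl_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	INIT_WORK(&w->work, vblank_ctl_worker);
	w->enable = enable;
	queue_work(vblank_ctl_wq, &w->work);
	return 0;
}

/*
 * Driver stop: drain anything still queued, then tear the queue down.
 * flush_workqueue() alone is the on-demand drain (e.g. for PSR disable);
 * destroy_workqueue() also drains before freeing.
 */
static void vblank_ctl_fini(void)
{
	flush_workqueue(vblank_ctl_wq);
	destroy_workqueue(vblank_ctl_wq);
	vblank_ctl_wq = NULL;
}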

Reviewed-by: Roman Li 
Acked-by: Wayne Lin 
Signed-off-by: Nicholas Kazlauskas 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 62 ---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 21 ---
 2 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3e28f17c84fa..f88b6c5b83cd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1044,10 +1044,10 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-static void event_mall_stutter(struct work_struct *work)
+static void vblank_control_worker(struct work_struct *work)
 {
-
-	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+	struct vblank_control_work *vblank_work =
+		container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
 
	mutex_lock(&dm->dc_lock);
@@ -1062,22 +1062,9 @@ static void event_mall_stutter(struct work_struct *work)
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", 
dm->active_vblank_irq_count == 0);
 
	mutex_unlock(&dm->dc_lock);
+   kfree(vblank_work);
 }
 
-static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
-{
-   struct vblank_workqueue *vblank_work;
-
-   vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
-   if (ZERO_OR_NULL_PTR(vblank_work)) {
-   kfree(vblank_work);
-   return NULL;
-   }
-
-	INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
-
-   return vblank_work;
-}
 #endif
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
@@ -1220,12 +1207,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->dm.dc->caps.max_links > 0) {
-		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
-
-		if (!adev->dm.vblank_workqueue)
+		adev->dm.vblank_control_workqueue =
+			create_singlethread_workqueue("dm_vblank_control_workqueue");
+		if (!adev->dm.vblank_control_workqueue)
 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
-		else
-			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
}
 #endif
 
@@ -1298,6 +1283,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 {
int i;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+   if (adev->dm.vblank_control_workqueue) {
+   destroy_workqueue(adev->dm.vblank_control_workqueue);
+   adev->dm.vblank_control_workqueue = NULL;
+   }
+#endif
+
for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
}
@@ -1321,14 +1313,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
dc_deinit_callbacks(adev->dm.dc);
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-   if (adev->dm.vblank_workqueue) {
-   adev->dm.vblank_workqueue->dm = NULL;
-   kfree(adev->dm.vblank_workqueue);
-   adev->dm.vblank_workqueue = NULL;
-   }
-#endif
-
	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 
if (dc_enable_dmub_notifications(adev->dm.dc)) {
@@ -6000,7 +5984,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
-   unsigned long flags;
+   struct vblank_control_work *work;
 #endif
int rc = 0;
 
@@ -6025,12 +6009,16 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)