Re: [PATCH] drm/amd/display: turn DPMS off on mst connector unplug

2020-11-26 Thread Aurabindo Pillai


On 2020-11-26 3:11 p.m., Kazlauskas, Nicholas wrote:
> On 2020-11-26 2:50 p.m., Aurabindo Pillai wrote:
>> [Why]
>>
>> Set DPMS off on the MST connector that was unplugged. As a side effect,
>> the MST payload is deallocated, which releases some references held on it.
>>
>> Signed-off-by: Aurabindo Pillai 
>> Signed-off-by: Eryk Brol 
>> ---
>>   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 63 ++-
>>   1 file changed, 62 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> index e213246e3f04..fc984cf6e316 100644
>> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
>> @@ -1984,6 +1984,64 @@ static void dm_gpureset_commit_state(struct
>> dc_state *dc_state,
>>   return;
>>   }
>>   +static void dm_set_dpms_off(struct dc_link *link)
>> +{
>> +    struct {
>> +    struct dc_surface_update surface_updates[MAX_SURFACES];
>> +    struct dc_stream_update stream_update;
>> +    } * bundle;
>> +    struct dc_stream_state *stream_state;
>> +    struct dc_context *dc_ctx = link->ctx;
>> +    int i;
>> +
>> +    bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
>> +
>> +    if (!bundle) {
>> +    dm_error("Failed to allocate update bundle\n");
>> +    return;
>> +    }
>> +
>> +    bundle->stream_update.dpms_off = kzalloc(sizeof(bool), GFP_KERNEL);
> 
> You don't need to allocate memory for the bool. You can just use a local
> here, DC doesn't need it after the call ends.
> 
> I think the same should apply to the surface updates as well. I'm not
> entirely sure how much stack the bundle takes up for a single stream
> here but it should be small enough that we can leave it on the stack.
> 
>> +
>> +    if (!bundle->stream_update.dpms_off) {
>> +    dm_error("Failed to allocate update bundle\n");
>> +    goto cleanup;
>> +    }
>> +
>> +    *bundle->stream_update.dpms_off = true;
>> +
>> +    for (i = 0; i < MAX_PIPES; i++) {
>> +    if (dc_ctx->dc->current_state->streams[i] == NULL)
>> +    continue;
>> +
>> +    if (dc_ctx->dc->current_state->streams[i]->link == link) {
>> +    stream_state = dc_ctx->dc->current_state->streams[i];
>> +    goto link_found;
>> +    }
>> +    }
> 
> We shouldn't be reading from dc->current_state directly in DM, it's
> essentially private state.
> 
> I think we should actually have a new helper here in dc_stream.h that's
> like:
> 
> struct dc_stream_state *dc_stream_find_from_link(const struct dc_link
> *link);
> 
> to replace this block of code.
> 
> Note that any time we touch current_state we also need to be locking -
> it looks like this function is missing the appropriate calls to:
> 
> mutex_lock(&adev->dm.dc_lock);
> mutex_unlock(&adev->dm.dc_lock);
> 
> Regards,
> Nicholas Kazlauskas
> 
> 

Thank you for the review. Sent a v2.


Re: [PATCH] drm/amd/display: turn DPMS off on mst connector unplug

2020-11-26 Thread Kazlauskas, Nicholas

On 2020-11-26 2:50 p.m., Aurabindo Pillai wrote:

[Why]

Set DPMS off on the MST connector that was unplugged. As a side effect, the
MST payload is deallocated, which releases some references held on it.

Signed-off-by: Aurabindo Pillai 
Signed-off-by: Eryk Brol 
---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 63 ++-
  1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e213246e3f04..fc984cf6e316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1984,6 +1984,64 @@ static void dm_gpureset_commit_state(struct dc_state 
*dc_state,
return;
  }
  
+static void dm_set_dpms_off(struct dc_link *link)

+{
+   struct {
+   struct dc_surface_update surface_updates[MAX_SURFACES];
+   struct dc_stream_update stream_update;
+   } * bundle;
+   struct dc_stream_state *stream_state;
+   struct dc_context *dc_ctx = link->ctx;
+   int i;
+
+   bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+   if (!bundle) {
+   dm_error("Failed to allocate update bundle\n");
+   return;
+   }
+
+   bundle->stream_update.dpms_off = kzalloc(sizeof(bool), GFP_KERNEL);


You don't need to allocate memory for the bool. You can just use a local 
here, DC doesn't need it after the call ends.


I think the same should apply to the surface updates as well. I'm not 
entirely sure how much stack the bundle takes up for a single stream 
here but it should be small enough that we can leave it on the stack.
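
For illustration only, not the actual v2: a minimal sketch of that suggestion,
assuming (as noted above) that DC consumes the update before
dc_commit_updates_for_stream() returns, so nothing needs to outlive the call:

	struct dc_surface_update surface_updates[MAX_SURFACES] = { 0 };
	struct dc_stream_update stream_update = { 0 };
	bool dpms_off = true;

	stream_update.stream = stream_state;
	stream_update.dpms_off = &dpms_off;

	dc_commit_updates_for_stream(stream_state->ctx->dc, surface_updates, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);

That drops both kzalloc() calls, the error paths for them, and the cleanup label.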



+
+   if (!bundle->stream_update.dpms_off) {
+   dm_error("Failed to allocate update bundle\n");
+   goto cleanup;
+   }
+
+   *bundle->stream_update.dpms_off = true;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   if (dc_ctx->dc->current_state->streams[i] == NULL)
+   continue;
+
+   if (dc_ctx->dc->current_state->streams[i]->link == link) {
+   stream_state = dc_ctx->dc->current_state->streams[i];
+   goto link_found;
+   }
+   }


We shouldn't be reading from dc->current_state directly in DM, it's 
essentially private state.


I think we should actually have a new helper here in dc_stream.h that's 
like:


struct dc_stream_state *dc_stream_find_from_link(const struct dc_link 
*link);


to replace this block of code.
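
A rough sketch of one possible shape for that helper, mirroring the loop in the
patch rather than any existing DC code (the placement in dc_stream.c and the
exact iteration are assumptions):

struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
{
	struct dc_context *ctx = link->ctx;
	int i;

	/* Return the first stream in the current state driven by this link. */
	for (i = 0; i < MAX_PIPES; i++) {
		struct dc_stream_state *stream =
			ctx->dc->current_state->streams[i];

		if (stream && stream->link == link)
			return stream;
	}

	return NULL;
}

With that in place, DM would call the helper instead of walking
dc->current_state itself.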

Note that any time we touch current_state we also need to be locking - 
it looks like this function is missing the appropriate calls to:


mutex_lock(&adev->dm.dc_lock);
mutex_unlock(&adev->dm.dc_lock);
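
For illustration, one possible placement, assuming the lock is taken at a call
site such as handle_hpd_irq() where adev is in scope (a sketch, not a
prescription):

	mutex_lock(&adev->dm.dc_lock);
	dm_set_dpms_off(aconnector->dc_link);
	mutex_unlock(&adev->dm.dc_lock);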

Regards,
Nicholas Kazlauskas



+
+   dm_error("Cannot find link associated with the stream to be 
disabled\n");
+   return;
+
+link_found:
+   if (stream_state == NULL) {
+   dm_error("Stream state associated with the link is NULL\n");
+   return;
+   }
+
+   bundle->stream_update.stream = stream_state;
+
+   dc_commit_updates_for_stream(stream_state->ctx->dc, bundle->surface_updates, 0,
+				stream_state, &bundle->stream_update,
+				stream_state->ctx->dc->current_state);
+
+   kfree(bundle->stream_update.dpms_off);
+cleanup:
+   kfree(bundle);
+
+   return;
+}
+
  static int dm_resume(void *handle)
  {
struct amdgpu_device *adev = handle;
@@ -2434,8 +2492,11 @@ static void handle_hpd_irq(void *param)
drm_kms_helper_hotplug_event(dev);
  
  	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {

-   amdgpu_dm_update_connector_after_detect(aconnector);
+   if (new_connection_type == dc_connection_none &&
+   aconnector->dc_link->type == dc_connection_none)
+   dm_set_dpms_off(aconnector->dc_link);
  
+		amdgpu_dm_update_connector_after_detect(aconnector);
  
  		drm_modeset_lock_all(dev);

dm_restore_drm_connector_state(dev, connector);





[PATCH] drm/amd/display: turn DPMS off on mst connector unplug

2020-11-26 Thread Aurabindo Pillai
[Why]

Set DPMS off on the MST connector that was unplugged. As a side effect, the
MST payload is deallocated, which releases some references held on it.

Signed-off-by: Aurabindo Pillai 
Signed-off-by: Eryk Brol 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 63 ++-
 1 file changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e213246e3f04..fc984cf6e316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1984,6 +1984,64 @@ static void dm_gpureset_commit_state(struct dc_state 
*dc_state,
return;
 }
 
+static void dm_set_dpms_off(struct dc_link *link)
+{
+   struct {
+   struct dc_surface_update surface_updates[MAX_SURFACES];
+   struct dc_stream_update stream_update;
+   } * bundle;
+   struct dc_stream_state *stream_state;
+   struct dc_context *dc_ctx = link->ctx;
+   int i;
+
+   bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+   if (!bundle) {
+   dm_error("Failed to allocate update bundle\n");
+   return;
+   }
+
+   bundle->stream_update.dpms_off = kzalloc(sizeof(bool), GFP_KERNEL);
+
+   if (!bundle->stream_update.dpms_off) {
+   dm_error("Failed to allocate update bundle\n");
+   goto cleanup;
+   }
+
+   *bundle->stream_update.dpms_off = true;
+
+   for (i = 0; i < MAX_PIPES; i++) {
+   if (dc_ctx->dc->current_state->streams[i] == NULL)
+   continue;
+
+   if (dc_ctx->dc->current_state->streams[i]->link == link) {
+   stream_state = dc_ctx->dc->current_state->streams[i];
+   goto link_found;
+   }
+   }
+
+   dm_error("Cannot find link associated with the stream to be 
disabled\n");
+   return;
+
+link_found:
+   if (stream_state == NULL) {
+   dm_error("Stream state associated with the link is NULL\n");
+   return;
+   }
+
+   bundle->stream_update.stream = stream_state;
+
+   dc_commit_updates_for_stream(stream_state->ctx->dc, bundle->surface_updates, 0,
+				stream_state, &bundle->stream_update,
+				stream_state->ctx->dc->current_state);
+
+   kfree(bundle->stream_update.dpms_off);
+cleanup:
+   kfree(bundle);
+
+   return;
+}
+
 static int dm_resume(void *handle)
 {
struct amdgpu_device *adev = handle;
@@ -2434,8 +2492,11 @@ static void handle_hpd_irq(void *param)
drm_kms_helper_hotplug_event(dev);
 
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
-   amdgpu_dm_update_connector_after_detect(aconnector);
+   if (new_connection_type == dc_connection_none &&
+   aconnector->dc_link->type == dc_connection_none)
+   dm_set_dpms_off(aconnector->dc_link);
 
+   amdgpu_dm_update_connector_after_detect(aconnector);
 
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
-- 
2.25.1
