[PATCH] drm: Rename headers to match DP2.1 spec

2023-02-06 Thread jdhillon
This patch changes the headers defined in drm_dp.h to match
the DP 2.1 spec.

Signed-off-by: Jasdeep Dhillon 
---
 drivers/gpu/drm/tegra/dp.c   |  2 +-
 include/drm/display/drm_dp.h | 13 +++--
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..f33e468ece0a 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -499,7 +499,7 @@ static int drm_dp_link_apply_training(struct drm_dp_link 
*link)
for (i = 0; i < lanes; i++)
values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
 
-   err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+   err = drm_dp_dpcd_write(aux, DP_LINK_SQUARE_PATTERN, values,
DIV_ROUND_UP(lanes, 2));
if (err < 0) {
DRM_ERROR("failed to set post-cursor: %d\n", err);
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index ed10e6b6f99d..2093c1f8d8e0 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -641,12 +641,11 @@
 # define DP_LINK_QUAL_PATTERN_CUSTOM0x40
 # define DP_LINK_QUAL_PATTERN_SQUARE0x48
 
-#define DP_TRAINING_LANE0_1_SET2   0x10f
-#define DP_TRAINING_LANE2_3_SET2   0x110
-# define DP_LANE02_POST_CURSOR2_SET_MASK(3 << 0)
-# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
-# define DP_LANE13_POST_CURSOR2_SET_MASK(3 << 4)
-# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
+#define DP_LINK_SQUARE_PATTERN 0x10f
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX0x110
+# define DP_UHBR10_20_CAPABILITY   (3 << 0)
+# define DP_UHBR13_5_CAPABILITY(1 << 2)
+# define DP_CABLE_TYPE (7 << 3)
 
 #define DP_MSTM_CTRL   0x111   /* 1.2 */
 # define DP_MST_EN (1 << 0)
@@ -1127,6 +1126,8 @@
 # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05
 # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06
 
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
0x2217 /* 2.0 */
+
 #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0  0x2230
 #define DP_TEST_264BIT_CUSTOM_PATTERN_263_256  0x2250
 
-- 
2.34.1



[PATCH 05/23] drm/amd/display: phase2 enable mst hdcp multiple displays

2022-12-09 Thread jdhillon
From: hersen wu 

[why]
For MST topology with 1 physical link and multiple connectors (>=2),
e.g. daisy chained MST + SST, or 1-to-multi MST hub, if userspace
set to enable the HDCP simultaneously on all connected outputs, the
commit tail iteratively call the hdcp_update_display() for each
display (connector). However, the hdcp workqueue data structure for
each link has only one DM connector and encryption status members,
which means the work queue of property_validate/update() would only
be triggered for the last connector within this physical link, and
therefore the HDCP property value of other connectors would stay on
DESIRED instead of switching to ENABLED, which is NOT as expected.

[how]
Use array of AMDGPU_DM_MAX_DISPLAY_INDEX for both aconnector and
encryption status in hdcp workqueue data structure for each physical
link. For property validate/update work queue, we iterate over the
array and do a similar operation/check for each connected display.

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Jasdeep Dhillon 
Signed-off-by: hersen wu 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c| 160 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h|   4 +-
 2 files changed, 122 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index a7fd98f57f94..0301faaf5d48 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display *display = _work[link_index].display;
struct mod_hdcp_link *link = _work[link_index].link;
struct mod_hdcp_display_query query;
+   unsigned int conn_index = aconnector->base.index;
 
mutex_lock(_w->mutex);
-   hdcp_w->aconnector = aconnector;
+   hdcp_w->aconnector[conn_index] = aconnector;
 
query.display = NULL;
mod_hdcp_query_display(_w->hdcp, aconnector->base.index, );
@@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
  
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
} else {
display->adjust.disable = 
MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
-   hdcp_w->encryption_status = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+   hdcp_w->encryption_status[conn_index] = 
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
cancel_delayed_work(_w->property_validate_dwork);
}
 
@@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue 
*hdcp_work,
 {
struct hdcp_workqueue *hdcp_w = _work[link_index];
struct drm_connector_state *conn_state = aconnector->base.state;
+   unsigned int conn_index = aconnector->base.index;
 
mutex_lock(_w->mutex);
-   hdcp_w->aconnector = aconnector;
+   hdcp_w->aconnector[conn_index] = aconnector;
 
/* the removal of display will invoke auth reset -> hdcp destroy and
 * we'd expect the Content Protection (CP) property changed back to
@@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue 
*hdcp_work,
 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int 
link_index)
 {
struct hdcp_workqueue *hdcp_w = _work[link_index];
+   unsigned int conn_index;
 
mutex_lock(_w->mutex);
 
mod_hdcp_reset_connection(_w->hdcp,  _w->output);
 
cancel_delayed_work(_w->property_validate_dwork);
-   hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+   for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; 
conn_index++) {
+   hdcp_w->encryption_status[conn_index] =
+   MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+   }
 
process_output(hdcp_w);
 
@@ -290,49 +297,83 @@ static void event_callback(struct work_struct *work)
 
 
 }
+
 static void event_property_update(struct work_struct *work)
 {
-
struct hdcp_workqueue *hdcp_work = container_of(work, struct 
hdcp_workqueue, property_update_work);
-   struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
-   struct drm_device *dev = hdcp_work->aconnector->base.dev;
+   struct amdgpu_dm_connector *aconnector = NULL;
+   struct drm_device *dev;
long ret;
+   unsigned int conn_index;
+   struct drm_connector *connector;
+   struct drm_connector_state *conn_state;
 
-   drm_modeset_lock(>mode_config.connection_mutex, NULL);
-   mutex_lock(_work->mutex);
+   for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; 
conn_index++) {
+   aconnector = hdcp_work->aconnector[conn_index];
 
+   if (!aconnector)
+   continue;
 
-   if (aconnector->base.state && aconnector->base.state->commit) {
-   ret = 

[PATCH 23/23] drm/amd/display: 3.2.216

2022-12-09 Thread jdhillon
From: Aric Cyr 

This version brings along following fixes:

-Fix array index out of bound error
-Speed up DML fast validate
-Implement multiple secure display
-MST HDCP for multiple display
-Add DPIA notification
-Add support for three new square pattern variants

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index a76031d7e202..c14205e3183f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.215"
+#define DC_VER "3.2.216"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.34.1



[PATCH 22/23] drm/amd/display: Reduce expected sdp bandwidth for dcn321

2022-12-09 Thread jdhillon
From: Dillon Varone 

[Description]
Modify soc BB to reduce expected sdp bandwidth and align with measurements to
fix underflow issues.

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index f4b176599be7..0ea406145c1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-   .pct_ideal_sdp_bw_after_urgent = 100.0,
+   .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for 
now keep as is until DML implemented
-- 
2.34.1



[PATCH 11/23] drm/amd/display: Check for PSR in no memory request case

2022-12-09 Thread jdhillon
From: Samson Tam 

[Why]
When we have a PSR display, we will not be requesting data from memory anymore.
So we report back true for no memory request case.

[How]
Check for PSR by checking PSR version in link settings

Reviewed-by: Alvin Lee 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Samson Tam 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  | 9 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 3 ++-
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dc23801de071..6a6e4c844316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8359,10 +8359,7 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
new_crtc_state = drm_atomic_get_new_crtc_state(state, 
>base);
old_crtc_state = drm_atomic_get_old_crtc_state(state, 
>base);
}
-<<< HEAD
-===
-
->>> 667f52144b9a (drm/amd/display: phase3 mst hdcp for multiple displays)
+   
if (old_crtc_state)
pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: 
%x\n",
old_crtc_state->enable,
@@ -8425,10 +8422,6 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
 
-<<< HEAD
-
-===
->>> 667f52144b9a (drm/amd/display: phase3 mst hdcp for multiple displays)
if (aconnector->dc_link && aconnector->dc_sink &&
aconnector->dc_link->type == 
dc_connection_mst_branch) {
struct hdcp_workqueue *hdcp_work = 
adev->dm.hdcp_workqueue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index b8767be1e4c5..2f0ebe1f6c45 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -188,7 +188,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc 
*dc)
 
 /* First, check no-memory-request case */
for (i = 0; i < dc->current_state->stream_count; i++) {
-   if (dc->current_state->stream_status[i].plane_count)
+   if ((dc->current_state->stream_status[i].plane_count) &&
+   
(dc->current_state->streams[i]->link->psr_settings.psr_version == 
DC_PSR_VERSION_UNSUPPORTED))
/* Fail eligibility on a visible stream */
break;
}
-- 
2.34.1



[PATCH 14/23] drm/amd/display: Add DPIA NOTIFICATION logic

2022-12-09 Thread jdhillon
From: Mustapha Ghaddar 

[WHY]
Adding the new DPIA NOTIFY packets from DMUB
As per the design with Cruise to account for
250ms response delay otherwise

[HOW]
Added the DPIA NOTIFY logic as per DMUB logic

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Mustapha Ghaddar 
---
 drivers/gpu/drm/amd/display/dc/core/dc_stat.c |  1 +
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h   |  3 +
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   | 83 +++
 .../drm/amd/display/dmub/src/dmub_srv_stat.c  | 21 +
 4 files changed, 108 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 4b372aa52801..6c06587dd88c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,6 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, 
struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+   notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, 
notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index eb5b7eb292ef..c8274967de94 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -126,6 +126,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
+   DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_MAX
 };
 
@@ -453,6 +454,7 @@ struct dmub_srv {
  * @pending_notification: Indicates there are other pending notifications
  * @aux_reply: aux reply
  * @hpd_status: hpd status
+ * @bw_alloc_reply: BW Allocation reply from CM/DPIA
  */
 struct dmub_notification {
enum dmub_notification_type type;
@@ -463,6 +465,7 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
+   struct dpia_notification_reply_data bw_alloc_reply;
};
 };
 
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 33907feefebb..4dcd82d19ccf 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -770,6 +770,7 @@ enum dmub_out_cmd_type {
 * Command type used for SET_CONFIG Reply notification
 */
DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+   DMUB_OUT_CMD__DPIA_NOTIFICATION = 5
 };
 
 /* DMUB_CMD__DPIA command sub-types. */
@@ -1516,6 +1517,84 @@ struct dp_hpd_data {
uint8_t pad;
 };
 
+/**
+ * DPIA NOTIFICATION Response Type
+ */
+enum dpia_notify_bw_alloc_status {
+
+   DPIA_BW_REQ_FAILED = 0,
+   DPIA_BW_REQ_SUCCESS,
+   DPIA_EST_BW_CHANGED,
+   DPIA_BW_ALLOC_CAPS_CHANGED
+};
+
+/* DMUB_OUT_CMD__DPIA_NOTIFY Reply command - OutBox Cmd */
+/**
+ * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command.
+ */
+struct dpia_notification_reply_data {
+   uint8_t allocated_bw;
+   uint8_t estimated_bw;
+};
+
+struct dpia_notification_common {
+   bool shared;
+};
+
+struct dpia_bw_allocation_notify_data {
+   union {
+   struct {
+   uint16_t cm_bw_alloc_support: 1;/**< USB4 CM BW 
Allocation mode support */
+   uint16_t bw_request_failed: 1;  /**< 
BW_Request_Failed */
+   uint16_t bw_request_succeeded: 1;   /**< 
BW_Request_Succeeded */
+   uint16_t est_bw_changed: 1; /**< 
Estimated_BW changed */
+   uint16_t bw_alloc_cap_changed: 1;   /**< 
BW_Allocation_Capabiity_Changed */
+   uint16_t reserved: 11;
+   } bits;
+   uint16_t flags;
+   };
+   uint8_t cm_id;  /**< CM ID */
+   uint8_t group_id;   /**< Group ID */
+   uint8_t granularity;/**< BW Allocation Granularity */
+   uint8_t estimated_bw;   /**< Estimated_BW */
+   uint8_t allocated_bw;   /**< Allocated_BW */
+   uint8_t reserved;
+};
+
+union dpia_notification_data {
+   struct dpia_notification_common common_data;
+   struct dpia_bw_allocation_notify_data dpia_bw_alloc;/**< Used for 
DPIA BW Allocation mode notification */
+};
+
+enum dmub_cmd_dpia_notification_type {
+   DPIA_NOTIFY__BW_ALLOCATION = 0,
+};
+
+struct dpia_notification_header {
+   uint8_t instance;   
/**< DPIA Instance */
+   

[PATCH 21/23] drm/amd/display: Revert Scaler HCBlank issue workaround

2022-12-09 Thread jdhillon
From: "Leo (Hanghong) Ma" 

This reverts commit b1a3d467a069519fd8aed711fff94c49e486e701.
Workaround no longer needed.

Reviewed-by: Chris Park 
Reviewed-by: Chris Park 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Leo (Hanghong) Ma 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ce8d6a54ca54..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -82,7 +82,6 @@ struct dp_hdmi_dongle_signature_data {
 #define HDMI_SCDC_STATUS_FLAGS 0x40
 #define HDMI_SCDC_ERR_DETECT 0x50
 #define HDMI_SCDC_TEST_CONFIG 0xC0
-#define HDMI_SCDC_DEVICE_ID 0xD3
 
 union hdmi_scdc_update_read_data {
uint8_t byte[2];
-- 
2.34.1



[PATCH 18/23] drm/amd/display: Clear MST topology if it fails to resume

2022-12-09 Thread jdhillon
From: Roman Li 

[Why]
In case of failure to resume MST topology after suspend, an empty
mst tree prevents further mst hub detection on the same connector.
That causes the issue with MST hub hotplug after it's been unplugged in
suspend.

[How]
Stop topology manager on the connector after detecting DM_MST failure.

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Roman Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 88474f04fa9a..2fb7de3b4f5e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2170,6 +2170,8 @@ static int detect_mst_link_for_all_connectors(struct 
drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
+   ret = 
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+
aconnector->dc_link);
break;
}
}
-- 
2.34.1



[PATCH 16/23] drm/amd/display: use encoder type independent hwss instead of accessing enc directly

2022-12-09 Thread jdhillon
From: Wenjing Liu 

[why]
in dc_link_dp there still exist a few places where we call dio encoders
without checking current enabled encoder type.
The change is to make these places to call hwss equivalent functions so
it won't mistakenly program a wrong type encoder.

Reviewed-by: George Shen 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Wenjing Liu 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c   | 18 +-
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 909434faf039..905642349ba2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -6137,7 +6137,7 @@ bool dc_link_dp_set_test_pattern(
 * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
 */
/* Blank stream */
-   pipes->stream_res.stream_enc->funcs->dp_blank(link, 
pipe_ctx->stream_res.stream_enc);
+   link->dc->hwss.blank_stream(pipe_ctx);
}
 
dp_set_hw_test_pattern(link, _ctx->link_res, test_pattern,
@@ -7298,8 +7298,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream->link == link) {
udelay(100);
 
-   pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
-   pipes[i].stream_res.stream_enc);
+   link->dc->hwss.blank_stream([i]);
 
/* disable any test pattern that might be active */
dp_set_hw_test_pattern(link, [i].link_res,
@@ -7308,17 +7307,10 @@ void dp_retrain_link_dp_test(struct dc_link *link,
dp_receiver_power_ctrl(link, false);
 
link->dc->hwss.disable_stream([i]);
-   if (([i])->stream_res.audio && 
!link->dc->debug.az_endpoint_mute_only)
-   
([i])->stream_res.audio->funcs->az_disable(([i])->stream_res.audio);
+   if (pipes[i].stream_res.audio && 
!link->dc->debug.az_endpoint_mute_only)
+   
pipes[i].stream_res.audio->funcs->az_disable(pipes[i].stream_res.audio);
 
-   if (link->link_enc)
-   link->link_enc->funcs->disable_output(
-   link->link_enc,
-   SIGNAL_TYPE_DISPLAY_PORT);
-
-   /* Clear current link setting. */
-   memset(>cur_link_settings, 0,
-   sizeof(link->cur_link_settings));
+   link->dc->hwss.disable_link_output(link, 
[i].link_res, SIGNAL_TYPE_DISPLAY_PORT);
 
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
do_fallback = true;
-- 
2.34.1



[PATCH 20/23] drm/amd/display: Revert Reduce delay when sink device not able to ACK 00340h write

2022-12-09 Thread jdhillon
From: Ian Chen 

[WHY]
It causes a regression where the AMD source will not write DPCD 340.

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Ian Chen 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c|  6 --
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 14 +++---
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h |  1 -
 3 files changed, 3 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 342e906ae26e..1ca3328b492c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1916,12 +1916,6 @@ struct dc_link *link_create(const struct link_init_data 
*init_params)
if (false == dc_link_construct(link, init_params))
goto construct_fail;
 
-   /*
-* Must use preferred_link_setting, not reported_link_cap or 
verified_link_cap,
-* since struct preferred_link_setting won't be reset after S3.
-*/
-   link->preferred_link_setting.dpcd_source_device_specific_field_support 
= true;
-
return link;
 
 construct_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 905642349ba2..af9411ee3c74 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -6584,18 +6584,10 @@ void dpcd_set_source_specific_data(struct dc_link *link)
 
uint8_t hblank_size = 
(uint8_t)link->dc->caps.min_horizontal_blanking_period;
 
-   if 
(link->preferred_link_setting.dpcd_source_device_specific_field_support) {
-   result_write_min_hblank = 
core_link_write_dpcd(link,
-   DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, 
(uint8_t *)(_size),
-   sizeof(hblank_size));
-
-   if (result_write_min_hblank == 
DC_ERROR_UNEXPECTED)
-   
link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
-   } else {
-   DC_LOG_DC("Sink device does not support 00340h 
DPCD write. Skipping on purpose.\n");
-   }
+   result_write_min_hblank = core_link_write_dpcd(link,
+   DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t 
*)(_size),
+   sizeof(hblank_size));
}
-
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,

WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
"result=%u 
link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x 
branch_dev_name='%c%c%c%c%c%c'",
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h 
b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index b4eddd83b330..73f58ac3b93f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -149,7 +149,6 @@ struct dc_link_settings {
enum dc_link_spread link_spread;
bool use_link_rate_set;
uint8_t link_rate_set;
-   bool dpcd_source_device_specific_field_support;
 };
 
 union dc_dp_ffe_preset {
-- 
2.34.1



[PATCH 04/23] drm/amd/display: Fix when disabling secure_display

2022-12-09 Thread jdhillon
From: Alan Liu 

[Why]
Fix problems when we disable secure_display.

[How]
- Reset secure display context after disabled
- A secure_display_context is dedicate to a crtc, so we set the crtc for
it when we create the context.

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alan Liu 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  2 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 28 +++
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h |  5 ++--
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c2a99fbcd70..bbacd764bf0f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
 #endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-   adev->dm.secure_display_ctxs = 
amdgpu_dm_crtc_secure_display_create_contexts(adev->dm.dc->caps.max_links);
+   adev->dm.secure_display_ctxs = 
amdgpu_dm_crtc_secure_display_create_contexts(adev);
 #endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(>dm.dmub_aux_transfer_done);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 6453abcf5f4b..733041a55ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -214,14 +214,12 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc 
*crtc,
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Disable secure_display if it was enabled */
if (!enable) {
-   if (adev->dm.secure_display_ctxs) {
-   for (i = 0; i < adev->mode_info.num_crtc; i++) {
-   if 
(adev->dm.secure_display_ctxs[i].crtc == crtc) {
-   /* stop ROI update on this crtc 
*/
-   
flush_work(>dm.secure_display_ctxs[i].notify_ta_work);
-   
dc_stream_forward_crc_window(stream_state, NULL, true);
-   
adev->dm.secure_display_ctxs[i].crtc = NULL;
-   }
+   for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+   if (adev->dm.secure_display_ctxs[i].crtc == 
crtc) {
+   /* stop ROI update on this crtc */
+   
flush_work(>dm.secure_display_ctxs[i].notify_ta_work);
+   
flush_work(>dm.secure_display_ctxs[i].forward_roi_work);
+   
dc_stream_forward_crc_window(stream_state, NULL, true);
}
}
}
@@ -496,7 +494,12 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc 
*crtc)
}
 
secure_display_ctx = >dm.secure_display_ctxs[acrtc->crtc_id];
-   secure_display_ctx->crtc = crtc;
+   if (WARN_ON(secure_display_ctx->crtc != crtc)) {
+   /* We have set the crtc when creating secure_display_context,
+* don't expect it to be changed here.
+*/
+   secure_display_ctx->crtc = crtc;
+   }
 
if (acrtc->dm_irq_params.window_param.update_win) {
/* prepare work for dmub to update ROI */
@@ -527,19 +530,20 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc 
*crtc)
 }
 
 struct secure_display_context *
-amdgpu_dm_crtc_secure_display_create_contexts(int num_crtc)
+amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
 {
struct secure_display_context *secure_display_ctxs = NULL;
int i;
 
-   secure_display_ctxs = kcalloc(num_crtc, sizeof(struct 
secure_display_context), GFP_KERNEL);
+   secure_display_ctxs = kcalloc(AMDGPU_MAX_CRTCS, sizeof(struct 
secure_display_context), GFP_KERNEL);
 
if (!secure_display_ctxs)
return NULL;
 
-   for (i = 0; i < num_crtc; i++) {
+   for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
INIT_WORK(_display_ctxs[i].forward_roi_work, 
amdgpu_dm_forward_crc_window);
INIT_WORK(_display_ctxs[i].notify_ta_work, 
amdgpu_dm_crtc_notify_ta_to_read);
+   secure_display_ctxs[i].crtc = >mode_info.crtcs[i]->base;
}
 
return secure_display_ctxs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 4323f723c0de..935adca6f048 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -54,7 +54,7 @@ 

[PATCH 15/23] drm/amd/display: add support for three new square pattern variants from DP2.1 specs

2022-12-09 Thread jdhillon
From: Wenjing Liu 

[why]
DP2.1 spec has brought 3 new variants of square patterns with different
pre-shoot and de-emphasis equalization requirements. The commit adds
logic to identify these variants and apply corresponding equalization
requirements into hardware lane settings.

Reviewed-by: George Shen 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Wenjing Liu 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 48 +++
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  |  3 ++
 .../dc/dcn31/dcn31_hpo_dp_link_encoder.c  |  5 +-
 .../gpu/drm/amd/display/include/dpcd_defs.h   |  5 +-
 .../amd/display/include/link_service_types.h  |  7 ++-
 5 files changed, 56 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index dedd1246ce58..909434faf039 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4094,6 +4094,12 @@ static void dp_test_send_link_training(struct dc_link 
*link)
dp_retrain_link_dp_test(link, _settings, false);
 }
 
+static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
+{
+   return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
+   test_pattern <= DP_TEST_PATTERN_SQUARE_END);
+}
+
 /* TODO Raven hbr2 compliance eye output is unstable
  * (toggling on and off) with debugger break
  * This caueses intermittent PHY automation failure
@@ -4111,6 +4117,8 @@ static void dp_test_send_phy_test_pattern(struct dc_link 
*link)
union lane_adjust dpcd_lane_adjust;
unsigned int lane;
struct link_training_settings link_training_settings;
+   unsigned char no_preshoot = 0;
+   unsigned char no_deemphasis = 0;
 
dpcd_test_pattern.raw = 0;
memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
@@ -4204,8 +4212,21 @@ static void dp_test_send_phy_test_pattern(struct dc_link 
*link)
case PHY_TEST_PATTERN_264BIT_CUSTOM:
test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
break;
-   case PHY_TEST_PATTERN_SQUARE_PULSE:
-   test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
+   case PHY_TEST_PATTERN_SQUARE:
+   test_pattern = DP_TEST_PATTERN_SQUARE;
+   break;
+   case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+   test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+   no_preshoot = 1;
+   break;
+   case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+   test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+   no_deemphasis = 1;
+   break;
+   case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+   test_pattern = 
DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+   no_preshoot = 1;
+   no_deemphasis = 1;
break;
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
@@ -4222,7 +4243,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link 
*link)
test_pattern_size);
}
 
-   if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
+   if (is_dp_phy_sqaure_pattern(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP 
spec)
core_link_read_dpcd(
link,
@@ -4259,8 +4280,10 @@ static void dp_test_send_phy_test_pattern(struct dc_link 
*link)
((dpcd_post_cursor_2_adjustment >> (lane * 2)) 
& 0x03);
} else if 
(dp_get_link_encoding_format(>cur_link_settings) ==
DP_128b_132b_ENCODING) {
-   
link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
+   
link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+   
link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = 
no_preshoot;
+   
link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis 
= no_deemphasis;
}
}
 
@@ -6178,8 +6201,17 @@ bool dc_link_dp_set_test_pattern(
case DP_TEST_PATTERN_264BIT_CUSTOM:
pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
break;
-   case DP_TEST_PATTERN_SQUARE_PULSE:
-   pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
+   case DP_TEST_PATTERN_SQUARE:
+   pattern = PHY_TEST_PATTERN_SQUARE;
+   break;
+   case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+   pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+   break;
+   case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:

[PATCH 12/23] drm/amd/display: Block FPO / SubVP (DRR) on HDMI VRR configs

2022-12-09 Thread jdhillon
From: Alvin Lee 

[Description]
- Current policy does not support HDMI VRR by default, so we
  cannot enable FPO / SubVP (DRR) cases

Reviewed-by: Nevenko Stupar 
Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 2 +-
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 9 ++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6a6e4c844316..37c6be337a85 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8359,7 +8359,7 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
new_crtc_state = drm_atomic_get_new_crtc_state(state, 
>base);
old_crtc_state = drm_atomic_get_old_crtc_state(state, 
>base);
}
-   
+
if (old_crtc_state)
pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: 
%x\n",
old_crtc_state->enable,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index ff5f3ef8be0a..815cf13ebe11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -979,8 +979,11 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct 
dc_state *context)
}
// Use ignore_msa_timing_param flag to identify as DRR
if (found && 
context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
-   // SUBVP + DRR case
-   schedulable = subvp_drr_schedulable(dc, context, 
>res_ctx.pipe_ctx[vblank_index]);
+   // SUBVP + DRR case -- don't enable SubVP + DRR for HDMI VRR 
cases
+   if 
(context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync)
+   schedulable = subvp_drr_schedulable(dc, context, 
>res_ctx.pipe_ctx[vblank_index]);
+   else
+   schedulable = false;
} else if (found) {
main_timing = _pipe->stream->timing;
phantom_timing = 
_pipe->stream->mall_stream_config.paired_stream->timing;
@@ -1195,7 +1198,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,

pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
// Use ignore_msa_timing_param 
flag to identify as DRR
-   if 
(pipe->stream->ignore_msa_timing_param) {
+   if 
(pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
drr_pipe_found = true;
drr_pipe_index = i;
}
-- 
2.34.1



[PATCH 19/23] drm/amd/display: Block subvp if center timing is in use

2022-12-09 Thread jdhillon
From: Alvin Lee 

[Description]
- FW scheduling algorithm doesn't take into account that it's
  a center timing
- This affects where the subvp mclk switch can be scheduled
  (prevents HUBP vline interrupt from coming in if scheduled
  incorrectly)
- Block subvp center timing cases for now

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.h   |  1 +
 .../amd/display/dc/dcn32/dcn32_resource_helpers.c   | 13 +
 .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c|  2 +-
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 13fbc574910b..57ce1d670abe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -112,6 +112,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
 bool dcn32_mpo_in_use(struct dc_state *context);
 
 bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+bool dcn32_is_center_timing(struct pipe_ctx *pipe);
 
 struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 04fca788c50b..e5287e5f66d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -255,6 +255,19 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct 
dc_state *context)
return false;
 }
 
+bool dcn32_is_center_timing(struct pipe_ctx *pipe)
+{
+   bool is_center_timing = false;
+
+   if (pipe->stream) {
+   if (pipe->stream->timing.v_addressable != 
pipe->stream->dst.height ||
+   pipe->stream->timing.v_addressable != 
pipe->stream->src.height) {
+   is_center_timing = true;
+   }
+   }
+   return is_center_timing;
+}
+
 /**
  * 
***
  * dcn32_determine_det_override: Determine DET allocation for each pipe
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 13e5542a7028..e7459fd50bf9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -691,7 +691,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
 *   to combine this with SubVP can cause issues with the 
scheduling).
 * - Not TMZ surface
 */
-   if (pipe->plane_state && !pipe->top_pipe &&
+   if (pipe->plane_state && !pipe->top_pipe && 
!dcn32_is_center_timing(pipe) &&
pipe->stream->mall_stream_config.type == 
SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&

vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]]
 <= 0) {
while (pipe) {
-- 
2.34.1



[PATCH 17/23] drm/amd/display: Fix potential null-deref in dm_resume

2022-12-09 Thread jdhillon
From: Roman Li 

[Why]
Fixing smatch error:
dm_resume() error: we previously assumed 'aconnector->dc_link' could be null

[How]
Check if dc_link null at the beginning of the loop,
so further checks can be dropped.

Reported-by: kernel test robot 
Reported-by: Dan Carpenter 

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Roman Li 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 37c6be337a85..88474f04fa9a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2739,12 +2739,14 @@ static int dm_resume(void *handle)
drm_for_each_connector_iter(connector, ) {
aconnector = to_amdgpu_dm_connector(connector);
 
+   if (!aconnector->dc_link)
+   continue;
+
/*
 * this is the case when traversing through already created
 * MST connectors, should be skipped
 */
-   if (aconnector->dc_link &&
-   aconnector->dc_link->type == dc_connection_mst_branch)
+   if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
 
mutex_lock(>hpd_lock);
-- 
2.34.1



[PATCH 07/23] drm/amd/display: Demote Error Level When ODM Transition Supported

2022-12-09 Thread jdhillon
From: Fangzhi Zuo 

[Why && How]
On dcn32, HW supports odm transition in fast update. Hence this
error message is considered false positive. Downgrade the error level
to avoid catching unnecessary attention.

Reviewed-by: Dillon Varone 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Fangzhi Zuo 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da164685547d..002b7b512b09 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3810,6 +3810,8 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc 
*dc,
int i;
struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
 
+   DC_LOGGER_INIT(dc->ctx->logger);
+
pipe_ctx = >res_ctx.pipe_ctx[disabled_master_pipe_idx];
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
!IS_PIPE_SYNCD_VALID(pipe_ctx))
@@ -3820,9 +3822,16 @@ void check_syncd_pipes_for_disabled_master_pipe(struct 
dc *dc,
pipe_ctx_check = >res_ctx.pipe_ctx[i];
 
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == 
disabled_master_pipe_idx) &&
-   IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != 
disabled_master_pipe_idx))
-   DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled 
master pipe_idx[%d]\n",
-   i, disabled_master_pipe_idx);
+   IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != 
disabled_master_pipe_idx)) {
+   /* On dcn32, this error isn't fatal since hw supports 
odm transition in fast update*/
+   if (dc->ctx->dce_version == DCN_VERSION_3_2 ||
+   dc->ctx->dce_version == 
DCN_VERSION_3_21)
+   DC_LOG_DEBUG("DC: pipe_idx[%d] syncd with 
disabled master pipe_idx[%d]\n",
+   i, disabled_master_pipe_idx);
+   else
+   DC_ERR("DC: Failure: pipe_idx[%d] syncd with 
disabled master pipe_idx[%d]\n",
+   i, disabled_master_pipe_idx);
+   }
}
 }
 
-- 
2.34.1



[PATCH 09/23] drm/amd/display: save restore hdcp desired for disp unplug plug from mst hub

2022-12-09 Thread jdhillon
From: hersen wu 

[Why]
connector hdcp properties are lost after display is
unplugged from mst hub. connector is destroyed with
dm_dp_mst_connector_destroy. when display is plugged
back, hdcp is not desired, hdcp could not be enabled
by linux kernel automatically.

[How]
save hdcp properties into hdcp_work within
amdgpu_dm_atomic_commit_tail. if the same display is
plugged back with same display index, its hdcp
properties will be retrieved from hdcp_work within
dm_dp_mst_get_modes

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Jasdeep Dhillon 
Signed-off-by: hersen wu 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 104 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h|  13 +++
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  25 +
 3 files changed, 138 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bbacd764bf0f..7affe0899418 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8292,15 +8292,65 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
}
}
 #ifdef CONFIG_DRM_AMD_DC_HDCP
+   for_each_oldnew_connector_in_state(state, connector, old_con_state, 
new_con_state, i) {
+   struct dm_connector_state *dm_new_con_state = 
to_dm_connector_state(new_con_state);
+   struct amdgpu_crtc *acrtc = 
to_amdgpu_crtc(dm_new_con_state->base.crtc);
+   struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
+
+   pr_debug("[HDCP_DM] -- i : %x --\n", i);
+
+   if (!connector)
+   continue;
+
+   pr_debug("[HDCP_DM] connector->index: %x connect_status: %x 
dpms: %x\n",
+   connector->index, connector->status, connector->dpms);
+   pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+   old_con_state->content_protection, 
new_con_state->content_protection);
+
+   if (aconnector->dc_sink) {
+   if (aconnector->dc_sink->sink_signal != 
SIGNAL_TYPE_VIRTUAL &&
+   aconnector->dc_sink->sink_signal != 
SIGNAL_TYPE_NONE) {
+   pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+   aconnector->dc_sink->edid_caps.display_name);
+   }
+   }
+
+   new_crtc_state = NULL;
+   old_crtc_state = NULL;
+
+   if (acrtc) {
+   new_crtc_state = drm_atomic_get_new_crtc_state(state, 
>base);
+   old_crtc_state = drm_atomic_get_old_crtc_state(state, 
>base);
+   }
+   if (old_crtc_state)
+   pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: 
%x\n",
+   old_crtc_state->enable,
+   old_crtc_state->active,
+   old_crtc_state->mode_changed,
+   old_crtc_state->active_changed,
+   old_crtc_state->connectors_changed);
+
+   if (new_crtc_state)
+   pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: 
%x\n",
+   new_crtc_state->enable,
+   new_crtc_state->active,
+   new_crtc_state->mode_changed,
+   new_crtc_state->active_changed,
+   new_crtc_state->connectors_changed);
+   }
+
for_each_oldnew_connector_in_state(state, connector, old_con_state, 
new_con_state, i) {
struct dm_connector_state *dm_new_con_state = 
to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = 
to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
 
new_crtc_state = NULL;
+   old_crtc_state = NULL;
 
-   if (acrtc)
+   if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, 
>base);
+   old_crtc_state = drm_atomic_get_old_crtc_state(state, 
>base);
+   }
 
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8312,11 +8362,57 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
continue;
}
 
-   if (is_content_protection_different(new_con_state, 
old_con_state, connector, adev->dm.hdcp_workqueue))
+   if (is_content_protection_different(
+   new_crtc_state,
+   old_crtc_state,
+   new_con_state,
+   old_con_state,
+   connector,
+   adev->dm.hdcp_workqueue)) {
+

[PATCH 10/23] drm/amd/display: phase3 mst hdcp for multiple displays

2022-12-09 Thread jdhillon
From: hersen wu 

[Why]
multiple display hdcp are enabled within event_property_validate,
event_property_update by looping all displays on mst hub. when
one of display on mst hub in unplugged or disabled, hdcp are
disabled for all displays on mst hub within hdcp_reset_display
by looping all displays of mst link. for displays still active,
their encryption status are off. kernel driver will not run hdcp
authentication again. therefore, hdcp are not enabled automatically.

[How]
within is_content_protection_different, check drm_crtc_state changes
of all displays on mst hub; if needed, trigger hdcp_update_display to
re-run hdcp authentication.

Reviewed-by: Bhawanpreet Lakha 
Acked-by: Jasdeep Dhillon 
Signed-off-by: hersen wu 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 118 --
 1 file changed, 81 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7affe0899418..dc23801de071 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7374,27 +7374,55 @@ is_scaling_state_different(const struct 
dm_connector_state *dm_state,
 }
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
-   const struct drm_connector_state 
*old_state,
-   const struct drm_connector 
*connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state 
*new_crtc_state,
+   struct drm_crtc_state 
*old_crtc_state,
+   struct drm_connector_state 
*new_conn_state,
+   struct drm_connector_state 
*old_conn_state,
+   const struct drm_connector 
*connector,
+   struct hdcp_workqueue *hdcp_w)
 {
struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = 
to_dm_connector_state(connector->state);
 
-   /* Handle: Type0/1 change */
-   if (old_state->hdcp_content_type != state->hdcp_content_type &&
-   state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 
{
-   state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+   pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+   connector->index, connector->status, connector->dpms);
+   pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+   old_conn_state->content_protection, 
new_conn_state->content_protection);
+
+   if (old_crtc_state)
+   pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x 
c-chg: %x\n",
+   old_crtc_state->enable,
+   old_crtc_state->active,
+   old_crtc_state->mode_changed,
+   old_crtc_state->active_changed,
+   old_crtc_state->connectors_changed);
+
+   if (new_crtc_state)
+   pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x 
c-chg: %x\n",
+   new_crtc_state->enable,
+   new_crtc_state->active,
+   new_crtc_state->mode_changed,
+   new_crtc_state->active_changed,
+   new_crtc_state->connectors_changed);
+
+   /* hdcp content type change */
+   if (old_conn_state->hdcp_content_type != 
new_conn_state->hdcp_content_type &&
+   new_conn_state->content_protection != 
DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+   new_conn_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_DESIRED;
+   pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
return true;
}
 
-   /* CP is being re enabled, ignore this
-*
-* Handles: ENABLED -> DESIRED
-*/
-   if (old_state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_ENABLED &&
-   state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
-   state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+   /* CP is being re enabled, ignore this */
+   if (old_conn_state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+   new_conn_state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+   if (new_crtc_state && new_crtc_state->mode_changed) {
+   new_conn_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_DESIRED;
+   pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s 
:true\n", __func__);
+   return true;
+   };
+   new_conn_state->content_protection = 
DRM_MODE_CONTENT_PROTECTION_ENABLED;
+   pr_debug("[HDCP_DM] ENABLED -> DESIRED %s 

[PATCH 13/23] drm/amd/display: Clear link res when merging a pipe split

2022-12-09 Thread jdhillon
From: Alvin Lee 

[Description]
- When merging a pipe that was previously pipe split, we need
  to also clear the link resources or the next stream/plane that
  uses the pipe may have an incorrect link resource state

Reviewed-by: Wenjing Liu 
Reviewed-by: Nevenko Stupar 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 815cf13ebe11..13e5542a7028 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1660,6 +1660,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dcn20_release_dsc(>res_ctx, 
dc->res_pool, >stream_res.dsc);
memset(>plane_res, 0, sizeof(pipe->plane_res));
memset(>stream_res, 0, sizeof(pipe->stream_res));
+   memset(>link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == 
pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
@@ -1675,6 +1676,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->stream = NULL;
memset(>plane_res, 0, sizeof(pipe->plane_res));
memset(>stream_res, 0, sizeof(pipe->stream_res));
+   memset(>link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
-- 
2.34.1



[PATCH 08/23] drm/amd/display: run subvp validation with supported vlevel

2022-12-09 Thread jdhillon
From: Dillon Varone 

[WHY]
Subvp portion validation currently assumes that if vlevel provided does not
support pstate, then none will, and so subvp is not used.

[HOW]
After get vlevel, use lowest vlevel that supports pstate if it
exists, and use that for subvp validation.

Reviewed-by: Alvin Lee 
Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index a42ddb911e1d..ff5f3ef8be0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1169,6 +1169,16 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipes[0].clks_cfg.dppclk_mhz = 
get_dppclk_calculated(>bw_ctx.dml, pipes, *pipe_cnt, 0);
*vlevel = dml_get_voltage_level(>bw_ctx.dml, 
pipes, *pipe_cnt);
 
+   /* Check that vlevel requested supports pstate or not
+* if not, select the lowest vlevel that supports it
+*/
+   for (i = *vlevel; i < 
context->bw_ctx.dml.soc.num_states; i++) {
+   if 
(vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != 
dm_dram_clock_change_unsupported) {
+   *vlevel = i;
+   break;
+   }
+   }
+
if (*vlevel < context->bw_ctx.dml.soc.num_states &&

vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != 
dm_dram_clock_change_unsupported
&& subvp_validate_static_schedulability(dc, 
context, *vlevel)) {
-- 
2.34.1



[PATCH 06/23] drm/amd/display: fix warning in amdgpu_dm_crtc_configure_crc_source()

2022-12-09 Thread jdhillon
From: Hamza Mahfooz 

If we build the kernel without CONFIG_DRM_AMD_SECURE_DISPLAY set, we get
the following compile warning:

drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm_crc.c: In function 
‘amdgpu_dm_crtc_configure_crc_source’:
drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm_crc.c:204:13: 
warning: unused variable ‘i’ [-Wunused-variable]
  204 | int i, ret = 0;
  | ^

So, guard variable i around CONFIG_DRM_AMD_SECURE_DISPLAY.

Fixes: 98f02f4c7050 ("SWDEV-351660 - dc: Implement multiple secure display")

Reviewed-by: HaoPing Liu 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Hamza Mahfooz 
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 733041a55ed1..8bf33fa4abd9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -198,10 +198,13 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc 
*crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
 {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+   int i;
+#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
-   int i, ret = 0;
+   int ret = 0;
 
/* Configuration will be deferred to stream enable. */
if (!stream_state)
-- 
2.34.1



[PATCH 03/23] drm/amd/display: Implement multiple secure display

2022-12-09 Thread jdhillon
From: Alan Liu 

[Why]
Current secure display only works with a single display; now make it
work with multiple displays.

[How]
Create secure_display_context for each crtc instance to store its
own Region of Interest (ROI) information.

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alan Liu 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  29 ++--
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   7 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 150 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h |  25 ++-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  38 +
 drivers/gpu/drm/amd/display/dc/core/dc.c  |   5 +-
 drivers/gpu/drm/amd/display/dc/dc_stream.h|   3 +-
 7 files changed, 116 insertions(+), 141 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0f391a147354..4c2a99fbcd70 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
 #endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-   adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+   adev->dm.secure_display_ctxs = 
amdgpu_dm_crtc_secure_display_create_contexts(adev->dm.dc->caps.max_links);
 #endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(>dm.dmub_aux_transfer_done);
@@ -1737,10 +1737,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(>dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-   if (adev->dm.crc_rd_wrk) {
-   flush_work(>dm.crc_rd_wrk->notify_ta_work);
-   kfree(adev->dm.crc_rd_wrk);
-   adev->dm.crc_rd_wrk = NULL;
+   if (adev->dm.secure_display_ctxs) {
+   for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+   if (adev->dm.secure_display_ctxs[i].crtc) {
+   
flush_work(>dm.secure_display_ctxs[i].notify_ta_work);
+   
flush_work(>dm.secure_display_ctxs[i].forward_roi_work);
+   }
+   }
+   kfree(adev->dm.secure_display_ctxs);
+   adev->dm.secure_display_ctxs = NULL;
}
 #endif
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -8409,9 +8414,6 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 #ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-   struct crc_rd_work *crc_rd_wrk;
-#endif
 #endif
/* Count number of newly disabled CRTCs for dropping PM refs 
later. */
if (old_crtc_state->active && !new_crtc_state->active)
@@ -8424,9 +8426,6 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
update_stream_irq_parameters(dm, dm_new_crtc_state);
 
 #ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-   crc_rd_wrk = dm->crc_rd_wrk;
-#endif
spin_lock_irqsave(_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(_to_drm(adev)->event_lock, flags);
@@ -8455,10 +8454,12 @@ static void amdgpu_dm_atomic_commit_tail(struct 
drm_atomic_state *state)
if (amdgpu_dm_crc_window_is_activated(crtc)) {

spin_lock_irqsave(_to_drm(adev)->event_lock, flags);

acrtc->dm_irq_params.window_param.update_win = true;
+
+   /**
+* It takes 2 frames for HW to stably 
generate CRC when
+* resuming from suspend, so we set 
skip_frame_cnt 2.
+*/

acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
-   
spin_lock_irq(_rd_wrk->crc_rd_work_lock);
-   crc_rd_wrk->crtc = crtc;
-   
spin_unlock_irq(_rd_wrk->crc_rd_work_lock);

spin_unlock_irqrestore(_to_drm(adev)->event_lock, flags);
}
 #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index df3c25e32c65..a3813c1e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -494,11 +494,12 @@ struct amdgpu_display_manager {
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
-* @crc_rd_wrk:
+* @secure_display_ctxs:
 *
-* Work 

[PATCH 01/23] drm/amd/display: Speed up DML fast_validate path

2022-12-09 Thread jdhillon
From: Ilya Bakoulin 

[Why]
Iterating over every voltage state when we need to validate thousands of
configurations all at once (i.e. display hotplug) can take a significant
amount of time.

[How]
Check just the highest voltage state when fast_validate is true to
verify whether the configuration can work at all, then do a proper
validation including all voltage states later when fast_validate is false.

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Ilya Bakoulin 
---
 .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  |  2 +
 .../dc/dml/dcn32/display_mode_vba_32.c| 37 ++-
 .../drm/amd/display/dc/dml/display_mode_lib.h |  1 +
 3 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f94abd124021..a42ddb911e1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1551,6 +1551,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,

context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_fclk_and_stutter;
 
+   context->bw_ctx.dml.validate_max_state = fast_validate;
vlevel = dml_get_voltage_level(>bw_ctx.dml, pipes, 
pipe_cnt);
 
/* Last attempt with Prefetch mode 2 
(dm_prefetch_support_stutter == 3) */
@@ -1559,6 +1560,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dm_prefetch_support_stutter;
vlevel = dml_get_voltage_level(>bw_ctx.dml, 
pipes, pipe_cnt);
}
+   context->bw_ctx.dml.validate_max_state = false;
 
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 4b8f5fa0f0ad..05fa0381 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -1707,7 +1707,7 @@ static void mode_support_configuration(struct vba_vars_st 
*v,
 void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib 
*mode_lib)
 {
struct vba_vars_st *v = _lib->vba;
-   int i, j;
+   int i, j, start_state;
unsigned int k, m;
unsigned int MaximumMPCCombine;
unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
@@ -1720,7 +1720,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
 #endif
 
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
-
+   if (mode_lib->validate_max_state)
+   start_state = v->soc.num_states - 1;
+   else
+   start_state = 0; 
/*Scale Ratio, taps Support Check*/
 
mode_lib->vba.ScaleRatioAndTapsSupport = true;
@@ -2009,7 +2012,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
mode_lib->vba.MPCCombineMethodIncompatible = 
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage
&& 
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
 
-   for (i = 0; i < v->soc.num_states; i++) {
+   for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
@@ -2286,7 +2289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
}
}
 
-   for (i = 0; i < v->soc.num_states; ++i) {
+   for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ExceededMultistreamSlots[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && 
mode_lib->vba.OutputMultistreamId[k] == k) {
@@ -2386,7 +2389,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
}
}
 
-   for (i = 0; i < v->soc.num_states; ++i) {
+   for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
@@ -2403,7 +2406,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
}
}
 
-   for (i = 0; i < v->soc.num_states; ++i) {
+   for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] 

[PATCH 02/23] drm/amd/display: Add debug bit to disable unbounded requesting

2022-12-09 Thread jdhillon
From: Dillon Varone 

[Description]
Add debug bit to disable unbounded requesting.

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dc.h   | 1 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 1 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 3 ++-
 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c   | 1 +
 4 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 85ebeaa2de18..a76031d7e202 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -872,6 +872,7 @@ struct dc_debug_options {
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
+   bool disable_unbounded_requesting;
 };
 
 struct gpu_info_soc_bounding_box_v1_0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e4dbc8353ea3..dfecdf3e25e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -726,6 +726,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor 
"fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 6, // 60us
+   .disable_unbounded_requesting = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 783935c4e664..04fca788c50b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -357,6 +357,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct 
dc_state *context,
int i, pipe_cnt;
struct resource_context *res_ctx = >res_ctx;
struct pipe_ctx *pipe;
+   bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || 
dc->debug.disable_unbounded_requesting;
 
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 
@@ -373,7 +374,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct 
dc_state *context,
 */
if (pipe_cnt == 1) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
-   if (pipe->plane_state && !dc->debug.disable_z9_mpc && 
pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+   if (pipe->plane_state && !disable_unbounded_requesting && 
pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = 
DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index d1f36df03c2e..62e400e90b56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor 
"fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 6, // 60us
+   .disable_unbounded_requesting = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-- 
2.34.1



[PATCH 00/23] DC Patches December 12 2022

2022-12-09 Thread jdhillon
This DC patchset brings improvements in multiple areas. In summary, we 
highlight:

* Fix array index out of bound error
* Speed up DML fast validate
* Implement multiple secure display 
* MST HDCP for multiple display 
* Add DPIA notification 
* Add support for three new square pattern variants

Cc: Daniel Wheeler 



Alan Liu (2):
  drm/amd/display: Implement multiple secure display
  drm/amd/display: Fix when disabling secure_display

Alvin Lee (3):
  drm/amd/display: Block FPO / SubVP (DRR) on HDMI VRR configs
  drm/amd/display: Clear link res when merging a pipe split
  drm/amd/display: Block subvp if center timing is in use

Aric Cyr (1):
  drm/amd/display: 3.2.216

Dillon Varone (3):
  drm/amd/display: Add debug bit to disable unbounded requesting
  drm/amd/display: run subvp validation with supported vlevel
  drm/amd/display: Reduce expected sdp bandwidth for dcn321

Fangzhi Zuo (1):
  drm/amd/display: Demote Error Level When ODM Transition Supported

Hamza Mahfooz (1):
  drm/amd/display: fix warning in amdgpu_dm_crtc_configure_crc_source()

Ian Chen (1):
  drm/amd/display: Revert Reduce delay when sink device not able to ACK
00340h write

Ilya Bakoulin (1):
  drm/amd/display: Speed up DML fast_validate path

Leo (Hanghong) Ma (1):
  drm/amd/display: Revert Scaler HCBlank issue workaround

Mustapha Ghaddar (1):
  drm/amd/display: Add DPIA NOTIFICATION logic

Roman Li (2):
  drm/amd/display: Fix potential null-deref in dm_resume
  drm/amd/display: Clear MST topology if it fails to resume

Samson Tam (1):
  drm/amd/display: Check for PSR in no memory request case

Wenjing Liu (2):
  drm/amd/display: add support for three new square pattern variants
from DP2.1 specs
  drm/amd/display: use encoder type independent hwss instead of
accessing enc directly

hersen wu (3):
  drm/amd/display: phase2 enable mst hdcp multiple displays
  drm/amd/display: save restore hdcp desired for disp unplug plug from
mst hub
  drm/amd/display: phase3 mst hdcp for multiple displays

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 252 ++
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |   7 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 153 ++-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h |  26 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  38 +--
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c| 160 ---
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h|  17 +-
 .../display/amdgpu_dm/amdgpu_dm_mst_types.c   |  25 ++
 drivers/gpu/drm/amd/display/dc/core/dc.c  |   5 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c |   6 -
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |   1 -
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  |  80 +++---
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  15 +-
 drivers/gpu/drm/amd/display/dc/core/dc_stat.c |   1 +
 drivers/gpu/drm/amd/display/dc/dc.h   |   3 +-
 drivers/gpu/drm/amd/display/dc/dc_dp_types.h  |   4 +-
 drivers/gpu/drm/amd/display/dc/dc_stream.h|   3 +-
 .../dc/dcn31/dcn31_hpo_dp_link_encoder.c  |   5 +-
 .../drm/amd/display/dc/dcn32/dcn32_hwseq.c|   3 +-
 .../drm/amd/display/dc/dcn32/dcn32_resource.c |   1 +
 .../drm/amd/display/dc/dcn32/dcn32_resource.h |   1 +
 .../display/dc/dcn32/dcn32_resource_helpers.c |  16 +-
 .../amd/display/dc/dcn321/dcn321_resource.c   |   1 +
 .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  |  25 +-
 .../dc/dml/dcn32/display_mode_vba_32.c|  37 +--
 .../amd/display/dc/dml/dcn321/dcn321_fpu.c|   2 +-
 .../drm/amd/display/dc/dml/display_mode_lib.h |   1 +
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h   |   3 +
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   |  83 ++
 .../drm/amd/display/dmub/src/dmub_srv_stat.c  |  21 ++
 .../gpu/drm/amd/display/include/dpcd_defs.h   |   5 +-
 .../amd/display/include/link_service_types.h  |   7 +-
 32 files changed, 711 insertions(+), 296 deletions(-)

-- 
2.34.1



[PATCH 09/12] drm/amd/display: Don't overwrite subvp pipe info in fast updates

2022-11-24 Thread jdhillon
From: Alvin Lee 

[Description]
- This is a workaround to avoid concurrency issues -- a fast update
  creates a shallow copy of the dc current_state, and removes all
  subvp/phantom related flags.
- We want to prevent the fast update thread from removing those
  flags in case there's another thread running that requires
  the info for proper programming

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  2 +-
 .../drm/amd/display/dc/dcn32/dcn32_resource.c | 64 +++
 .../drm/amd/display/dc/dcn32/dcn32_resource.h |  2 +-
 .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  |  4 +-
 .../gpu/drm/amd/display/dc/inc/core_types.h   |  2 +-
 5 files changed, 44 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a7bfe0b6a5f3..87994ae0a397 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3061,7 +3061,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
 * Ensures that we have enough pipes for newly added MPO planes
 */
if (dc->res_pool->funcs->remove_phantom_pipes)
-   dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+   dc->res_pool->funcs->remove_phantom_pipes(dc, context, 
false);
 
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 06489df85ac1..e4dbc8353ea3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1743,7 +1743,7 @@ void dcn32_retain_phantom_pipes(struct dc *dc, struct 
dc_state *context)
 }
 
 // return true if removed piped from ctx, false otherwise
-bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool 
fast_update)
 {
int i;
bool removed_pipe = false;
@@ -1770,14 +1770,23 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct 
dc_state *context)
removed_pipe = true;
}
 
-   // Clear all phantom stream info
-   if (pipe->stream) {
-   pipe->stream->mall_stream_config.type = SUBVP_NONE;
-   pipe->stream->mall_stream_config.paired_stream = NULL;
-   }
+   /* For non-full updates, a shallow copy of the current state
+* is created. In this case we don't want to erase the current
+* state (there can be 2 HIRQL threads, one in flip, and one in
+* checkMPO) that can cause a race condition.
+*
+* This is just a workaround, needs a proper fix.
+*/
+   if (!fast_update) {
+   // Clear all phantom stream info
+   if (pipe->stream) {
+   pipe->stream->mall_stream_config.type = 
SUBVP_NONE;
+   pipe->stream->mall_stream_config.paired_stream 
= NULL;
+   }
 
-   if (pipe->plane_state) {
-   pipe->plane_state->is_phantom = false;
+   if (pipe->plane_state) {
+   pipe->plane_state->is_phantom = false;
+   }
}
}
return removed_pipe;
@@ -1950,23 +1959,28 @@ int dcn32_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
 
-   switch (pipe->stream->mall_stream_config.type) {
-   case SUBVP_MAIN:
-   pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = 
dm_use_mall_pstate_change_sub_viewport;
-   subvp_in_use = true;
-   break;
-   case SUBVP_PHANTOM:
-   pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = 
dm_use_mall_pstate_change_phantom_pipe;
-   pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = 
dm_use_mall_static_screen_disable;
-   // Disallow unbounded req for SubVP according to DCHUB 
programming guide
-   pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
-   break;
-   case SUBVP_NONE:
-   pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = 
dm_use_mall_pstate_change_disable;
-   pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = 
dm_use_mall_static_screen_disable;
-   break;
-   default:
-

[PATCH 08/12] drm/amd/display: program output tf when required

2022-11-24 Thread jdhillon
From: Dillon Varone 

[Description]
Output transfer function must be programmed per pipe as part of a front end
update when the plane changes, or output transfer function changes for a
given plane.

Reviewed-by: Alvin Lee 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index db57b17061ae..bc4a303cd864 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1741,7 +1741,10 @@ static void dcn20_program_pipe(
 * only do gamma programming for powering on, internal memcmp to avoid
 * updating on slave planes
 */
-   if (pipe_ctx->update_flags.bits.enable || 
pipe_ctx->stream->update_flags.bits.out_tf)
+   if (pipe_ctx->update_flags.bits.enable ||
+   pipe_ctx->update_flags.bits.plane_changed ||
+   pipe_ctx->stream->update_flags.bits.out_tf ||
+   
pipe_ctx->plane_state->update_flags.bits.output_tf_change)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, 
pipe_ctx->stream);
 
/* If the pipe has been enabled or has a different opp, we
-- 
2.34.1



[PATCH 12/12] drm/amd/display: 3.2.214

2022-11-24 Thread jdhillon
From: Aric Cyr 

This version brings along following fixes:
-Program output transfer function when required
-Fix arithmetic error in MALL size calculations for subvp
-DCC Meta pitch used for MALL allocation
-Debugfs entry to tell if connector is DPIA link
-Use largest vready_offset in pipe group
-Fixes race condition in DPIA Aux transfer

Reviewed-by: Rodrigo Siqueira 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Aric Cyr 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index a19a890f1d76..4a7c0356d9c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.213"
+#define DC_VER "3.2.214"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.34.1



[PATCH 06/12] drm/amd/display: Retain phantom pipes when min transition into subvp

2022-11-24 Thread jdhillon
From: Alvin Lee 

[Description]
- When entering into a SubVP config that requires a minimal
  transition we need to retain phantom pipes and also restore
  the mall config
- This is because the min transition will remove phantom pipes
  from the context (shallow copy) and not restore it's original
  state
- This is just a workaround, and needs a proper fix

Reviewed-by: Jun Lei 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 21 ++-
 drivers/gpu/drm/amd/display/dc/dc_stream.h| 11 ++
 .../drm/amd/display/dc/dcn32/dcn32_resource.c |  2 ++
 .../drm/amd/display/dc/dcn32/dcn32_resource.h | 11 --
 .../amd/display/dc/dcn321/dcn321_resource.c   |  2 ++
 .../gpu/drm/amd/display/dc/inc/core_types.h   |  2 ++
 6 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f9b8b6f6fd31..a7bfe0b6a5f3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3954,6 +3954,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *context;
enum surface_update_type update_type;
int i;
+   struct mall_temp_config mall_temp_config;
 
/* In cases where MPO and split or ODM are used transitions can
 * cause underflow. Apply stream configuration with minimal pipe
@@ -3985,11 +3986,29 @@ bool dc_update_planes_and_stream(struct dc *dc,
 
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
+   /* Since all phantom pipes are removed in full validation,
+* we have to save and restore the subvp/mall config when
+* we do a minimal transition since the flags marking the
+* pipe as subvp/phantom will be cleared (dc copy constructor
+* creates a shallow copy).
+*/
+   if (dc->res_pool->funcs->save_mall_state)
+   dc->res_pool->funcs->save_mall_state(dc, context, 
_temp_config);
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
return false;
}
-
+   if (dc->res_pool->funcs->restore_mall_state)
+   dc->res_pool->funcs->restore_mall_state(dc, context, 
_temp_config);
+
+   /* If we do a minimal transition with plane removal and the 
context
+* has subvp we also have to retain back the phantom stream / 
planes
+* since the refcount is decremented as part of the min 
transition
+* (we commit a state with no subvp, so the phantom streams / 
planes
+* had to be removed).
+*/
+   if (dc->res_pool->funcs->retain_phantom_pipes)
+   dc->res_pool->funcs->retain_phantom_pipes(dc, context);
update_type = UPDATE_TYPE_FULL;
}
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h 
b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e0cee9666c48..dfd3df1d2f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -160,6 +160,17 @@ struct mall_stream_config {
struct dc_stream_state *paired_stream;  // master / slave stream
 };
 
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+   struct mall_stream_config mall_stream_config[MAX_PIPES];
+   bool is_phantom_plane[MAX_PIPES];
+};
+
 struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 99ddd2232322..06489df85ac1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -2055,6 +2055,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
.retain_phantom_pipes = dcn32_retain_phantom_pipes,
+   .save_mall_state = dcn32_save_mall_state,
+   .restore_mall_state = dcn32_restore_mall_state,
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index c50bb34b515f..4e6b71832187 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -45,17 +45,6 @@
 extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
 extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
 
-/* Temp struct used to save and 

[PATCH 10/12] drm/amd/display: Fix DTBCLK disable requests and SRC_SEL programming

2022-11-24 Thread jdhillon
From: Alvin Lee 

[Description]
- When transitioning FRL / DP2 is not required, we will always request
  DTBCLK = 0Mhz, but PMFW returns the min freq
- This causes us to make DTBCLK requests every time we call optimize
  after transitioning from FRL to non-FRL
- If DTBCLK is not required, request the min instead (then we only need
  to make 1 extra request at boot time)
- Also when programming PIPE_DTO_SRC_SEL, don't program for DP
  first, just program once for the required selection (programming
  DP on an HDMI connection then switching back causes corruption)

Reviewed-by: Dillon Varone 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Alvin Lee 
---
 .../gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c| 2 +-
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c   | 6 +-
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 6f77d8e538ab..9eb9fe5b8d2c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -438,7 +438,7 @@ static void dcn32_update_clocks(struct clk_mgr 
*clk_mgr_base,
}
 
if (!new_clocks->dtbclk_en) {
-   new_clocks->ref_dtbclk_khz = 0;
+   new_clocks->ref_dtbclk_khz = 
clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
}
 
/* clock limits are received with MHz precision, divide by 1000 to 
prevent setting clocks at every call */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index df4f25119142..e4472c6be6c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -225,11 +225,7 @@ static void dccg32_set_dtbclk_dto(
} else {
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLK_DTO_ENABLE[params->otg_inst], 0,
-   PIPE_DTO_SRC_SEL[params->otg_inst], 1);
-   if (params->is_hdmi)
-   REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
-   PIPE_DTO_SRC_SEL[params->otg_inst], 0);
-
+   PIPE_DTO_SRC_SEL[params->otg_inst], 
params->is_hdmi ? 0 : 1);
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
}
-- 
2.34.1



[PATCH 11/12] drm/amd/display: set per pipe dppclk to 0 when dpp is off

2022-11-24 Thread jdhillon
From: Dmytro Laktyushkin 

The 'commit 52e4fdf09ebc ("drm/amd/display: use low clocks for no plane
configs")' introduced a change that set low clock values for DCN31 and
DCN32. As a result of these changes, DC started to spam the log with the
following warning:

[ cut here ]
WARNING: CPU: 8 PID: 1486 at
drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_dccg.c:58
dccg2_update_dpp_dto+0x3f/0xf0 [amdgpu]
[..]
CPU: 8 PID: 1486 Comm: kms_atomic Tainted: G W 5.18.0+ #1
RIP: 0010:dccg2_update_dpp_dto+0x3f/0xf0 [amdgpu]
RSP: 0018:bbd8025334d0 EFLAGS: 00010206
RAX: 01ee RBX: a02c87dd3de0 RCX: 000a7f80
RDX: 0007dec3 RSI:  RDI: a02c87dd3de0
RBP: bbd8025334e8 R08: 0001 R09: 0005
R10: 000331a0 R11: c0b03d80 R12: a02ca576d000
R13: a02cd02c R14: 001453bc R15: a02cdc28
[..]
dcn20_update_clocks_update_dpp_dto+0x4e/0xa0 [amdgpu]
dcn32_update_clocks+0x5d9/0x650 [amdgpu]
dcn20_prepare_bandwidth+0x49/0x100 [amdgpu]
dcn30_prepare_bandwidth+0x63/0x80 [amdgpu]
dc_commit_state_no_check+0x39d/0x13e0 [amdgpu]
dc_commit_streams+0x1f9/0x3b0 [amdgpu]
dc_commit_state+0x37/0x120 [amdgpu]
amdgpu_dm_atomic_commit_tail+0x5e5/0x2520 [amdgpu]
? _raw_spin_unlock_irqrestore+0x1f/0x40
? down_trylock+0x2c/0x40
? vprintk_emit+0x186/0x2c0
? vprintk_default+0x1d/0x20
? vprintk+0x4e/0x60

We can easily trigger this issue by using a 4k@120 or a 2k@165 and
running some of the kms_atomic tests. This warning is triggered because
the per-pipe clock update is not happening; this commit fixes this issue
by ensuring that DPPCLK is updated when calculating the watermark and
dlg is invoked.

Fixes: 52e4fdf09ebc ("drm/amd/display: use low clocks for no plane configs")
Reported-by: Mark Broadworth 
Reviewed-by: Rodrigo Siqueira 
Signed-off-by: Dmytro Laktyushkin 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 12b23bd50e19..b37d14369a62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -559,6 +559,9 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+   for (i = 0; i < dc->res_pool->pipe_count; i++)
+   if (context->res_ctx.pipe_ctx[i].stream)
+   
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 5a4cdb559d4e..f94abd124021 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1320,7 +1320,10 @@ static void dcn32_calculate_dlg_params(struct dc *dc, 
struct dc_state *context,
 
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < 
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = 
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
-   context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+   if (context->res_ctx.pipe_ctx[i].plane_state)
+   context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+   else
+   context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 
0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = 
pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
-- 
2.34.1



[PATCH 05/12] drm/amd/display: Use DCC meta pitch for MALL allocation requirements

2022-11-24 Thread jdhillon
From: Dillon Varone 

[Description]
Calculations for determining DCC meta size should be pitch*height*bpp/256.

Reviewed-by: Alvin Lee 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c|  6 +++---
 .../drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 11 ---
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 76548b4b822c..c9b2343947be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -262,11 +262,11 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc 
*dc, struct dc_state *c
num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / 
mblk_width) *
((mall_alloc_height_blk_aligned + mblk_height - 
1) / mblk_height);
 
-   /* For DCC:
-* meta_num_mblk = 
CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1)
+   /*For DCC:
+* meta_num_mblk = 
CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
 */
if (pipe->plane_state->dcc.enable)
-   num_mblks += (mall_alloc_width_blk_aligned * 
mall_alloc_width_blk_aligned * bytes_per_pixel +
+   num_mblks += (pipe->plane_state->dcc.meta_pitch * 
pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
(256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 
1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
 
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index fa3778849db1..94fd125daa6b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -121,14 +121,19 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct 
dc *dc, struct dc_stat
 */
num_mblks = ((mall_alloc_width_blk_aligned + mblk_width 
- 1) / mblk_width) *
((mall_alloc_height_blk_aligned + 
mblk_height - 1) / mblk_height);
+
+   /*For DCC:
+* meta_num_mblk = 
CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
+*/
+   if (pipe->plane_state->dcc.enable)
+   num_mblks += (pipe->plane_state->dcc.meta_pitch 
* pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
+   (256 * 
DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
+
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. 
Add +2 for worst case alignment
// (MALL is 64-byte aligned)
cache_lines_per_plane = bytes_in_mall / 
dc->caps.cache_line_size + 2;
 
-   /* For DCC divide by 256 */
-   if (pipe->plane_state->dcc.enable)
-   cache_lines_per_plane = cache_lines_per_plane + 
(cache_lines_per_plane / 256) + 1;
cache_lines_used += cache_lines_per_plane;
}
}
-- 
2.34.1



[PATCH 07/12] drm/amd/display: Fix arithmetic error in MALL size calculations for subvp

2022-11-24 Thread jdhillon
From: Dillon Varone 

[Description]
Need to subtract unused section of the viewport when calculating required space
in MALL for subvp instead of adding, to prevent over allocation.

Reviewed-by: Alvin Lee 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 94fd125daa6b..783935c4e664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -97,14 +97,14 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct 
dc *dc, struct dc_stat
 * FLOOR(vp_x_start, blk_width)
 */
full_vp_width_blk_aligned = 
((pipe->plane_res.scl_data.viewport.x +
-   pipe->plane_res.scl_data.viewport.width 
+ mblk_width - 1) / mblk_width * mblk_width) +
+   pipe->plane_res.scl_data.viewport.width 
+ mblk_width - 1) / mblk_width * mblk_width) -
(pipe->plane_res.scl_data.viewport.x / 
mblk_width * mblk_width);
 
/* full_vp_height_blk_aligned = FLOOR(vp_y_start + 
full_vp_height + blk_height - 1, blk_height) -
 * FLOOR(vp_y_start, blk_height)
 */
full_vp_height_blk_aligned = 
((pipe->plane_res.scl_data.viewport.y +
-   full_vp_height + mblk_height - 1) / 
mblk_height * mblk_height) +
+   full_vp_height + mblk_height - 1) / 
mblk_height * mblk_height) -
(pipe->plane_res.scl_data.viewport.y / 
mblk_height * mblk_height);
 
/* mall_alloc_width_blk_aligned_l/c = 
full_vp_width_blk_aligned_l/c */
-- 
2.34.1



[PATCH 01/12] drm/amd/display: Fix race condition in DPIA AUX transfer

2022-11-24 Thread jdhillon
From: Stylon Wang 

[Why]
This fix was intended for improving on coding style but in the process
uncovers a race condition, which explains why we are getting incorrect
length in DPIA AUX replies. Due to the call path of DPIA AUX going from
DC back to DM layer then again into DC and the added complexities on top
of current DC AUX implementation, a proper fix to rely on current dc_lock
to address the race condition is difficult without a major overhaul
on how DPIA AUX is implemented.

[How]
- Add a mutex dpia_aux_lock to protect DPIA AUX transfers
- Remove DMUB_ASYNC_TO_SYNC_ACCESS_* codes and rely solely on
  aux_return_code_type for error reporting and handling
- Separate SET_CONFIG from DPIA AUX transfer because they have quite
  different processing logic
- Remove unnecessary type casting to and from void * type

Reviewed-by: Nicholas Kazlauskas 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Stylon Wang 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 151 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  17 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  10 +-
 3 files changed, 91 insertions(+), 87 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4fe7971e3e58..da1be67831d6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,14 +147,6 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* Number of bytes in PSP footer for firmware. */
 #define PSP_FOOTER_BYTES 0x100
 
-/*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
-
 /**
  * DOC: overview
  *
@@ -1442,6 +1434,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
memset(_params, 0, sizeof(init_params));
 #endif
 
+   mutex_init(>dm.dpia_aux_lock);
mutex_init(>dm.dc_lock);
mutex_init(>dm.audio_lock);
 
@@ -1806,6 +1799,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
mutex_destroy(>dm.audio_lock);
mutex_destroy(>dm.dc_lock);
+   mutex_destroy(>dm.dpia_aux_lock);
 
return;
 }
@@ -10211,91 +10205,92 @@ uint32_t dm_read_reg_func(const struct dc_context 
*ctx, uint32_t address,
return value;
 }
 
-static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
-   struct dc_context *ctx,
-   uint8_t status_type,
-   uint32_t *operation_result)
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+   struct dc_context *ctx,
+   unsigned int link_index,
+   struct aux_payload *payload,
+   enum aux_return_code_type *operation_result)
 {
struct amdgpu_device *adev = ctx->driver_context;
-   int return_status = -1;
struct dmub_notification *p_notify = adev->dm.dmub_notify;
+   int ret = -1;
 
-   if (is_cmd_aux) {
-   if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-   return_status = p_notify->aux_reply.length;
-   *operation_result = p_notify->result;
-   } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
-   *operation_result = AUX_RET_ERROR_TIMEOUT;
-   } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
-   *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
-   } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
-   *operation_result = AUX_RET_ERROR_INVALID_REPLY;
-   } else {
-   *operation_result = AUX_RET_ERROR_UNKNOWN;
+   mutex_lock(>dm.dpia_aux_lock);
+   if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+   *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+   goto out;
+   }
+
+   if (!wait_for_completion_timeout(>dm.dmub_aux_transfer_done, 10 * 
HZ)) {
+   DRM_ERROR("wait_for_completion_timeout timeout!");
+   *operation_result = AUX_RET_ERROR_TIMEOUT;
+   goto out;
+   }
+
+   if (p_notify->result != AUX_RET_SUCCESS) {
+   /*
+* Transient states before tunneling is enabled could
+* lead to this error. We can ignore this for now.
+*/
+   if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+   DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+   payload->address, payload->length,
+   p_notify->result);
}
-   } else {
-   if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-   

[PATCH 04/12] drm/amd/display: MALL SS calculations should iterate over all pipes for cursor

2022-11-24 Thread jdhillon
From: Dillon Varone 

[Description]
MALL SS allocation calculations should iterate over all pipes to determine
the allocation size required for HW cursor.

Reviewed-by: Alvin Lee 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Dillon Varone 
---
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 2f19f711d8be..76548b4b822c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -316,8 +316,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc 
*dc, struct dc_state *c
cache_lines_used += (((cursor_size + 
DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /

DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /

dc->caps.cache_line_size + 2;
+   break;
}
-   break;
}
}
 
-- 
2.34.1



[PATCH 03/12] drm/amd/display: Create debugfs to tell if connector is DPIA link

2022-11-24 Thread jdhillon
From: Stylon Wang 

[Why]
Tests need to tell if display is connected via USB4 DPIA link.
Currently this is only possible via analyzing dmesg logs.

[How]
Create a per-connector debugfs entry to report if the link is
tunneled via USB4 DPIA.

Reviewed-by: Wayne Lin 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Stylon Wang 
---
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 23 ++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 2c43cdd2e707..461037a3dd75 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -2639,6 +2639,25 @@ static int dp_mst_progress_status_show(struct seq_file 
*m, void *unused)
return 0;
 }
 
+/*
+ * Reports whether the connected display is a USB4 DPIA tunneled display
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link
+ */
+static int is_dpia_link_show(struct seq_file *m, void *data)
+{
+   struct drm_connector *connector = m->private;
+   struct amdgpu_dm_connector *aconnector = 
to_amdgpu_dm_connector(connector);
+   struct dc_link *link = aconnector->dc_link;
+
+   if (connector->status != connector_status_connected)
+   return -ENODEV;
+
+   seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
"yes" :
+   (link->ep_type == DISPLAY_ENDPOINT_PHY) ? "no" 
: "unknown");
+
+   return 0;
+}
+
 DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2650,6 +2669,7 @@ DEFINE_SHOW_ATTRIBUTE(internal_display);
 DEFINE_SHOW_ATTRIBUTE(psr_capability);
 DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
 DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
+DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
 
 static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -2794,7 +2814,8 @@ static const struct {
{"max_bpc", _max_bpc_debugfs_fops},
{"dsc_disable_passthrough", 
_dsc_disable_passthrough_debugfs_fops},
{"is_mst_connector", _is_mst_connector_fops},
-   {"mst_progress_status", _mst_progress_status_fops}
+   {"mst_progress_status", _mst_progress_status_fops},
+   {"is_dpia_link", _dpia_link_fops}
 };
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-- 
2.34.1



[PATCH 00/12] DC Patches November 28 2022

2022-11-24 Thread jdhillon
This DC patchset brings improvements in multiple areas. In summary, we have:

* Program output transfer function when required
* Fix arithmetic error in MALL size calculations for subvp
* DCC Meta pitch used for MALL allocation 
* Debugfs entry to tell if connector is DPIA link 
* Use largest vready_offset in pipe group 
* Fix race condition in DPIA AUX transfer


Cc: Daniel Wheeler 


Alvin Lee (3):
  drm/amd/display: Retain phantom pipes when min transition into subvp
  drm/amd/display: Don't overwrite subvp pipe info in fast updates
  drm/amd/display: Fix DTBCLK disable requests and SRC_SEL programming

Aric Cyr (1):
  drm/amd/display: 3.2.214

Dillon Varone (4):
  drm/amd/display: MALL SS calculations should iterate over all pipes
for cursor
  drm/amd/display: Use DCC meta pitch for MALL allocation requirements
  drm/amd/display: Fix arithmetic error in MALL size calculations for
subvp
  drm/amd/display: program output tf when required

Dmytro Laktyushkin (1):
  drm/amd/display: set per pipe dppclk to 0 when dpp is off

Stylon Wang (2):
  drm/amd/display: Fix race condition in DPIA AUX transfer
  drm/amd/display: Create debugfs to tell if connector is DPIA link

Wesley Chalmers (1):
  drm/amd/display: Use the largest vready_offset in pipe group

 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 151 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  17 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c |  23 ++-
 .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c |  10 +-
 .../display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c  |   2 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  23 ++-
 drivers/gpu/drm/amd/display/dc/dc.h   |   2 +-
 drivers/gpu/drm/amd/display/dc/dc_stream.h|  11 ++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  30 +++-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  34 +++-
 .../gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c |   6 +-
 .../drm/amd/display/dc/dcn32/dcn32_hwseq.c|   8 +-
 .../drm/amd/display/dc/dcn32/dcn32_resource.c |  66 +---
 .../drm/amd/display/dc/dcn32/dcn32_resource.h |  13 +-
 .../display/dc/dcn32/dcn32_resource_helpers.c |  15 +-
 .../amd/display/dc/dcn321/dcn321_resource.c   |   2 +
 .../drm/amd/display/dc/dml/dcn31/dcn31_fpu.c  |   3 +
 .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  |   9 +-
 .../gpu/drm/amd/display/dc/inc/core_types.h   |   4 +-
 19 files changed, 277 insertions(+), 152 deletions(-)

-- 
2.34.1



[PATCH 02/12] drm/amd/display: Use the largest vready_offset in pipe group

2022-11-24 Thread jdhillon
From: Wesley Chalmers 

[WHY]
Corruption can occur in LB if vready_offset is not large enough.
DML calculates vready_offset for each pipe, but we currently select the
top pipe's vready_offset, which is not necessarily enough for all pipes
in the group.

[HOW]
Wherever program_global_sync is currently called, iterate through the
entire pipe group and find the highest vready_offset.

Reviewed-by: Dillon Varone 
Acked-by: Jasdeep Dhillon 
Signed-off-by: Wesley Chalmers 
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 30 +--
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c| 29 --
 2 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0db02e76dcc5..355ffed7380b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -869,6 +869,32 @@ static void false_optc_underflow_wa(
tg->funcs->clear_optc_underflow(tg);
 }
 
+static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+   struct pipe_ctx *other_pipe;
+   int vready_offset = pipe->pipe_dlg_param.vready_offset;
+
+   /* Always use the largest vready_offset of all connected pipes */
+   for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = 
other_pipe->bottom_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+   for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = 
other_pipe->top_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+   for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = 
other_pipe->next_odm_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+   for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = 
other_pipe->prev_odm_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+
+   return vready_offset;
+}
+
 enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -912,7 +938,7 @@ enum dc_status dcn10_enable_stream_timing(
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
>timing,
-   pipe_ctx->pipe_dlg_param.vready_offset,
+   calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
@@ -2908,7 +2934,7 @@ void dcn10_program_pipe(
 
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
-   pipe_ctx->pipe_dlg_param.vready_offset,
+   calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 3f3d4daa6294..db57b17061ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1652,6 +1652,31 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->phantom_hubp_post_enable(hubp);
 }
 
+static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+   struct pipe_ctx *other_pipe;
+   int vready_offset = pipe->pipe_dlg_param.vready_offset;
+
+   /* Always use the largest vready_offset of all connected pipes */
+   for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = 
other_pipe->bottom_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+   for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = 
other_pipe->top_pipe) {
+   if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
+   vready_offset = 
other_pipe->pipe_dlg_param.vready_offset;
+   }
+   for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = 
other_pipe->next_odm_pipe) {
+