author    Dave Airlie <airlied@redhat.com>    2025-03-24 17:56:31 +1000
committer Dave Airlie <airlied@redhat.com>    2025-03-24 17:56:40 +1000
commit    f72e21eaaefe54e3f2eadaa63f55f9f3ba01a786 (patch)
tree      6262bcd223f5b5587889ad52fbbfcdc4f6aa33b7 /drivers/gpu/drm/amd/display
parent    0f04462874e1228cf58e19a3d1710db9757dd695 (diff)
parent    eb6cdfb807d038d9b9986b5c87188f28a4071eae (diff)
Merge tag 'amd-drm-next-6.15-2025-03-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.15-2025-03-14:

amdgpu:
- GC 12.x DCC fixes
- VCN 2.5 fix
- Replay/PSR fixes
- HPD fixes
- DMUB fixes
- Backlight fixes
- DM suspend/resume cleanup
- Misc DC fixes
- HDCP UAF fix
- Misc code cleanups
- VCE 2.x fix
- Wedged event support
- GC 12.x PTE fixes
- Misc multimedia cap fixes
- Enable unique id support for GC 12.x
- XGMI code cleanup
- GC 11.x and 12.x MQD cleanups
- SMU 13.x updates
- SMU 14.x fan speed reporting
- Enable VCN activity reporting for additional chips
- SR-IOV fixes
- RAS fixes
- MES fixes

amdkfd:
- Dequeue wait count API cleanups
- Queue eviction cleanup fixes
- Retry fault fixes
- Dequeue retry timeout adjustments
- GC 12.x trap handler fixes
- GC 9.5.x updates

radeon:
- VCE command parser fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250314170618.3142042-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 64
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 272
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 143
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 93
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 98
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 93
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 218
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 308
49 files changed, 1123 insertions, 703 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6a54f1cfa125..4f1cf8afe25f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -250,6 +250,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -3137,6 +3141,21 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
+static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ return 0;
+}
+
static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3167,10 +3186,11 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
return 0;
}
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ if (!adev->dm.cached_state) {
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+ }
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3432,6 +3452,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
@@ -3596,6 +3622,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.early_fini = amdgpu_dm_early_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
+ .prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
.is_idle = dm_is_idle,
@@ -4986,6 +5013,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -5053,7 +5081,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
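
The amdgpu_dm.c changes above split atomic-state caching into a new
prepare_suspend hook and seed dm->brightness[] from the registration-time
backlight_properties instead of hardcoding AMDGPU_MAX_BL_LEVEL, so
dm_resume() reapplies the level the device was actually registered with. A
minimal sketch of that registration pattern using the generic backlight API;
"sketch_bl", the 255 max level, and the register_and_cache() helper are
illustrative assumptions, not driver code:

    #include <linux/backlight.h>

    /* Sketch: register a backlight device and cache its initial level,
     * mirroring how the diff seeds dm->brightness[bl_idx]. */
    static struct backlight_device *
    register_and_cache(struct device *dev, const struct backlight_ops *ops,
                       u32 *cached_level)
    {
            struct backlight_properties props = {
                    .type = BACKLIGHT_RAW,
                    .max_brightness = 255,  /* assumption for the sketch */
                    .brightness = 255,      /* initial level */
            };
            struct backlight_device *bd;

            bd = backlight_device_register("sketch_bl", dev, NULL, ops, &props);
            if (!IS_ERR(bd))
                    *cached_level = props.brightness; /* reapplied on resume */
            return bd;
    }
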
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8238cfd276be..a3e93b2891f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -172,7 +172,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -209,7 +209,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,7 +219,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
@@ -239,7 +238,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +245,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -259,8 +257,6 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
}
process_output(hdcp_w);
-
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +273,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +281,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +317,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +349,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
- mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +361,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +401,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +411,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +420,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +428,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -455,6 +442,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
@@ -469,7 +457,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- bool res = true;
if (!psp->dtm_context.context.initialized) {
DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
@@ -478,7 +465,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
- mutex_lock(&psp->dtm_context.mutex);
+ guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -490,12 +477,10 @@ static bool enable_assr(void *handle, struct dc_link *link)
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
DRM_INFO("Failed to enable ASSR");
- res = false;
+ return false;
}
- mutex_unlock(&psp->dtm_context.mutex);
-
- return res;
+ return true;
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -556,13 +541,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
-
}
/**
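
The amdgpu_dm_hdcp.c hunks convert manual mutex_lock()/mutex_unlock() pairs
to the scope-based guard() helper from <linux/cleanup.h>, which releases the
lock on every return path and lets enable_assr() drop its "res" bookkeeping
in favor of early returns. A minimal sketch of the pattern (demo names are
illustrative):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    /* guard() releases the mutex automatically at scope exit,
     * including on early returns -- no unlock calls needed. */
    static bool demo_guarded_op(bool fail)
    {
            guard(mutex)(&demo_lock);

            if (fail)
                    return false;   /* lock dropped here too */

            return true;
    }
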
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 2b63cbab0e87..b61e210f6246 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -890,8 +890,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
int i;
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
@@ -904,10 +912,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fallback to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -917,12 +946,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
- }
}
/**
@@ -938,7 +961,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- int i;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -952,9 +975,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -964,10 +996,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
- }
}
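
The reworked HPD init/fini paths take per-connector interrupt references via
amdgpu_irq_get()/amdgpu_irq_put() instead of a blanket loop, falling back to
dc_interrupt_set() for HPD sources beyond the mode_info.num_hpd types
registered with the base driver (the TODO in the diff). A condensed sketch of
that decision; hpd_ref_set() is a hypothetical wrapper, not a kernel symbol:

    /* Hypothetical helper condensing the branch used in both
     * amdgpu_dm_hpd_init() and amdgpu_dm_hpd_fini(). */
    static void hpd_ref_set(struct amdgpu_device *adev,
                            struct dc_link *dc_link, bool enable)
    {
            int irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

            if (irq_type < adev->mode_info.num_hpd) {
                    /* registered with the base driver: refcounted */
                    if (enable)
                            amdgpu_irq_get(adev, &adev->hpd_irq, irq_type);
                    else
                            amdgpu_irq_put(adev, &adev->hpd_irq, irq_type);
            } else {
                    /* extra bios-reported connectors: program dc directly */
                    dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd,
                                     enable);
            }
    }
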
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 0090e08d5057..3e0f45f1711c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -700,7 +700,7 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev,
uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1);
- uint8_t max_comp_block[] = {1, 0};
+ uint8_t max_comp_block[] = {2, 1, 0};
uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0};
uint8_t i = 0, j = 0;
uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR};
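
The one-line plane change extends max_comp_block[] to {2, 1, 0}, which in
drm_fourcc.h terms advertises GFX12 DCC modifiers for 256B, 128B, and 64B max
compressed block sizes rather than just 128B and 64B. A hedged, kernel-side
sketch of how one modifier per entry would be assembled, assuming the
AMD_FMT_MOD_DCC_BLOCK_* defines apply here; the helper itself is
illustrative:

    #include <drm/drm_fourcc.h>

    /* Sketch: emit one DCC modifier per max-compressed-block size. */
    static void build_gfx12_dcc_mods(uint64_t ver, uint64_t *out)
    {
            static const uint8_t max_comp_block[] = {
                    AMD_FMT_MOD_DCC_BLOCK_256B,     /* 2, newly advertised */
                    AMD_FMT_MOD_DCC_BLOCK_128B,     /* 1 */
                    AMD_FMT_MOD_DCC_BLOCK_64B,      /* 0 */
            };
            size_t i;

            for (i = 0; i < ARRAY_SIZE(max_comp_block); i++)
                    out[i] = ver | AMD_FMT_MOD_SET(DCC, 1) |
                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK,
                                             max_comp_block[i]);
    }
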
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index a0fb4481d2f1..19a15acd1509 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index c3e50c3aaa60..4b19d9cf27ce 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e71ea21401f5..28d1353f403d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -453,6 +453,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->caps.max_v_total != 0 &&
(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ stream->adjust.timing_adjust_pending = false;
if (adjust->allow_otg_v_count_halt)
return set_long_vtotal(dc, stream, adjust);
else
@@ -466,7 +467,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
dc->hwss.set_drr(&pipe,
1,
*adjust);
-
+ stream->adjust.timing_adjust_pending = false;
return true;
}
}
@@ -3165,8 +3166,13 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
+ if (update->crtc_timing_adjust) {
+ if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
+ update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
+ update->crtc_timing_adjust->timing_adjust_pending = false;
+ }
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
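
The dc.c hunks add a timing_adjust_pending handshake: the stream-update copy
marks the adjust pending only when v_total_min/max actually change, and
dc_stream_adjust_vmin_vmax() clears it once set_drr() has been issued (or the
request falls out to the long-vtotal/reject paths). A reduced, standalone
model of that life cycle (simplified types, values made up):

    #include <stdbool.h>
    #include <stdio.h>

    struct adjust {
            unsigned int v_total_min, v_total_max;
            bool timing_adjust_pending;
    };

    static void stream_update(struct adjust *cur, struct adjust req)
    {
            /* pending only if the DRR range really changes */
            req.timing_adjust_pending =
                    cur->v_total_min != req.v_total_min ||
                    cur->v_total_max != req.v_total_max;
            *cur = req;
    }

    static void program_drr(struct adjust *cur)
    {
            /* hardware set_drr() would happen here */
            cur->timing_adjust_pending = false; /* cleared once programmed */
    }

    int main(void)
    {
            struct adjust cur = { 2200, 2200, false };
            struct adjust req = { 2200, 2640, false };

            stream_update(&cur, req);
            printf("pending=%d\n", cur.timing_adjust_pending);  /* 1 */
            program_drr(&cur);
            printf("pending=%d\n", cur.timing_adjust_pending);  /* 0 */
            return 0;
    }
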
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index e0277728268a..55b32dfbfdd6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -659,6 +659,21 @@ void set_p_state_switch_method(
}
}
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params)
+{
+ /* params can be null.*/
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, params);
+
+ if (stream)
+ stream->adjust.timing_adjust_pending = false;
+}
+
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
@@ -802,7 +817,14 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
(*num_steps)++;
}
-
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE &&
+ dc->hwss.update_visual_confirm_color) {
+ block_sequence[*num_steps].params.update_visual_confirm_params.dc = dc;
+ block_sequence[*num_steps].params.update_visual_confirm_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.update_visual_confirm_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
+ block_sequence[*num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*num_steps)++;
+ }
if (current_mpc_pipe->stream->update_flags.bits.out_csc) {
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc;
block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
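
The new set_drr_and_clear_adjust_pending() helper above centralizes the
NULL-checked tg->funcs->set_drr() call and the clearing of
stream->adjust.timing_adjust_pending; the hwseq diffs further down replace
every open-coded call site with it. The before/after shape at a call site:

    /* before (repeated across dce110/dcn10/dcn20/dcn31/dcn35/dcn401): */
    if (pipe_ctx->stream_res.tg->funcs->set_drr)
            pipe_ctx->stream_res.tg->funcs->set_drr(
                    pipe_ctx->stream_res.tg, &params);

    /* after: also clears stream->adjust.timing_adjust_pending */
    set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
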
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ea404435c9b9..313a32248cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3623,10 +3623,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
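
The dc_resource.c hunk fills in the missing COLOR_DEPTH_141414 case; the
normalization simply scales the pixel clock by bits-per-pixel relative to the
24 bpp (8-bit, 3-component) baseline, hence the 42/24 factor for 14 bpc. A
standalone worked example:

    #include <stdio.h>

    /* normalized_pix_clk = pix_clk * (bpc * 3) / 24, as in the switch */
    static unsigned long normalize_pix_clk(unsigned long pix_clk_khz,
                                           unsigned int bpc)
    {
            return pix_clk_khz * (bpc * 3) / 24;
    }

    int main(void)
    {
            /* 600000 kHz at 14 bpc -> 600000 * 42 / 24 = 1050000 kHz */
            printf("%lu kHz\n", normalize_pix_clk(600000, 14));
            return 0;
    }
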
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a62c4893e5ff..67e1bb6fa335 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.323"
+#define DC_VER "3.2.324"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 87b4c2793df3..ca6da53f45ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -70,20 +70,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +134,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,11 +193,16 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
@@ -156,7 +211,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +223,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +235,23 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+ }
+
+ return res;
+}
+
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -200,18 +272,20 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- if (!dmub->debug.timeout_occured) {
- dmub->debug.timeout_occured = true;
- dmub->debug.timeout_cmd = *cmd_list;
- dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ if (!dmub->debug.timeout_info.timeout_occured) {
+ dmub->debug.timeout_info.timeout_occured = true;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +298,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return false;
-
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- }
-
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -927,16 +937,15 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
- return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
+ return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_diagnostic_data diag_data = {0};
uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
@@ -946,49 +955,49 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
- if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
+ if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
}
DC_LOG_DEBUG("DMCUB STATE:");
- DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version);
- DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]);
- DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]);
- DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]);
- DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]);
- DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]);
- DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]);
- DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]);
- DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]);
- DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]);
- DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]);
- DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]);
- DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]);
- DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]);
- DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
- DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
- DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
+ DC_LOG_DEBUG(" dmcub_version : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
+ DC_LOG_DEBUG(" scratch [0] : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
+ DC_LOG_DEBUG(" scratch [1] : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
+ DC_LOG_DEBUG(" scratch [2] : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
+ DC_LOG_DEBUG(" scratch [3] : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
+ DC_LOG_DEBUG(" scratch [4] : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
+ DC_LOG_DEBUG(" scratch [5] : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
+ DC_LOG_DEBUG(" scratch [6] : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
+ DC_LOG_DEBUG(" scratch [7] : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
+ DC_LOG_DEBUG(" scratch [8] : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
+ DC_LOG_DEBUG(" scratch [9] : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
+ DC_LOG_DEBUG(" scratch [10] : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
+ DC_LOG_DEBUG(" scratch [11] : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
+ DC_LOG_DEBUG(" scratch [12] : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
+ DC_LOG_DEBUG(" scratch [13] : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
+ DC_LOG_DEBUG(" scratch [14] : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
+ DC_LOG_DEBUG(" scratch [15] : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
- DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
- DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
- DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
- DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
- DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr);
- DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr);
- DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size);
- DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
- DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
- DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
- DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
- DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
- DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
- DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
- DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
- DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
- DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en);
- DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled);
- DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled);
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
+ DC_LOG_DEBUG(" unk_fault_addr : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
+ DC_LOG_DEBUG(" inst_fault_addr : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
+ DC_LOG_DEBUG(" data_fault_addr : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
+ DC_LOG_DEBUG(" inbox1_rptr : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
+ DC_LOG_DEBUG(" inbox1_wptr : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
+ DC_LOG_DEBUG(" inbox1_size : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
+ DC_LOG_DEBUG(" inbox0_rptr : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
+ DC_LOG_DEBUG(" inbox0_wptr : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
+ DC_LOG_DEBUG(" inbox0_size : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
+ DC_LOG_DEBUG(" is_enabled : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
+ DC_LOG_DEBUG(" is_soft_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
+ DC_LOG_DEBUG(" is_secure_reset : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
+ DC_LOG_DEBUG(" is_traceport_en : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
+ DC_LOG_DEBUG(" is_cw0_en : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
+ DC_LOG_DEBUG(" is_cw6_en : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
@@ -1244,7 +1253,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1401,7 +1410,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
@@ -1655,7 +1664,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1674,11 +1684,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1724,7 +1736,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1760,7 +1773,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
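
The dc_dmub_srv.c rework fronts two DMUB transports behind one entry point: a
register-based inbox that sends one command at a time (waiting for idle
between commands) and the ring-buffer/frame-buffer inbox that queues a batch
and executes once, keeping multi_cmd_pending chains contiguous in the ring.
The dispatch reduces to the inbox-type check below (condensed from the diff):

    bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *srv,
                                            unsigned int count,
                                            union dmub_rb_cmd *cmd_list)
    {
            if (!srv || !srv->dmub)
                    return false;

            if (srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG)
                    return dc_dmub_srv_reg_cmd_list_queue_execute(srv, count,
                                                                  cmd_list);

            return dc_dmub_srv_fb_cmd_list_queue_execute(srv, count, cmd_list);
    }
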
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 10b48198b7a6..ada5c2fb2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
bool needs_idle_wake;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
@@ -94,7 +94,7 @@ void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
-bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 1f4f11adc491..77c87ad57220 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,7 +432,28 @@ union hdmi_encoded_link_bw {
uint8_t BW_32Gbps:1;
uint8_t BW_40Gbps:1;
uint8_t BW_48Gbps:1;
- uint8_t RESERVED:1; // Bit 7
+ uint8_t FRL_LINK_TRAINING_FINISHED:1; // Bit 7
+ } bits;
+ uint8_t raw;
+};
+
+union hdmi_tx_link_status {
+ struct {
+ uint8_t HDMI_TX_LINK_ACTIVE_STATUS:1;
+ uint8_t HDMI_TX_READY_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union autonomous_mode_and_frl_link_status {
+ struct {
+ uint8_t FRL_LT_IN_PROGRESS_STATUS:1;
+ uint8_t FRL_LT_LINK_CONFIG_IN_PROGRESS:3;
+ uint8_t RESERVED:1;
+ uint8_t FALLBACK_POLICY:1;
+ uint8_t FALLBACK_POLICY_VALID:1;
+ uint8_t REGULATED_AUTONOMOUS_MODE_SUPPORTED:1;
} bits;
uint8_t raw;
};
@@ -1166,6 +1187,7 @@ struct dc_dongle_caps {
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
+ uint32_t dp_hdmi_regulated_autonomous_mode_support;
struct dc_dongle_dfp_cap_ext dfp_cap_ext;
};
@@ -1394,6 +1416,9 @@ struct dp_trace {
#ifndef DP_LTTPR_ALPM_CAPABILITIES
#define DP_LTTPR_ALPM_CAPABILITIES 0xF0009
#endif
+#ifndef DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS
+#define DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS 0x303C
+#endif
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
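
The dc_dp_types.h additions follow the file's existing pattern: byte-sized
unions whose bitfields mirror a DPCD register layout (read via .raw, decoded
via .bits), plus #ifndef-guarded register offsets so older drm_dp headers
don't clash. A standalone sketch of decoding one of the new status bytes (the
raw value is made up):

    #include <stdint.h>
    #include <stdio.h>

    union hdmi_tx_link_status_sketch {
            struct {
                    uint8_t HDMI_TX_LINK_ACTIVE_STATUS:1;
                    uint8_t HDMI_TX_READY_STATUS:1;
                    uint8_t RESERVED:6;
            } bits;
            uint8_t raw;
    };

    int main(void)
    {
            union hdmi_tx_link_status_sketch s;

            s.raw = 0x03;   /* pretend AUX read result */
            printf("active=%u ready=%u\n",
                   s.bits.HDMI_TX_LINK_ACTIVE_STATUS,
                   s.bits.HDMI_TX_READY_STATUS);
            return 0;
    }
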
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 8f077e15b4f0..72b87b78da0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,7 +682,7 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9f3dd8824ed5..d562ddeca512 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -1017,6 +1017,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
uint32_t allow_otg_v_count_halt;
+ uint8_t timing_adjust_pending;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..a262598aa676 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,6 +63,10 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
+ /* ASIC doesn't support DMUB */
+ if (!link->ctx->dmub_srv)
+ return false;
+
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..fcd3d86ad517 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -280,7 +280,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
@@ -319,7 +321,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +334,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +349,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,7 +362,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
@@ -369,7 +375,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
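
Several hunks above correct payload_bytes to exclude the command header: the
DMUB payload is what follows struct dmub_cmd_header, so sizeof(full command)
overstates it by sizeof(header). A standalone illustration with stand-in
struct sizes:

    #include <stdint.h>
    #include <stdio.h>

    struct hdr { uint32_t w0, w1; };                      /* 8-byte stand-in */
    struct cmd { struct hdr header; uint8_t body[24]; };  /* 32 bytes total */

    int main(void)
    {
            size_t wrong = sizeof(struct cmd);                      /* 32 */
            size_t right = sizeof(struct cmd) - sizeof(struct hdr); /* 24 */

            printf("wrong=%zu right=%zu\n", wrong, right);
            return 0;
    }
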
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 4c33d99ca7e8..4c504cb0e1c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -15,6 +15,7 @@
//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
//#define DML_GLOBAL_PREFETCH_CHECK
#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
+#define DML_MAX_VSTARTUP_START 1023
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
{
@@ -3737,6 +3738,7 @@ static unsigned int CalculateMaxVStartup(
dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
+ max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
}
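
The DML change caps CalculateMaxVStartup() at the new DML_MAX_VSTARTUP_START
of 1023, presumably the maximum a 10-bit VSTARTUP register field can hold.
The clamp itself is a plain minimum:

    #include <stdio.h>

    #define DML_MAX_VSTARTUP_START 1023 /* value from the diff */

    static unsigned int clamp_vstartup(unsigned int max_vstartup_lines)
    {
            return max_vstartup_lines < DML_MAX_VSTARTUP_START ?
                   max_vstartup_lines : DML_MAX_VSTARTUP_START;
    }

    int main(void)
    {
            /* 900 stays 900; 4000 clamps to 1023 */
            printf("%u %u\n", clamp_vstartup(900), clamp_vstartup(4000));
            return 0;
    }
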
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 9c9947fc5d44..5656d10368ad 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1066,7 +1066,8 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ if (!enable) {
+ /*follow oem panel config's requirement*/
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
}
@@ -1658,9 +1659,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1838,11 +1837,10 @@ static void clean_up_dsc_blocks(struct dc *dc)
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
int i;
- if (dc->ctx->dce_version != DCN_VERSION_3_5 &&
- dc->ctx->dce_version != DCN_VERSION_3_6 &&
- dc->ctx->dce_version != DCN_VERSION_3_51)
+ if (!dc->caps.is_apu ||
+ dc->ctx->dce_version < DCN_VERSION_3_15)
return;
-
+ /*VBIOS supports dsc starts from dcn315*/
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
struct dcn_dsc_state s = {0};
@@ -2109,8 +2107,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 301ef36d3d05..912f96323ed6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1113,9 +1113,7 @@ static void dcn10_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
@@ -3218,8 +3216,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index a5a3e0823e21..926c08e790c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing(
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -2856,9 +2854,7 @@ void dcn20_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 288e9dd9205d..f38340aa3f15 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -543,9 +543,7 @@ static void dcn31_reset_back_end_for_pipe(
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b907ad1acedd..922b8d71cf1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1473,8 +1473,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
num_frames = 2 * (frame_rate % 60);
}
}
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 39668d8cc13a..8f5da0ded850 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -830,10 +830,7 @@ enum dc_status dcn401_enable_stream_timing(
}
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
-
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
/* Event triggers and num frames initialized for DRR, but can be
* later updated for PSR use. Note DRR trigger events are generated
@@ -1820,9 +1817,8 @@ void dcn401_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 2b1a2a00648a..c8b5ed834579 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -46,6 +46,7 @@ struct dce_hwseq;
struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
+struct drr_params;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -527,6 +528,11 @@ void set_p_state_switch_method(
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
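
[Editor's note: the repeated open-coded tg->funcs->set_drr calls in the hwseq hunks above are folded into this new helper, whose definition lands in dc_hw_sequencer.c per the diffstat. A minimal sketch of the expected behavior — the exact name of the stream's pending-adjust flag is assumed here for illustration — looks like:

    void set_drr_and_clear_adjust_pending(
                    struct pipe_ctx *pipe_ctx,
                    struct dc_stream_state *stream,
                    struct drr_params *params)
    {
            /* preserve the old guard: not every timing generator
             * implements set_drr */
            if (pipe_ctx->stream_res.tg->funcs->set_drr)
                    pipe_ctx->stream_res.tg->funcs->set_drr(
                                    pipe_ctx->stream_res.tg, params);

            /* the new part: once DRR has been (re)programmed or disabled,
             * any pending timing adjustment for this stream is stale
             * (field name assumed for this sketch) */
            if (stream)
                    stream->adjust.timing_adjust_pending = false;
    }
]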
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a77410122636..21ee0d96c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -265,6 +265,8 @@ static uint32_t intersect_frl_link_bw_support(
supported_bw_in_kbps = 18000000;
else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
supported_bw_in_kbps = 9000000;
+ else if (hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)
+ supported_bw_in_kbps = 0; /* This case should only get hit in regulated autonomous mode. */
return supported_bw_in_kbps;
}
@@ -1075,6 +1077,48 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
return DC_OK;
}
+static void read_and_intersect_post_frl_lt_status(
+ struct dc_link *link)
+{
+ union autonomous_mode_and_frl_link_status autonomous_mode_caps = {0};
+ union hdmi_tx_link_status hdmi_tx_link_status = {0};
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw = {0};
+
+ /* Check if dongle supports regulated autonomous mode. */
+ core_link_read_dpcd(link, DP_REGULATED_AUTONOMOUS_MODE_SUPPORTED_AND_HDMI_LINK_TRAINING_STATUS,
+ &autonomous_mode_caps.raw, sizeof(autonomous_mode_caps));
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support =
+ autonomous_mode_caps.bits.REGULATED_AUTONOMOUS_MODE_SUPPORTED;
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support) {
+ DC_LOG_DC("%s: PCON supports regulated autonomous mode.\n", __func__);
+
+ core_link_read_dpcd(link, DP_PCON_HDMI_TX_LINK_STATUS,
+ &hdmi_tx_link_status.raw, sizeof(hdmi_tx_link_status));
+ }
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_regulated_autonomous_mode_support &&
+ (!hdmi_tx_link_status.bits.HDMI_TX_READY_STATUS ||
+ !hdmi_encoded_link_bw.bits.FRL_LINK_TRAINING_FINISHED)) {
+ DC_LOG_WARNING("%s: PCON TX link training has not finished.\n", __func__);
+
+ /* Link training not finished, ignore values from this DPCD reg. */
+ return;
+ }
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
+ }
+}
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -1163,21 +1207,12 @@ static void get_active_converter_info(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
if (link->dc->caps.dp_hdmi21_pcon_support) {
- union hdmi_encoded_link_bw hdmi_encoded_link_bw;
link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
link_bw_kbps_from_raw_frl_link_rate_data(
hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
- // Intersect reported max link bw support with the supported link rate post FRL link training
- if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
- &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
- hdmi_encoded_link_bw);
- DC_LOG_DC("%s: pcon frl link bw = %u\n", __func__,
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps);
- }
+ read_and_intersect_post_frl_lt_status(link);
if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..c149210096ac 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -92,6 +92,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
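
[Editor's note: this one-line fix matters once commands can travel over the register-based inbox. As the dmub_dcn401 hunk near the end of this series shows, only the payload dwords covered by header.payload_bytes are written to the DMCUB_REG_INBOX0_MSG0..MSG14 registers (dwords[0] holds the header and is handled outside the payload loop), so a zero payload_bytes would send QUERY_HPD with an empty payload. Illustrative sketch of the resulting count:

    /* ceil(payload_bytes / 4) payload dwords get written, capped at 15 */
    uint32_t payload_dwords = (cmd.query_hpd.header.payload_bytes + 3) / 4;
]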
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 613298d21d03..ef358afdfb65 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1789,13 +1789,10 @@ bool perform_link_training_with_retries(
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low) {
+ if (is_link_bw_low)
DC_LOG_WARNING(
"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
__func__, link->link_index, req_bw, link_bw);
-
- return false;
- }
}
msleep(delay_between_attempts);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e0e3bb865359..1e4adbc764ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -675,6 +675,18 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ //Clear PSR cfg
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_EN_CFG,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
dc = link->ctx->dc;
dmcu = dc->res_pool->dmcu;
psr = dc->res_pool->psr;
@@ -685,9 +697,6 @@ bool edp_setup_psr(struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
psr_configuration.bits.ENABLE = 1;
psr_configuration.bits.CRC_VERIFICATION = 1;
psr_configuration.bits.FRAME_CAPTURE_INDICATION =
@@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ //Clear Replay config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ link->replay_settings.config.replay_error_status.raw = 0;
+
dc = link->ctx->dc;
replay = dc->res_pool->replay;
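
[Editor's note: both hunks in this file apply the same reordering — the sink-side DPCD enable is cleared unconditionally before the driver-side support check, so a sink left enabled by a previous driver instance is always quiesced. A generic sketch of the pattern (setup_feature, FEATURE_EN_CFG_REG and feature_supported are hypothetical names):

    static bool setup_feature(struct dc_link *link)
    {
            uint8_t cfg = 0;

            /* clear the sink-side enable first, even if we bail out below */
            dm_helpers_dp_write_dpcd(link->ctx, link, FEATURE_EN_CFG_REG,
                                     &cfg, sizeof(cfg));

            if (!feature_supported(link))   /* hypothetical predicate */
                    return false;

            /* ... proceed with the full feature configuration ... */
            return true;
    }
]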
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 13202ce30d66..f01ced015072 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -2047,7 +2047,8 @@ bool dcn30_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
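
[Editor's note: the kzalloc-to-kcalloc conversions here and in the dcn31/dcn314/dcn32 hunks below are behavior-preserving except for one detail: kcalloc checks the count * size multiplication for overflow and returns NULL rather than allocating a truncated buffer. Schematically:

    /* kcalloc(n, size, flags) behaves like kzalloc(n * size, flags),
     * except it fails cleanly when n * size would overflow */
    pipes = kcalloc(dc->res_pool->pipe_count,
                    sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
    if (!pipes)
            return false;
]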
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3c42ba8566cf..dddddbfef85f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1768,7 +1768,8 @@ bool dcn31_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index e3ba105034f8..26becc4cb804 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1704,7 +1704,8 @@ bool dcn314_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 14acef036b5a..6c2bb3f63be1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1698,7 +1698,7 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ if (pixel_rate_crb) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
@@ -1755,28 +1755,26 @@ static int dcn315_populate_dml_pipes_from_context(
continue;
}
- if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
- bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
- || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
-
- if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
- pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
- (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
- if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
- /* Clamp to 2 pipe split max det segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
- pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
- }
- if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
- /* If we are splitting we must have an even number of segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
- pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
- }
- /* Convert segments into size for DML use */
- pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
-
- crb_idx++;
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+
+ crb_idx++;
pipe_cnt++;
}
}
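
[Editor's note: since the DET redistribution now runs for every pipe in the loop, a small worked example may help; all values are hypothetical, the real MIN_RESERVED_DET_SEGS and DCN3_15_MAX_DET_SEGS come from the dcn315 headers:

    /*
     * Suppose remaining_det_segs = 18, MIN_RESERVED_DET_SEGS = 4 and
     * crb_pipes = 3:
     *
     *   share     = (18 - 4) / 3 = 4 segments per pipe (integer division)
     *   remainder = (18 - 4) % 3 = 2
     *
     * Pipes with crb_idx 0 and 1 each gain 5 segments, the third gains 4.
     * Each override is then clamped to 2 * DCN3_15_MAX_DET_SEGS, rounded
     * down to an even count when a split is required, and finally scaled
     * by DCN3_15_CRB_SEGMENT_SIZE_KB for DML.
     */
]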
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 664302876019..2a59cc61ed8c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1749,7 +1749,8 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count,
+ sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
index 145961803a92..d621c42a237e 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h
@@ -17,9 +17,6 @@
#define SPL_EXPAND(a, b) SPL_EXPAND2(a, b)
#define SPL_NAMESPACE(symbol) SPL_EXPAND(SPL_PFX_, symbol)
-#ifdef __cplusplus
-extern "C" {
-#endif
/* SPL interfaces */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 0787cc3904d6..203e3a440845 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
@@ -170,6 +170,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines the default interface for host->dmub commands */
+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -313,7 +320,7 @@ struct dmub_srv_hw_params {
 * @timeout_occured: Indicates a timeout occurred on any message from driver to dmub
* @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored
*/
-struct dmub_srv_debug {
+struct dmub_timeout_info {
bool timeout_occured;
union dmub_rb_cmd timeout_cmd;
unsigned long long timestamp;
@@ -340,7 +347,7 @@ struct dmub_diagnostic_data {
uint32_t outbox1_wptr;
uint32_t outbox1_size;
uint32_t gpint_datain0;
- struct dmub_srv_debug timeout_info;
+ struct dmub_timeout_info timeout_info;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -349,6 +356,21 @@ struct dmub_diagnostic_data {
uint8_t is_cw6_enabled : 1;
};
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
+};
+
/**
* struct dmub_srv_base_funcs - Driver specific base callbacks
*/
@@ -456,24 +478,27 @@ struct dmub_srv_hw_funcs {
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
- void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+ void (*get_diagnostic_data)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -493,6 +518,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
/**
@@ -519,11 +545,11 @@ struct dmub_srv {
struct dmub_srv_dcn32_regs *regs_dcn32;
struct dmub_srv_dcn35_regs *regs_dcn35;
const struct dmub_srv_dcn401_regs *regs_dcn401;
-
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -543,9 +569,10 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
- struct dmub_srv_debug debug;
+ struct dmub_diagnostic_data debug;
};
/**
@@ -700,19 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -724,11 +739,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -737,7 +752,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed
@@ -796,6 +811,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -893,15 +925,12 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_srv_should_detect(struct dmub_srv *dmub);
@@ -960,26 +989,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -991,4 +1000,40 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies return data for command into buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
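
[Editor's note: the new struct dmub_srv_inbox tracks both mailbox flavors behind one pair of monotonic counters, which is what lets dmub_srv_wait_for_pending() be re-entrant — a caller can snapshot num_submitted and wait only until num_reported catches up. A hypothetical helper illustrating the accounting, not part of this patch:

    static inline uint64_t
    dmub_srv_inbox_outstanding(const struct dmub_srv_inbox *inbox)
    {
            /* both counters only ever increase, so the difference is the
             * number of commands submitted but not yet reported complete */
            return inbox->num_submitted - inbox->num_reported;
    }
]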
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index f84bbc033e64..1f5f4e3e49d4 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -1332,6 +1332,16 @@ enum dmub_inbox0_command {
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
/**
+ * Maximum number of items in the DMUB REG INBOX0 internal ringbuffer.
+ */
+#define DMUB_REG_INBOX0_RB_MAX_ENTRY 16
+
+/**
+ * Ringbuffer size in bytes.
+ */
+#define DMUB_REG_INBOX0_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_REG_INBOX0_RB_MAX_ENTRY)
+
+/**
* REG_SET mask for reg offload.
*/
#define REG_SET_MASK 0xFFFF
@@ -1533,7 +1543,8 @@ struct dmub_cmd_header {
unsigned int sub_type : 8; /**< command sub type */
unsigned int ret_status : 1; /**< 1 if returned data, 0 otherwise */
unsigned int multi_cmd_pending : 1; /**< 1 if multiple commands chained together */
- unsigned int reserved0 : 6; /**< reserved bits */
+ unsigned int is_reg_based : 1; /**< 1 if register based mailbox cmd, 0 if FB based cmd */
+ unsigned int reserved0 : 5; /**< reserved bits */
unsigned int payload_bytes : 6; /* payload excluding header - up to 60 bytes */
unsigned int reserved1 : 2; /**< reserved bits */
};
@@ -5891,6 +5902,42 @@ static inline bool dmub_rb_empty(struct dmub_rb *rb)
}
/**
+ * @brief gets the number of outstanding commands in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return the number of queued commands awaiting consumption
+ */
+static inline uint32_t dmub_rb_num_outstanding(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return data_count / DMUB_RB_CMD_SIZE;
+}
+
+/**
+ * @brief gets the number of free command slots in the RB
+ *
+ * @param rb DMUB Ringbuffer
+ * @return the number of free command slots
+ */
+static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
+}
+
+/**
* @brief Checks if the ringbuffer is full
*
* @param rb DMUB Ringbuffer
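
[Editor's note: a worked example of the wraparound arithmetic shared by dmub_rb_num_outstanding() and dmub_rb_num_free(), using hypothetical values (the 64-byte command size matches the static_assert in the dcn401 hunk below):

    /*
     * capacity = 1024 bytes, DMUB_RB_CMD_SIZE = 64:
     *
     *   wrpt = 896, rptr = 128  ->  data_count = 896 - 128          = 768
     *   wrpt = 128, rptr = 896  ->  data_count = 1024 - (896 - 128) = 256
     *
     * outstanding = data_count / 64            (12 and 4 commands);
     * free        = (capacity - data_count) / 64  (4 and 12 slots).
     */
]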
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index e500ca9ae09c..73888c1bea93 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -414,63 +414,66 @@ uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
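
[Editor's note: the same cache/clear/restore preamble is repeated verbatim in the dcn31/dcn32/dcn35/dcn401 variants below. A hypothetical shared helper, not introduced by this patch, would capture the idiom:

    static void dmub_debug_reset_preserving_timeout(struct dmub_srv *dmub)
    {
            /* timeout info is recorded externally on command timeout,
             * so it must survive the diagnostic snapshot reset */
            struct dmub_timeout_info timeout = dmub->debug.timeout_info;

            memset(&dmub->debug, 0, sizeof(dmub->debug));
            dmub->debug.timeout_info = timeout;
    }
]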
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index de287b101848..42c1fb4bc73f 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -247,6 +247,6 @@ bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);
uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub);
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 9796077885c9..a308bd604677 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -414,69 +414,72 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
}
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index eccdab4986ce..1c43ef2bca66 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -251,7 +251,7 @@ void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub);
bool dmub_dcn31_should_detect(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 9600b7f858b0..e7056205b050 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -417,73 +417,75 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
-
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 29c1132951af..1a229450c53d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -254,7 +254,7 @@ void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 01d013a12b94..72a0f078cd1a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -462,66 +462,69 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset;
uint32_t is_traceport_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 686e97c00ccc..39fcb7275da5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -269,7 +269,7 @@ void dmub_dcn35_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index e1c4fe1c6e3e..731ca9b6a6cf 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -415,72 +415,75 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub)
return REG_READ(DMCUB_TIMER_CURRENT);
}
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ struct dmub_timeout_info timeout = {0};
- if (!dmub || !diag_data)
+ if (!dmub)
return;
- memset(diag_data, 0, sizeof(*diag_data));
-
- diag_data->dmcub_version = dmub->fw_version;
-
- diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
- diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
- diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
- diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
- diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
- diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
- diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
- diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
- diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
- diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
- diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
- diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
- diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
- diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
- diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
- diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
- diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
-
- diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
- diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
- diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
-
- diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
- diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
- diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
-
- diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
- diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
- diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
-
- diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
- diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
- diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
+ /* timeout data filled externally, cache before resetting memory */
+ timeout = dmub->debug.timeout_info;
+ memset(&dmub->debug, 0, sizeof(dmub->debug));
+ dmub->debug.timeout_info = timeout;
+
+ dmub->debug.dmcub_version = dmub->fw_version;
+
+ dmub->debug.scratch[0] = REG_READ(DMCUB_SCRATCH0);
+ dmub->debug.scratch[1] = REG_READ(DMCUB_SCRATCH1);
+ dmub->debug.scratch[2] = REG_READ(DMCUB_SCRATCH2);
+ dmub->debug.scratch[3] = REG_READ(DMCUB_SCRATCH3);
+ dmub->debug.scratch[4] = REG_READ(DMCUB_SCRATCH4);
+ dmub->debug.scratch[5] = REG_READ(DMCUB_SCRATCH5);
+ dmub->debug.scratch[6] = REG_READ(DMCUB_SCRATCH6);
+ dmub->debug.scratch[7] = REG_READ(DMCUB_SCRATCH7);
+ dmub->debug.scratch[8] = REG_READ(DMCUB_SCRATCH8);
+ dmub->debug.scratch[9] = REG_READ(DMCUB_SCRATCH9);
+ dmub->debug.scratch[10] = REG_READ(DMCUB_SCRATCH10);
+ dmub->debug.scratch[11] = REG_READ(DMCUB_SCRATCH11);
+ dmub->debug.scratch[12] = REG_READ(DMCUB_SCRATCH12);
+ dmub->debug.scratch[13] = REG_READ(DMCUB_SCRATCH13);
+ dmub->debug.scratch[14] = REG_READ(DMCUB_SCRATCH14);
+ dmub->debug.scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ dmub->debug.scratch[16] = REG_READ(DMCUB_SCRATCH16);
+
+ dmub->debug.undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
+ dmub->debug.inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
+ dmub->debug.data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
+
+ dmub->debug.inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
+ dmub->debug.inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
+ dmub->debug.inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
+
+ dmub->debug.inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
+ dmub->debug.inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
+ dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
+
+ dmub->debug.outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
+ dmub->debug.outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
+ dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
- diag_data->is_dmcub_enabled = is_dmub_enabled;
+ dmub->debug.is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
- diag_data->is_dmcub_soft_reset = is_soft_reset;
+ dmub->debug.is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
+ dmub->debug.is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
- diag_data->is_traceport_en = is_traceport_enabled;
+ dmub->debug.is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- diag_data->is_cw0_enabled = is_cw0_enabled;
+ dmub->debug.is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
- diag_data->is_cw6_enabled = is_cw6_enabled;
+ dmub->debug.is_cw6_enabled = is_cw6_enabled;
- diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
- diag_data->timeout_info = dmub->debug;
+ dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
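Editor's note: with the collector now latching everything into dmub->debug instead of a caller-supplied struct, the consuming side changes shape as well. A minimal sketch of a consumer, assuming only the dmub_srv_get_diagnostic_data() wrapper updated later in this patch; the dmub_log_dmcub_state() helper and the pr_err() reporting are illustrative, not part of the patch:

	/* sketch only: assumes <linux/printk.h> and the dmub_srv types */
	static void dmub_log_dmcub_state(struct dmub_srv *dmub)
	{
		/* latches the DMCUB registers into dmub->debug */
		if (!dmub_srv_get_diagnostic_data(dmub))
			return;

		pr_err("dmcub fw 0x%08x enabled=%u soft_reset=%u\n",
		       dmub->debug.dmcub_version,
		       dmub->debug.is_dmcub_enabled,
		       dmub->debug.is_dmcub_soft_reset);
	}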
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
@@ -514,28 +517,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+ /* write remaining payload dwords based on payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4)
+ break;
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
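Editor's note: the loop above only touches as many MSG registers as the payload requires; dwords[0] (the command header) is written to the RDY register last, and payload dwords live at dwords[1..15]. A sketch of the implied register count, assuming DIV_ROUND_UP from <linux/math.h>; the helper name is hypothetical:

	/* how many DMCUB_REG_INBOX0_MSGn writes the loop above performs */
	static unsigned int reg_inbox0_msgs_written(int32_t payload_size_bytes)
	{
		if (payload_size_bytes <= 0)
			return 0;
		if (payload_size_bytes > 15 * 4)
			payload_size_bytes = 15 * 4;	/* MSG0..MSG14 only */
		return DIV_ROUND_UP(payload_size_bytes, 4);
	}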
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
@@ -553,30 +597,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1 : 0);
+}
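Editor's note: splitting the ack into separate assert and deassert helpers lets the service layer assert the ack as soon as a response is observed and deassert it just before the next command goes out. A sketch of the intended pairing, inferred from dmub_srv_update_reg_inbox0_status() and dmub_srv_reg_cmd_execute() later in this patch; the wrapper itself is hypothetical:

	/* sketch of the intended pairing; not a literal helper in this patch */
	static void reg_inbox0_ack_cycle(struct dmub_srv *dmub)
	{
		/* on response observed (see dmub_srv_update_reg_inbox0_status()) */
		dmub_dcn401_write_reg_inbox0_rsp_int_ack(dmub);

		/* ...and immediately before the next submission
		 * (see dmub_srv_reg_cmd_execute())
		 */
		dmub_dcn401_clear_reg_inbox0_rsp_int_ack(dmub);
	}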
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -601,11 +654,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index 31f95b27e227..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -264,7 +264,7 @@ void dmub_dcn401_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub);
-void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub);
void dmub_dcn401_configure_dmub_in_system_memory(struct dmub_srv *dmub);
void dmub_dcn401_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
@@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 6133d25da301..713576a1f6fa 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -157,6 +157,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+ /* default to specifying no inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -353,7 +356,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
- funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
+ funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
if (asic == DMUB_ASIC_DCN36)
funcs->init_reg_offsets = dmub_srv_dcn36_regs_init;
@@ -395,10 +398,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -411,6 +419,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+ /* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
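Editor's note: the inbox selection above reduces to a three-way priority: an explicit caller override, then the per-DCN default, then whichever interface the hw funcs actually support. The same logic as a pure helper, for reference only; the patch keeps it inline in dmub_srv_hw_setup():

	static enum dmub_inbox_cmd_interface_type
	dmub_pick_inbox_type(enum dmub_inbox_cmd_interface_type requested,
			     enum dmub_inbox_cmd_interface_type asic_default,
			     bool has_reg_inbox0)
	{
		if (requested != DMUB_CMD_INTERFACE_DEFAULT)
			return requested;	/* caller override wins */
		if (asic_default != DMUB_CMD_INTERFACE_DEFAULT)
			return asic_default;	/* per-DCN default */
		return has_reg_inbox0 ? DMUB_CMD_INTERFACE_REG : DMUB_CMD_INTERFACE_FB;
	}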
@@ -426,6 +448,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -695,7 +718,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -737,7 +760,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -768,27 +791,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -799,8 +801,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -811,7 +818,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -820,18 +827,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -846,13 +855,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -910,26 +919,97 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ /* take a snapshot of the required mailbox state */
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, but fall back to comparing submitted vs. reported command counts */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
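Editor's note: dmub_srv_wait_for_pending() snapshots the mailbox state once and then polls until both inboxes have caught up to that snapshot, so commands submitted after the call do not extend the wait. A sketch of the expected call sequence on the framebuffer path, using the renamed dmub_srv_fb_cmd_execute() above; the 100 ms timeout is illustrative, not taken from this patch:

	static enum dmub_status dmub_flush_and_wait(struct dmub_srv *dmub)
	{
		enum dmub_status status;

		status = dmub_srv_fb_cmd_execute(dmub);	/* ring the doorbell */
		if (status != DMUB_STATUS_OK)
			return status;

		return dmub_srv_wait_for_pending(dmub, 100 * 1000 /* us */);
	}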
+
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
uint32_t i, rptr;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- if (rptr > dmub->inbox1_rb.capacity)
+ if (rptr > dmub->inbox1.rb.capacity)
return DMUB_STATUS_HW_FAILURE;
- dmub->inbox1_rb.rptr = rptr;
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
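Editor's note: the rptr bookkeeping above turns the ring-position delta into a consumed-command count, wrapping through the ring capacity when the read pointer has lapped. The same arithmetic isolated into a helper, as a sketch; it assumes DMUB_RB_CMD_SIZE evenly divides the ring capacity:

	static uint32_t inbox1_cmds_consumed(const struct dmub_rb *rb,
					     uint32_t new_rptr)
	{
		uint32_t bytes;

		if (rb->rptr > new_rptr)
			bytes = new_rptr + rb->capacity - rb->rptr;	/* wrapped */
		else
			bytes = new_rptr - rb->rptr;

		return bytes / DMUB_RB_CMD_SIZE;
	}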
@@ -1040,35 +1120,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1099,11 +1150,11 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
-bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
+bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
+ if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
return false;
- dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
+ dmub->hw_funcs.get_diagnostic_data(dmub);
return true;
}
@@ -1160,46 +1211,105 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
+
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
+{
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
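Editor's note: num_submitted and num_reported are free-running counters, so the pending distance has to tolerate num_submitted wrapping past num_reported. The same arithmetic as above, isolated for reference; this sketch mirrors the inline code:

	static uint32_t reg_inbox0_num_pending(uint32_t num_submitted,
					       uint32_t num_reported)
	{
		if (num_submitted >= num_reported)
			return num_submitted - num_reported;

		/* num_submitted wrapped */
		return DMUB_REG_INBOX0_RB_MAX_ENTRY -
			(num_reported - num_submitted);
	}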
+
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+ return DMUB_STATUS_OK;
+}
+
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
}
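Editor's note: taken together, the new register path replaces the removed dmub_srv_send_reg_inbox0_cmd() round trip. A sketch of the equivalent sequence with the split API, assuming only the functions introduced in this patch; the timeout value is illustrative and error handling is trimmed:

	static enum dmub_status dmub_reg_cmd_round_trip(struct dmub_srv *dmub,
							union dmub_rb_cmd *cmd)
	{
		enum dmub_status status;

		status = dmub_srv_reg_cmd_execute(dmub, cmd);	/* submit */
		if (status != DMUB_STATUS_OK)
			return status;

		status = dmub_srv_wait_for_idle(dmub, 100 * 1000 /* us */);
		if (status != DMUB_STATUS_OK)
			return status;

		dmub_srv_cmd_get_response(dmub, cmd);	/* reply overwrites cmd */
		return DMUB_STATUS_OK;
	}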