From: <Roman.Li@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Harry Wentland <harry.wentland@amd.com>,
Leo Li <sunpeng.li@amd.com>,
Aurabindo Pillai <aurabindo.pillai@amd.com>,
Roman Li <roman.li@amd.com>, Wayne Lin <wayne.lin@amd.com>,
Tom Chung <chiahsuan.chung@amd.com>,
"Fangzhi Zuo" <jerry.zuo@amd.com>,
Daniel Wheeler <daniel.wheeler@amd.com>, Ray Wu <Ray.Wu@amd.com>,
Ivan Lipski <ivan.lipski@amd.com>, Alex Hung <alex.hung@amd.com>,
Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Subject: [PATCH 03/12] drm/amd/display: fix dmub access race condition
Date: Wed, 30 Jul 2025 14:58:54 -0400
Message-ID: <20250730185903.1023256-4-Roman.Li@amd.com>
In-Reply-To: <20250730185903.1023256-1-Roman.Li@amd.com>
From: Aurabindo Pillai <aurabindo.pillai@amd.com>
Accessing DC from amdgpu_dm is usually preceded by acquisition of the
dc_lock mutex, and most of the DC APIs that DM calls are covered by it.
However, a few are not. Some DC APIs called from interrupt context end
up sending DMUB commands while other threads are already using DMUB.
This was apparent from a race between the calls that enable/disable
idle optimizations and the DC API that sets vmin/vmax.
Offload the call to dc_stream_adjust_vmin_vmax() to a worker instead of
calling it directly from the interrupt handler, so that the call can
wait for dc_lock.
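
For illustration only, here is a minimal sketch of the defer-to-worker
pattern this patch follows (hypothetical names, generic lock and
payload; the actual implementation is in the diff below). An interrupt
handler must not sleep on a mutex, so it copies what it needs into a
work item and the locked call runs later in process context:

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/mutex.h>

    struct my_offload_work {
            struct work_struct work;
            struct mutex *lock;     /* e.g. &adev->dm.dc_lock */
            int param;              /* copy of the data the deferred call needs */
    };

    static void my_offload_handler(struct work_struct *w)
    {
            struct my_offload_work *mw =
                    container_of(w, struct my_offload_work, work);

            mutex_lock(mw->lock);   /* sleeping is allowed in a worker */
            /* ... make the locked call with mw->param here ... */
            mutex_unlock(mw->lock);
            kfree(mw);
    }

    /* Called from interrupt context: allocate atomically, copy, queue. */
    static void my_schedule_offload(struct mutex *lock, int param)
    {
            struct my_offload_work *mw = kzalloc(sizeof(*mw), GFP_ATOMIC);

            if (!mw)
                    return;

            mw->lock = lock;
            mw->param = param;
            INIT_WORK(&mw->work, my_offload_handler);
            queue_work(system_wq, &mw->work);
    }

Note that the allocation on the interrupt-context side must use
GFP_ATOMIC, since the caller cannot sleep.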
Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
---
.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 55 +++++++++++++++++--
.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 14 +++++
2 files changed, 63 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c71167ffdb76..6762fc7de769 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -530,6 +530,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_ATOMIC);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_ATOMIC);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -579,10 +623,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -672,8 +715,8 @@ static void dm_crtc_high_irq(void *interrupt_params)
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 67c3a7a967f2..94f312bae9ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -153,6 +153,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Deferred work data for adjusting vmin/vmax outside interrupt context
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
#define MAX_LUMINANCE_DATA_POINTS 99
/**
--
2.34.1