From: Alex Deucher <alexander.deucher@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Alex Deucher <alexander.deucher@amd.com>
Subject: [PATCH 34/42] drm/amdgpu: store vm flush state in amdgpu_job
Date: Wed, 14 Jan 2026 11:47:19 -0500 [thread overview]
Message-ID: <20260114164727.15367-35-alexander.deucher@amd.com> (raw)
In-Reply-To: <20260114164727.15367-1-alexander.deucher@amd.com>
Store the VM flush state in the amdgpu_job structure rather than
as local variables in amdgpu_vm_flush(). No intended functional
change; this is needed to help separate the VM flush and emit logic.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3 ++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 54 ++++++++++++-------------
2 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 21e1941ce356a..d53c13322a648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -105,6 +105,9 @@ struct amdgpu_job {
bool need_pipe_sync;
u32 pipe_sync_seq;
bool need_ctx_switch;
+ bool vm_flush_needed;
+ bool cleaner_shader_needed;
+ bool pasid_mapping_needed;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index cea359f2084ca..e480a65dbdb1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -774,42 +774,40 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
- bool spm_update_needed = job->spm_update_needed;
- bool gds_switch_needed = ring->funcs->emit_gds_switch &&
- job->gds_switch_needed;
- bool vm_flush_needed = job->vm_needs_flush;
- bool cleaner_shader_needed = false;
- bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
unsigned int patch;
int r;
+ job->gds_switch_needed = ring->funcs->emit_gds_switch &&
+ job->gds_switch_needed;
+ job->vm_flush_needed = job->vm_needs_flush;
+
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
- gds_switch_needed = true;
- vm_flush_needed = true;
- pasid_mapping_needed = true;
- spm_update_needed = true;
+ job->gds_switch_needed = true;
+ job->vm_flush_needed = true;
+ job->pasid_mapping_needed = true;
+ job->spm_update_needed = true;
}
mutex_lock(&id_mgr->lock);
if (id->pasid != job->pasid || !id->pasid_mapping ||
!dma_fence_is_signaled(id->pasid_mapping))
- pasid_mapping_needed = true;
+ job->pasid_mapping_needed = true;
mutex_unlock(&id_mgr->lock);
- gds_switch_needed &= !!ring->funcs->emit_gds_switch;
- vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
- job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
- pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ job->gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+ job->vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
+ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
+ job->pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- cleaner_shader_needed = job->run_cleaner_shader &&
+ job->cleaner_shader_needed = job->run_cleaner_shader &&
adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader && job->base.s_fence &&
&job->base.s_fence->scheduled == isolation->spearhead;
- if (!vm_flush_needed && !gds_switch_needed && !job->need_pipe_sync &&
- !cleaner_shader_needed)
+ if (!job->vm_flush_needed && !job->gds_switch_needed && !job->need_pipe_sync &&
+ !job->cleaner_shader_needed)
return 0;
amdgpu_ring_ib_begin(ring);
@@ -820,29 +818,31 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (job->need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring, job->pipe_sync_seq);
- if (cleaner_shader_needed)
+ if (job->cleaner_shader_needed)
ring->funcs->emit_cleaner_shader(ring);
- if (vm_flush_needed) {
+ if (job->vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
}
- if (pasid_mapping_needed)
+ if (job->pasid_mapping_needed)
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
- if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ if (job->spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid);
if (ring->funcs->emit_gds_switch &&
- gds_switch_needed) {
+ job->gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
job->gws_size, job->oa_base,
job->oa_size);
}
- if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
+ if (job->vm_flush_needed ||
+ job->pasid_mapping_needed ||
+ job->cleaner_shader_needed) {
r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
@@ -851,7 +851,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
dma_fence_get(fence);
}
- if (vm_flush_needed) {
+ if (job->vm_flush_needed) {
mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
id->last_flush = dma_fence_get(fence);
@@ -860,7 +860,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
mutex_unlock(&id_mgr->lock);
}
- if (pasid_mapping_needed) {
+ if (job->pasid_mapping_needed) {
mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
@@ -872,7 +872,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
* Make sure that all other submissions wait for the cleaner shader to
* finish before we push them to the HW.
*/
- if (cleaner_shader_needed) {
+ if (job->cleaner_shader_needed) {
trace_amdgpu_cleaner_shader(ring, fence);
mutex_lock(&adev->enforce_isolation_mutex);
dma_fence_put(isolation->spearhead);
--
2.52.0
next prev parent reply other threads:[~2026-01-14 16:48 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-14 16:46 [PATCH 00/42] Improvements for IB handling V2 Alex Deucher
2026-01-14 16:46 ` [PATCH 01/42] drm/amdgpu/job: use GFP_ATOMIC while in gpu reset Alex Deucher
2026-01-14 16:46 ` [PATCH 02/42] drm/amdgpu/vpe: switch to using job for IBs Alex Deucher
2026-01-14 16:46 ` [PATCH 03/42] drm/amdgpu/gfx6: " Alex Deucher
2026-01-14 16:46 ` [PATCH 04/42] drm/amdgpu/gfx7: " Alex Deucher
2026-01-14 16:46 ` [PATCH 05/42] drm/amdgpu/gfx8: " Alex Deucher
2026-01-14 16:46 ` [PATCH 06/42] drm/amdgpu/gfx9: " Alex Deucher
2026-01-14 16:46 ` [PATCH 07/42] drm/amdgpu/gfx9.4.2: " Alex Deucher
2026-01-14 16:46 ` [PATCH 08/42] drm/amdgpu/gfx9.4.3: " Alex Deucher
2026-01-14 16:46 ` [PATCH 09/42] drm/amdgpu/gfx10: " Alex Deucher
2026-01-14 16:46 ` [PATCH 10/42] drm/amdgpu/gfx11: " Alex Deucher
2026-01-14 16:46 ` [PATCH 11/42] drm/amdgpu/gfx12: " Alex Deucher
2026-01-14 16:46 ` [PATCH 12/42] drm/amdgpu/gfx12.1: " Alex Deucher
2026-01-14 16:46 ` [PATCH 13/42] drm/amdgpu/si_dma: " Alex Deucher
2026-01-14 16:46 ` [PATCH 14/42] drm/amdgpu/cik_sdma: " Alex Deucher
2026-01-14 16:47 ` [PATCH 15/42] drm/amdgpu/sdma2.4: " Alex Deucher
2026-01-14 16:47 ` [PATCH 16/42] drm/amdgpu/sdma3: " Alex Deucher
2026-01-14 16:47 ` [PATCH 17/42] drm/amdgpu/sdma4: " Alex Deucher
2026-01-14 16:47 ` [PATCH 18/42] drm/amdgpu/sdma4.4.2: " Alex Deucher
2026-01-14 16:47 ` [PATCH 19/42] drm/amdgpu/sdma5: " Alex Deucher
2026-01-14 16:47 ` [PATCH 20/42] drm/amdgpu/sdma5.2: " Alex Deucher
2026-01-14 16:47 ` [PATCH 21/42] drm/amdgpu/sdma6: " Alex Deucher
2026-01-14 16:47 ` [PATCH 22/42] drm/amdgpu/sdma7: " Alex Deucher
2026-01-14 16:47 ` [PATCH 23/42] drm/amdgpu/sdma7.1: " Alex Deucher
2026-01-14 16:47 ` [PATCH 24/42] drm/amdgpu: require a job to schedule an IB Alex Deucher
2026-01-14 16:47 ` [PATCH 25/42] drm/amdgpu: rename amdgpu_fence_driver_guilty_force_completion() Alex Deucher
2026-01-14 16:47 ` [PATCH 26/42] drm/amdgpu: mark fences with errors before ring reset Alex Deucher
2026-01-14 16:47 ` [PATCH 27/42] drm/amdgpu: don't call drm_sched_stop/start() in asic reset Alex Deucher
2026-01-14 16:47 ` [PATCH 28/42] drm/amdgpu: drop drm_sched_increase_karma() Alex Deucher
2026-01-14 16:47 ` [PATCH 29/42] drm/amdgpu: plumb timedout fence through to force completion Alex Deucher
2026-01-14 16:47 ` [PATCH 30/42] drm/amdgpu: simplify VCN reset helper Alex Deucher
2026-01-14 16:47 ` [PATCH 31/42] drm/amdgpu: change function signature for emit_pipeline_sync() Alex Deucher
2026-01-14 16:47 ` [PATCH 32/42] drm/amdgpu: drop extra parameter for vm_flush Alex Deucher
2026-01-14 16:47 ` [PATCH 33/42] drm/amdgpu: move need_ctx_switch into amdgpu_job Alex Deucher
2026-01-14 16:47 ` Alex Deucher [this message]
2026-01-14 16:47 ` [PATCH 35/42] drm/amdgpu: split fence init and emit logic Alex Deucher
2026-01-14 16:47 ` [PATCH 36/42] drm/amdgpu: split vm flush and vm flush " Alex Deucher
2026-01-14 16:47 ` [PATCH 37/42] drm/amdgpu: split ib schedule and ib " Alex Deucher
2026-01-14 16:47 ` [PATCH 38/42] drm/amdgpu: move drm sched stop/start into amdgpu_job_timedout() Alex Deucher
2026-01-14 16:47 ` [PATCH 39/42] drm/amdgpu: add an all_instance_rings_reset ring flag Alex Deucher
2026-01-14 16:47 ` [PATCH 40/42] drm/amdgpu: add helper to save and restore ring state Alex Deucher
2026-01-14 16:47 ` [PATCH 41/42] drm/amdgpu: rework reset reemit handling Alex Deucher
2026-01-14 16:47 ` [PATCH 42/42] drm/amdgpu: simplify per queue reset code Alex Deucher
2026-01-14 20:47 ` [PATCH 00/42] Improvements for IB handling V2 Alex Deucher
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260114164727.15367-35-alexander.deucher@amd.com \
--to=alexander.deucher@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox