AMD-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Alex Deucher <alexander.deucher@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: Alex Deucher <alexander.deucher@amd.com>
Subject: [PATCH 36/42] drm/amdgpu: split vm flush and vm flush emit logic
Date: Wed, 14 Jan 2026 11:47:21 -0500	[thread overview]
Message-ID: <20260114164727.15367-37-alexander.deucher@amd.com> (raw)
In-Reply-To: <20260114164727.15367-1-alexander.deucher@amd.com>

No intended functional change.  Split the logic into
two functions: one to compute and record the flush state
in the job, and one to use that state to emit the ring contents.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 92 ++++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  1 +
 4 files changed, 56 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 49c32ad975203..f2b95ad57d97f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -213,6 +213,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, struct amdgpu_job *job,
 		amdgpu_ring_undo(ring);
 		return r;
 	}
+	amdgpu_vm_emit_flush(ring, job);
 
 	amdgpu_ring_ib_begin(ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index d53c13322a648..72d50602a8e52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -108,6 +108,7 @@ struct amdgpu_job {
 	bool			vm_flush_needed;
 	bool			cleaner_shader_needed;
 	bool			pasid_mapping_needed;
+	bool			emit_vm_fence;
 
 	uint32_t		num_ibs;
 	struct amdgpu_ib	ibs[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 374991520ad2c..6c84677daad4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -756,6 +756,57 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 	return false;
 }
 
+void amdgpu_vm_emit_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned int patch;
+
+	if (!job->vm_flush_needed && !job->gds_switch_needed && !job->need_pipe_sync &&
+	    !job->cleaner_shader_needed)
+		return;
+
+	amdgpu_ring_ib_begin(ring);
+	if (ring->funcs->init_cond_exec)
+		patch = amdgpu_ring_init_cond_exec(ring,
+						   ring->cond_exe_gpu_addr);
+
+	if (job->need_pipe_sync)
+		amdgpu_ring_emit_pipeline_sync(ring, job->pipe_sync_seq);
+
+	if (job->cleaner_shader_needed)
+		ring->funcs->emit_cleaner_shader(ring);
+
+	if (job->vm_flush_needed)
+		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+
+	if (job->pasid_mapping_needed)
+		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+
+	if (job->spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid);
+
+	if (ring->funcs->emit_gds_switch &&
+	    job->gds_switch_needed) {
+		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+					    job->gds_size, job->gws_base,
+					    job->gws_size, job->oa_base,
+					    job->oa_size);
+	}
+
+	if (job->emit_vm_fence)
+		amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
+
+	amdgpu_ring_patch_cond_exec(ring, patch);
+
+	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+	if (ring->funcs->emit_switch_buffer) {
+		amdgpu_ring_emit_switch_buffer(ring);
+		amdgpu_ring_emit_switch_buffer(ring);
+	}
+
+	amdgpu_ring_ib_end(ring);
+}
+
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
@@ -775,7 +826,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 	struct dma_fence *fence = NULL;
-	unsigned int patch;
 	int r;
 
 	job->gds_switch_needed = ring->funcs->emit_gds_switch &&
@@ -810,35 +860,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	    !job->cleaner_shader_needed)
 		return 0;
 
-	amdgpu_ring_ib_begin(ring);
-	if (ring->funcs->init_cond_exec)
-		patch = amdgpu_ring_init_cond_exec(ring,
-						   ring->cond_exe_gpu_addr);
-
-	if (job->need_pipe_sync)
-		amdgpu_ring_emit_pipeline_sync(ring, job->pipe_sync_seq);
-
-	if (job->cleaner_shader_needed)
-		ring->funcs->emit_cleaner_shader(ring);
-
-	if (job->vm_flush_needed) {
+	if (job->vm_flush_needed)
 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
-		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
-	}
-
-	if (job->pasid_mapping_needed)
-		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
-
-	if (job->spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
-		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring->xcc_id, ring, job->vmid);
-
-	if (ring->funcs->emit_gds_switch &&
-	    job->gds_switch_needed) {
-		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
-					    job->gds_size, job->gws_base,
-					    job->gws_size, job->oa_base,
-					    job->oa_size);
-	}
 
 	if (job->vm_flush_needed ||
 	    job->pasid_mapping_needed ||
@@ -846,7 +869,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		r = amdgpu_fence_init(ring, job->hw_vm_fence);
 		if (r)
 			return r;
-		amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
+		job->emit_vm_fence = true;
 		fence = &job->hw_vm_fence->base;
 		/* get a ref for the job */
 		dma_fence_get(fence);
@@ -882,15 +905,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	}
 	dma_fence_put(fence);
 
-	amdgpu_ring_patch_cond_exec(ring, patch);
-
-	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
-	if (ring->funcs->emit_switch_buffer) {
-		amdgpu_ring_emit_switch_buffer(ring);
-		amdgpu_ring_emit_switch_buffer(ring);
-	}
-
-	amdgpu_ring_ib_end(ring);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 89b76639cb273..0ce37aab8b518 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -512,6 +512,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		       struct ww_acquire_ctx *ticket,
 		       int (*callback)(void *p, struct amdgpu_bo *bo),
 		       void *param);
+void amdgpu_vm_emit_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm, bool immediate);
-- 
2.52.0


  parent reply	other threads:[~2026-01-14 16:48 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-01-14 16:46 [PATCH 00/42] Improvements for IB handling V2 Alex Deucher
2026-01-14 16:46 ` [PATCH 01/42] drm/amdgpu/job: use GFP_ATOMIC while in gpu reset Alex Deucher
2026-01-14 16:46 ` [PATCH 02/42] drm/amdgpu/vpe: switch to using job for IBs Alex Deucher
2026-01-14 16:46 ` [PATCH 03/42] drm/amdgpu/gfx6: " Alex Deucher
2026-01-14 16:46 ` [PATCH 04/42] drm/amdgpu/gfx7: " Alex Deucher
2026-01-14 16:46 ` [PATCH 05/42] drm/amdgpu/gfx8: " Alex Deucher
2026-01-14 16:46 ` [PATCH 06/42] drm/amdgpu/gfx9: " Alex Deucher
2026-01-14 16:46 ` [PATCH 07/42] drm/amdgpu/gfx9.4.2: " Alex Deucher
2026-01-14 16:46 ` [PATCH 08/42] drm/amdgpu/gfx9.4.3: " Alex Deucher
2026-01-14 16:46 ` [PATCH 09/42] drm/amdgpu/gfx10: " Alex Deucher
2026-01-14 16:46 ` [PATCH 10/42] drm/amdgpu/gfx11: " Alex Deucher
2026-01-14 16:46 ` [PATCH 11/42] drm/amdgpu/gfx12: " Alex Deucher
2026-01-14 16:46 ` [PATCH 12/42] drm/amdgpu/gfx12.1: " Alex Deucher
2026-01-14 16:46 ` [PATCH 13/42] drm/amdgpu/si_dma: " Alex Deucher
2026-01-14 16:46 ` [PATCH 14/42] drm/amdgpu/cik_sdma: " Alex Deucher
2026-01-14 16:47 ` [PATCH 15/42] drm/amdgpu/sdma2.4: " Alex Deucher
2026-01-14 16:47 ` [PATCH 16/42] drm/amdgpu/sdma3: " Alex Deucher
2026-01-14 16:47 ` [PATCH 17/42] drm/amdgpu/sdma4: " Alex Deucher
2026-01-14 16:47 ` [PATCH 18/42] drm/amdgpu/sdma4.4.2: " Alex Deucher
2026-01-14 16:47 ` [PATCH 19/42] drm/amdgpu/sdma5: " Alex Deucher
2026-01-14 16:47 ` [PATCH 20/42] drm/amdgpu/sdma5.2: " Alex Deucher
2026-01-14 16:47 ` [PATCH 21/42] drm/amdgpu/sdma6: " Alex Deucher
2026-01-14 16:47 ` [PATCH 22/42] drm/amdgpu/sdma7: " Alex Deucher
2026-01-14 16:47 ` [PATCH 23/42] drm/amdgpu/sdma7.1: " Alex Deucher
2026-01-14 16:47 ` [PATCH 24/42] drm/amdgpu: require a job to schedule an IB Alex Deucher
2026-01-14 16:47 ` [PATCH 25/42] drm/amdgpu: rename amdgpu_fence_driver_guilty_force_completion() Alex Deucher
2026-01-14 16:47 ` [PATCH 26/42] drm/amdgpu: mark fences with errors before ring reset Alex Deucher
2026-01-14 16:47 ` [PATCH 27/42] drm/amdgpu: don't call drm_sched_stop/start() in asic reset Alex Deucher
2026-01-14 16:47 ` [PATCH 28/42] drm/amdgpu: drop drm_sched_increase_karma() Alex Deucher
2026-01-14 16:47 ` [PATCH 29/42] drm/amdgpu: plumb timedout fence through to force completion Alex Deucher
2026-01-14 16:47 ` [PATCH 30/42] drm/amdgpu: simplify VCN reset helper Alex Deucher
2026-01-14 16:47 ` [PATCH 31/42] drm/amdgpu: change function signature for emit_pipeline_sync() Alex Deucher
2026-01-14 16:47 ` [PATCH 32/42] drm/amdgpu: drop extra parameter for vm_flush Alex Deucher
2026-01-14 16:47 ` [PATCH 33/42] drm/amdgpu: move need_ctx_switch into amdgpu_job Alex Deucher
2026-01-14 16:47 ` [PATCH 34/42] drm/amdgpu: store vm flush state in amdgpu_job Alex Deucher
2026-01-14 16:47 ` [PATCH 35/42] drm/amdgpu: split fence init and emit logic Alex Deucher
2026-01-14 16:47 ` Alex Deucher [this message]
2026-01-14 16:47 ` [PATCH 37/42] drm/amdgpu: split ib schedule and ib " Alex Deucher
2026-01-14 16:47 ` [PATCH 38/42] drm/amdgpu: move drm sched stop/start into amdgpu_job_timedout() Alex Deucher
2026-01-14 16:47 ` [PATCH 39/42] drm/amdgpu: add an all_instance_rings_reset ring flag Alex Deucher
2026-01-14 16:47 ` [PATCH 40/42] drm/amdgpu: add helper to save and restore ring state Alex Deucher
2026-01-14 16:47 ` [PATCH 41/42] drm/amdgpu: rework reset reemit handling Alex Deucher
2026-01-14 16:47 ` [PATCH 42/42] drm/amdgpu: simplify per queue reset code Alex Deucher
2026-01-14 20:47 ` [PATCH 00/42] Improvements for IB handling V2 Alex Deucher

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260114164727.15367-37-alexander.deucher@amd.com \
    --to=alexander.deucher@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox