From: Matthew Brost <matthew.brost@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: michal.mrozek@intel.com, himal.prasad.ghimiray@intel.com,
francois.dugast@intel.com, thomas.hellstrom@linux.intel.com,
matthew.auld@intel.com
Subject: [PATCH 2/3] drm/xe: Update xe_migrate_vram to support compression
Date: Mon, 14 Jul 2025 10:33:41 -0700 [thread overview]
Message-ID: <20250714173342.2997396-3-matthew.brost@intel.com> (raw)
In-Reply-To: <20250714173342.2997396-1-matthew.brost@intel.com>
While SVM does not currently support compression, other users of
xe_migrate_vram (e.g., devcoredump) expect data copied out of VRAM to be
read back uncompressed. Update xe_migrate_vram to handle compressed
VRAM: resolve compression on VRAM-to-SRAM copies and emit the CCS copy
when the backing object requires CCS pages.
Cc: stable@vger.kernel.org
Fixes: 9c44fd5f6e8a ("drm/xe: Add migrate layer functions for SVM support")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_migrate.c | 31 ++++++++++++++++++++++++-------
1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ba1cff2e4cda..936daa2b363d 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1613,7 +1613,8 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long len,
unsigned long sram_offset,
dma_addr_t *sram_addr, u64 vram_addr,
- const enum xe_migrate_copy_dir dir)
+ const enum xe_migrate_copy_dir dir,
+ bool needs_ccs_emit)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
@@ -1623,10 +1624,12 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
u64 src_L0_ofs, dst_L0_ofs;
struct xe_sched_job *job;
struct xe_bb *bb;
- u32 update_idx, pt_slot = 0;
+ u32 update_idx, pt_slot = 0, flush_flags = 0;
unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
PAGE_SIZE : 4;
+ bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ GRAPHICS_VER(xe) >= 20 && dir == XE_MIGRATE_COPY_TO_SRAM;
int err;
if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
@@ -1637,6 +1640,8 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
batch_size += pte_update_cmd_size(len);
batch_size += EMIT_COPY_DW;
+ if (needs_ccs_emit)
+ batch_size += EMIT_COPY_CCS_DW;
bb = xe_bb_new(gt, batch_size, use_usm_batch);
if (IS_ERR(bb)) {
@@ -1652,7 +1657,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
- src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+ src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, use_comp_pat);
dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
@@ -1661,6 +1666,17 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
+ if (needs_ccs_emit) {
+ if (dir == XE_MIGRATE_COPY_TO_VRAM)
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ false, dst_L0_ofs,
+ true, len, 0, true);
+ else
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ true, dst_L0_ofs,
+ false, len, 0, true);
+ }
+
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, use_usm_batch),
update_idx);
@@ -1669,7 +1685,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, 0);
+ xe_sched_job_add_migrate_flush(job, flush_flags);
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
@@ -1708,7 +1724,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
u64 dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
- XE_MIGRATE_COPY_TO_VRAM);
+ XE_MIGRATE_COPY_TO_VRAM, false);
}
/**
@@ -1729,7 +1745,7 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
dma_addr_t *dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
- XE_MIGRATE_COPY_TO_SRAM);
+ XE_MIGRATE_COPY_TO_SRAM, false);
}
static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
@@ -1890,7 +1906,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
dma_addr + current_page,
vram_addr, write ?
XE_MIGRATE_COPY_TO_VRAM :
- XE_MIGRATE_COPY_TO_SRAM);
+ XE_MIGRATE_COPY_TO_SRAM,
+ xe_bo_needs_ccs_pages(bo));
if (IS_ERR(__fence)) {
if (fence)
dma_fence_wait(fence, false);
--
2.34.1
next prev parent reply other threads:[~2025-07-14 17:32 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-07-14 17:33 [PATCH 0/3] SVM compression fixes Matthew Brost
2025-07-14 17:33 ` [PATCH 1/3] drm/xe: Disable compression on SVM Matthew Brost
2025-07-15 8:15 ` Matthew Auld
2025-07-15 8:26 ` Ghimiray, Himal Prasad
2025-07-15 15:55 ` Matthew Brost
2025-07-15 8:25 ` Ghimiray, Himal Prasad
2025-07-15 15:55 ` Matthew Brost
2025-07-14 17:33 ` Matthew Brost [this message]
2025-07-15 8:37 ` [PATCH 2/3] drm/xe: Update xe_migrate_vram to support compression Matthew Auld
2025-07-15 8:49 ` Matthew Auld
2025-07-15 15:58 ` Matthew Brost
2025-07-14 17:33 ` [PATCH 3/3] drm/xe: Enable compressed memory for SVM Matthew Brost
2025-07-14 17:43 ` ✗ CI.checkpatch: warning for SVM compression fixes Patchwork
2025-07-14 17:45 ` ✓ CI.KUnit: success " Patchwork
2025-07-14 18:25 ` ✓ Xe.CI.BAT: " Patchwork
2025-07-14 20:20 ` ✗ Xe.CI.Full: failure " Patchwork
2025-07-15 12:59 ` [PATCH 0/3] " Matthew Auld
2025-07-15 16:25 ` Matthew Brost
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250714173342.2997396-3-matthew.brost@intel.com \
--to=matthew.brost@intel.com \
--cc=francois.dugast@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=matthew.auld@intel.com \
--cc=michal.mrozek@intel.com \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox