From: Philip Yang <Philip.Yang@amd.com>
To: <amd-gfx@lists.freedesktop.org>
Cc: <Felix.Kuehling@amd.com>, <christian.koenig@amd.com>,
<Kent.Russell@amd.com>, <Andrew.Martin@amd.com>,
Philip Yang <Philip.Yang@amd.com>
Subject: [PATCH v2 2/2] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity
Date: Mon, 20 Apr 2026 09:37:05 -0400 [thread overview]
Message-ID: <20260420133705.3721315-3-Philip.Yang@amd.com> (raw)
In-Reply-To: <20260420133705.3721315-1-Philip.Yang@amd.com>
Refactor the NUMA-aware MTYPE override for VM page table entries:
- Move the override_vm_pte_flags call from the centralized
amdgpu_vm_pte_update_flags() into the individual CPU and SDMA update
backends, enabling per-PTE MTYPE override including for scattered
pages (pages_addr path).
- Move the APU, IP version, and direct-mapped eligibility checks from
runtime (gmc_v9_0_override_vm_pte_flags) to init time
(gmc_v9_0_set_gmc_funcs), caching the result in a new
adev->gmc.override_pte flag to avoid repeating the checks on every
PTE update.
- Compute the per-update override_pte flag in amdgpu_vm_update_range()
from the caller's allow_override and the cached gmc.override_pte.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 ++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 ++--
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 11 ++++++-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 9 ------
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11 ++++++-
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 35 +++++++++------------
7 files changed, 41 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6ab4c1e297fc..c6d7a9e54eb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -365,6 +365,8 @@ struct amdgpu_gmc {
bool flush_tlb_needs_extra_type_0;
bool flush_tlb_needs_extra_type_2;
bool flush_pasid_uses_kiq;
+
+ bool override_pte;
};
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 63156289ae7f..532b78701bbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1163,7 +1163,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.pages_addr = pages_addr;
params.unlocked = unlocked;
params.needs_flush = flush_tlb;
- params.allow_override = allow_override;
+ params.override_pte = allow_override && adev->gmc.override_pte;
INIT_LIST_HEAD(&params.tlb_flush_waitlist);
amdgpu_vm_eviction_lock(vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f33ea7f8509b..a0435468d0bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -296,10 +296,10 @@ struct amdgpu_vm_update_params {
bool needs_flush;
/**
- * @allow_override: true for memory that is not uncached: allows MTYPE
- * to be overridden for NUMA local memory.
+ * @override_pte: true for memory that is not uncached and gmc override function is
+ * implemented to allow MTYPE to be overridden for NUMA local memory.
*/
- bool allow_override;
+ bool override_pte;
/**
* @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index f078db3fef79..b31ff6f56f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -88,12 +88,21 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
+ if (!p->pages_addr && p->override_pte)
+ amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
+
for (i = 0; i < count; i++) {
+ u64 oflags = flags;
+
value = p->pages_addr ?
amdgpu_vm_map_gart(p->pages_addr, addr) :
addr;
+
+ if (p->pages_addr && p->override_pte)
+ amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, value, &oflags);
+
amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
- i, value, flags);
+ i, value, oflags);
addr += incr;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 31a437ce9570..883cc275f354 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -707,15 +707,6 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
if (level == AMDGPU_VM_PTB)
amdgpu_vm_pte_update_noretry_flags(adev, &flags);
- /* APUs mapping system memory may need different MTYPEs on different
- * NUMA nodes. Only do this for contiguous ranges that can be assumed
- * to be on the same NUMA node.
- */
- if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
- adev->gmc.gmc_funcs->override_vm_pte_flags &&
- num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
- amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
-
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 36805dcfa159..2eb64df6daa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -257,6 +257,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
}
if (!p->pages_addr) {
+ if (p->override_pte)
+ amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, addr, &flags);
+
/* set page commands needed */
amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
incr, flags);
@@ -275,8 +278,14 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
p->num_dw_left -= nptes * 2;
pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
for (i = 0; i < nptes; ++i, addr += incr) {
+ u64 oflags = flags;
+
pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
- pte[i] |= flags;
+
+ if (p->override_pte)
+ amdgpu_gmc_override_vm_pte_flags(p->adev, p->vm, pte[i], &oflags);
+
+ pte[i] |= oflags;
}
amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e7b78027002b..aca7841173f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1204,21 +1204,6 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
{
int local_node, nid;
- /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
- * memory can use more efficient MTYPEs.
- */
- if (!(adev->flags & AMD_IS_APU) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
- return;
-
- /* Only direct-mapped memory allows us to determine the NUMA node from
- * the DMA address.
- */
- if (!adev->ram_is_direct_mapped) {
- dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
- return;
- }
-
/* MTYPE_NC is the same default and can be overridden.
* MTYPE_UC will be present if the memory is extended-coherent
* and can also be overridden.
@@ -1231,11 +1216,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
return;
}
- /* FIXME: Only supported on native mode for now. For carve-out, the
- * NUMA affinity of the GPU/VM needs to come from the PCI info because
- * memory partitions are not associated with different NUMA nodes.
- */
- if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
+ if (vm->mem_id >= 0) {
local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
} else {
dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
@@ -1344,6 +1325,20 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
+
+ /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes, local system
+ * memory can use more efficient MTYPEs.
+ *
+ * APUs mapping system memory may need different MTYPEs on different
+ * NUMA nodes.
+ *
+ * Only direct-mapped memory allows us to determine the NUMA node from
+ * the DMA address.
+ */
+ adev->gmc.override_pte = adev->gmc.is_app_apu &&
+ num_possible_nodes() > 1 &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->ram_is_direct_mapped;
}
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
--
2.50.1
next prev parent reply other threads:[~2026-04-20 13:38 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-20 13:37 [PATCH v2 0/2] drm/amdgpu: per-PTE MTYPE override for NUMA locality Philip Yang
2026-04-20 13:37 ` [PATCH v2 1/2] drm/amdgpu: Move amdgpu_device_check_iommu_direct_map() earlier Philip Yang
2026-04-27 11:19 ` Christian König
2026-04-20 13:37 ` Philip Yang [this message]
2026-04-27 11:23 ` [PATCH v2 2/2] drm/amdgpu: move VM PTE MTYPE override to per-PTE granularity Christian König
2026-04-22 18:00 ` [PATCH v2 0/2] drm/amdgpu: per-PTE MTYPE override for NUMA locality Philip Yang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260420133705.3721315-3-Philip.Yang@amd.com \
--to=philip.yang@amd.com \
--cc=Andrew.Martin@amd.com \
--cc=Felix.Kuehling@amd.com \
--cc=Kent.Russell@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=christian.koenig@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox