From: "Sharma, Nishit" <nishit.sharma@intel.com>
To: Varun Gupta <varun.gupta@intel.com>, <igt-dev@lists.freedesktop.org>
Cc: <arvind.yadav@intel.com>, <himal.prasad.ghimiray@intel.com>
Subject: Re: [PATCH i-g-t v2 2/4] tests/intel/xe_madvise: Add atomic-device subtest
Date: Mon, 11 May 2026 14:24:26 +0530 [thread overview]
Message-ID: <22bef27c-adc9-4747-b948-87f1a5501b5d@intel.com> (raw)
In-Reply-To: <20260511035310.32323-3-varun.gupta@intel.com>
On 5/11/2026 9:22 AM, Varun Gupta wrote:
> Validate that madvise ATOMIC_DEVICE allows GPU MI_ATOMIC_INC on SVM
> memory. The test creates a fault-mode VM with CPU_ADDR_MIRROR binding
> over heap memory allocated via aligned_alloc(). After setting
> ATOMIC_DEVICE via DRM_XE_MEM_RANGE_ATTR_ATOMIC madvise, the GPU
> executes MI_ATOMIC_INC through the page-fault handler which migrates
> pages to VRAM for device atomics.
>
> Also adds the shared atomic test infrastructure: struct
> atomic_data, atomic_build_batch() helper, timeout constants, and the
> atomic subtest group gated on VRAM and fault-mode support.
>
> Signed-off-by: Varun Gupta <varun.gupta@intel.com>
>
> v2: Add UNMAP of CPU_ADDR_MIRROR binding before xe_vm_destroy.
> Add pagefault count print before/after exec (Nishit).
> ---
> tests/intel/xe_madvise.c | 122 ++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 121 insertions(+), 1 deletion(-)
>
> diff --git a/tests/intel/xe_madvise.c b/tests/intel/xe_madvise.c
> index e79cafbff..f343f3c8c 100644
> --- a/tests/intel/xe_madvise.c
> +++ b/tests/intel/xe_madvise.c
> @@ -14,9 +14,12 @@
> #include "igt.h"
> #include "xe_drm.h"
>
> +#include "intel_gpu_commands.h"
> +#include "lib/igt_syncobj.h"
> +#include "lib/intel_reg.h"
> +#include "xe/xe_gt.h"
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> -#include "lib/igt_syncobj.h"
>
> /* Purgeable test constants */
> #define PURGEABLE_ADDR 0x1a0000
> @@ -27,6 +30,11 @@
> #define PURGEABLE_TEST_PATTERN 0xc0ffee
> #define PURGEABLE_DEAD_PATTERN 0xdead
>
> +/* Atomic test constants */
> +#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> +#define FIVE_SEC (5LL * NSEC_PER_SEC)
> +#define QUARTER_SEC (NSEC_PER_SEC / 4)
> +
> static bool xe_has_purgeable_support(int fd)
> {
> struct drm_xe_query_config *config = xe_config(fd);
> @@ -768,6 +776,107 @@ out:
> igt_skip("Unable to induce purge on this platform/config");
> }
>
> +/*
> + * Atomic madvise subtests — validate DRM_XE_MEM_RANGE_ATTR_ATOMIC
> + * modes (DEVICE, GLOBAL, CPU) on fault-mode SVM VMAs.
> + */
> +
> +struct atomic_data {
> + uint32_t batch[32];
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> +};
> +
> +static void atomic_build_batch(struct atomic_data *d, uint64_t gpu_addr)
> +{
> + uint64_t data_offset = (char *)&d->data - (char *)d;
> + uint64_t sdi_addr = gpu_addr + data_offset;
> + int b = 0;
> +
> + d->batch[b++] = MI_ATOMIC | MI_ATOMIC_INC;
> + d->batch[b++] = sdi_addr;
> + d->batch[b++] = sdi_addr >> 32;
> + d->batch[b++] = MI_BATCH_BUFFER_END;
> + igt_assert(b <= ARRAY_SIZE(d->batch));
> +}
> +
> +/**
> + * SUBTEST: atomic-device
> + * Description: madvise atomic device supports only GPU atomic operations,
> + * test executes GPU MI_ATOMIC_INC on SVM memory via fault handler
> + * Test category: functionality test
> + */
> +static void test_atomic_device(int fd, struct drm_xe_engine_class_instance *eci)
> +{
> + struct drm_xe_sync sync[1] = {
> + { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> + .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> + .timeline_value = USER_FENCE_VALUE },
> + };
> + struct drm_xe_exec exec = {
> + .num_batch_buffer = 1,
> + .num_syncs = 1,
> + .syncs = to_user_pointer(sync),
> + };
> + struct atomic_data *data;
> + uint32_t vm, exec_queue;
> + uint64_t addr;
> + size_t bo_size;
> + int va_bits;
> + int pf_count_before, pf_count_after;
> +
> + va_bits = xe_va_bits(fd);
> + vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> + DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> +
> + bo_size = xe_bb_size(fd, sizeof(*data));
> + data = aligned_alloc(bo_size, bo_size);
> + igt_assert(data);
> + memset(data, 0, bo_size);
> +
> + addr = to_user_pointer(data);
> +
> + /* Bind entire VA space as CPU_ADDR_MIRROR */
> + sync[0].addr = to_user_pointer(&data->vm_sync);
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
> + sync, 1, 0, 0);
> + xe_wait_ufence(fd, &data->vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data->vm_sync = 0;
> +
> + xe_vm_madvise(fd, vm, addr, bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_ATOMIC, DRM_XE_ATOMIC_DEVICE, 0, 0);
> +
> + atomic_build_batch(data, addr);
> +
> + exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> + exec.exec_queue_id = exec_queue;
> + exec.address = addr + ((char *)&data->batch - (char *)data);
> +
> + pf_count_before = xe_gt_stats_get_count(fd, eci->gt_id,
> + "svm_pagefault_count");
> +
> + sync[0].addr = to_user_pointer(&data->exec_sync);
> + xe_exec(fd, &exec);
> + xe_wait_ufence(fd, &data->exec_sync, USER_FENCE_VALUE,
> + exec_queue, FIVE_SEC);
> +
> + pf_count_after = xe_gt_stats_get_count(fd, eci->gt_id,
> + "svm_pagefault_count");
> + igt_info("Pagefault count: before=%d, after=%d\n",
> + pf_count_before, pf_count_after);
small nit: this will print the pagefault count every time, even when no
pagefault was generated. Add a check, e.g.:
if (pf_count_before != pf_count_after)
	igt_info("Pagefault count: before=%d, after=%d\n", ...);
> +
> + igt_assert_eq(data->data, 1);
> +
> + xe_exec_queue_destroy(fd, exec_queue);
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, 0, 0x1ull << va_bits,
> + DRM_XE_VM_BIND_OP_UNMAP, 0, NULL, 0, 0, 0);
> + free(data);
> + xe_vm_destroy(fd, vm);
> +}
> +
> int igt_main()
> {
> struct drm_xe_engine_class_instance *hwe;
> @@ -826,6 +935,17 @@ int igt_main()
> }
> }
>
> + igt_subtest_group() {
> + igt_fixture() {
> + igt_require(xe_has_vram(fd));
> + igt_require(!xe_supports_faults(fd));
> + }
> +
> + igt_subtest("atomic-device")
> + xe_for_each_engine(fd, hwe)
> + test_atomic_device(fd, hwe);
> + }
> +
> igt_fixture() {
> xe_device_put(fd);
> drm_close_driver(fd);
With above change LGTM:
Reviewed-by: Nishit Sharma <nishit.sharma@intel.com>
next prev parent reply other threads:[~2026-05-11 8:54 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-11 3:52 [PATCH i-g-t v2 0/4] tests/intel/xe_madvise: Add atomic madvise subtests Varun Gupta
2026-05-11 3:52 ` [PATCH i-g-t v2 1/4] tests/intel/xe_madvise: Generalize metadata and group purgeable subtests Varun Gupta
2026-05-11 8:51 ` Sharma, Nishit
2026-05-11 3:52 ` [PATCH i-g-t v2 2/4] tests/intel/xe_madvise: Add atomic-device subtest Varun Gupta
2026-05-11 8:54 ` Sharma, Nishit [this message]
2026-05-11 3:52 ` [PATCH i-g-t v2 3/4] tests/intel/xe_madvise: Add atomic-global subtest Varun Gupta
2026-05-11 9:06 ` Sharma, Nishit
2026-05-11 3:52 ` [PATCH i-g-t v2 4/4] tests/intel/xe_madvise: Add atomic-cpu subtest Varun Gupta
2026-05-11 9:08 ` Sharma, Nishit
2026-05-11 22:18 ` ✗ Fi.CI.BUILD: failure for tests/intel/xe_madvise: Add atomic madvise subtests (rev2) Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=22bef27c-adc9-4747-b948-87f1a5501b5d@intel.com \
--to=nishit.sharma@intel.com \
--cc=arvind.yadav@intel.com \
--cc=himal.prasad.ghimiray@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=varun.gupta@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox