From: "Hellstrom, Thomas" <thomas.hellstrom@intel.com>
To: "igt-dev@lists.freedesktop.org" <igt-dev@lists.freedesktop.org>,
	"Sharma,  Nishit" <nishit.sharma@intel.com>
Subject: Re: [PATCH i-g-t v7 03/10] tests/intel/xe_multi_gpusvm: Add SVM multi-GPU cross-GPU memory access test
Date: Mon, 17 Nov 2025 20:40:01 +0000
Message-ID: <a13b4524d3cabb04cdf847d53287c6f453a2c718.camel@intel.com>
In-Reply-To: <3f2239f2-1608-4343-808e-3acd60cd7545@intel.com>

On Mon, 2025-11-17 at 21:19 +0530, Sharma, Nishit wrote:
> 
> On 11/17/2025 6:30 PM, Hellstrom, Thomas wrote:
> > On Thu, 2025-11-13 at 16:33 +0000, nishit.sharma@intel.com wrote:
> > > From: Nishit Sharma <nishit.sharma@intel.com>
> > > 
> > > This test allocates a buffer in SVM, writes data to it from src
> > > GPU, and reads/verifies the data from dst GPU. Optionally, the CPU
> > > also reads or modifies the buffer and both GPUs verify the results,
> > > ensuring correct cross-GPU and CPU memory access in a multi-GPU
> > > environment.
> > > 
> > > Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> > > Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> > > ---
> > >   tests/intel/xe_multi_gpusvm.c | 373 ++++++++++++++++++++++++++++++++++
> > >   tests/meson.build             |   1 +
> > >   2 files changed, 374 insertions(+)
> > >   create mode 100644 tests/intel/xe_multi_gpusvm.c
> > > 
> > > diff --git a/tests/intel/xe_multi_gpusvm.c b/tests/intel/xe_multi_gpusvm.c
> > > new file mode 100644
> > > index 000000000..6614ea3d1
> > > --- /dev/null
> > > +++ b/tests/intel/xe_multi_gpusvm.c
> > > @@ -0,0 +1,373 @@
> > > +// SPDX-License-Identifier: MIT
> > > +/*
> > > + * Copyright © 2023 Intel Corporation
> > > + */
> > > +
> > > +#include <unistd.h>
> > > +
> > > +#include "drmtest.h"
> > > +#include "igt.h"
> > > +#include "igt_multigpu.h"
> > > +
> > > +#include "intel_blt.h"
> > > +#include "intel_mocs.h"
> > > +#include "intel_reg.h"
> > > +
> > > +#include "xe/xe_ioctl.h"
> > > +#include "xe/xe_query.h"
> > > +#include "xe/xe_util.h"
> > > +
> > > +/**
> > > + * TEST: Basic multi-gpu SVM testing
> > > + * Category: SVM
> > > + * Mega feature: Compute
> > > + * Sub-category: Compute tests
> > > + * Functionality: SVM p2p access, madvise and prefetch.
> > > + * Test category: functionality test
> > > + *
> > > + * SUBTEST: cross-gpu-mem-access
> > > + * Description:
> > > + *      This test creates two malloced regions, places the destination
> > > + *      region both remotely and locally and copies to it. Reads back to
> > > + *      system memory and checks the result.
> > > + *
> > > + */
> > > +
> > > +#define MAX_XE_REGIONS	8
> > > +#define MAX_XE_GPUS 8
> > > +#define NUM_LOOPS 1
> > > +#define BATCH_SIZE(_fd) ALIGN(SZ_8K, xe_get_default_alignment(_fd))
> > > +#define BIND_SYNC_VAL 0x686868
> > > +#define EXEC_SYNC_VAL 0x676767
> > > +#define COPY_SIZE SZ_64M
> > > +
> > > +struct xe_svm_gpu_info {
> > > +	bool supports_faults;
> > > +	int vram_regions[MAX_XE_REGIONS];
> > > +	unsigned int num_regions;
> > > +	unsigned int va_bits;
> > > +	int fd;
> > > +};
> > > +
> > > +struct multigpu_ops_args {
> > > +	bool prefetch_req;
> > > +	bool op_mod;
> > > +};
> > > +
> > > +typedef void (*gpu_pair_fn) (
> > > +		struct xe_svm_gpu_info *src,
> > > +		struct xe_svm_gpu_info *dst,
> > > +		struct drm_xe_engine_class_instance *eci,
> > > +		void *extra_args
> > > +);
> > > +
> > > +static void for_each_gpu_pair(int num_gpus,
> > > +			      struct xe_svm_gpu_info *gpus,
> > > +			      struct drm_xe_engine_class_instance *eci,
> > > +			      gpu_pair_fn fn,
> > > +			      void *extra_args);
> > > +
> > > +static void gpu_mem_access_wrapper(struct xe_svm_gpu_info *src,
> > > +				   struct xe_svm_gpu_info *dst,
> > > +				   struct drm_xe_engine_class_instance *eci,
> > > +				   void *extra_args);
> > > +
> > > +static void open_pagemaps(int fd, struct xe_svm_gpu_info *info);
> > > +
> > > +static void
> > > +create_vm_and_queue(struct xe_svm_gpu_info *gpu, struct drm_xe_engine_class_instance *eci,
> > > +		    uint32_t *vm, uint32_t *exec_queue)
> > > +{
> > > +	*vm = xe_vm_create(gpu->fd,
> > > +			   DRM_XE_VM_CREATE_FLAG_LR_MODE | DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> > > +	*exec_queue = xe_exec_queue_create(gpu->fd, *vm, eci, 0);
> > > +	xe_vm_bind_lr_sync(gpu->fd, *vm, 0, 0, 0, 1ull << gpu->va_bits,
> > > +			   DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR);
> > > +}
> > > +
> > > +static void
> > > +setup_sync(struct drm_xe_sync *sync, volatile uint64_t **sync_addr, uint64_t timeline_value)
> > > +{
> > > +	*sync_addr = malloc(sizeof(**sync_addr));
> > > +	igt_assert(*sync_addr);
> > > +	sync->flags = DRM_XE_SYNC_FLAG_SIGNAL;
> > > +	sync->type = DRM_XE_SYNC_TYPE_USER_FENCE;
> > > +	sync->addr = to_user_pointer((uint64_t *)*sync_addr);
> > > +	sync->timeline_value = timeline_value;
> > > +	**sync_addr = 0;
> > > +}
> > > +
> > > +static void
> > > +cleanup_vm_and_queue(struct xe_svm_gpu_info *gpu, uint32_t vm, uint32_t exec_queue)
> > > +{
> > > +	xe_vm_unbind_lr_sync(gpu->fd, vm, 0, 0, 1ull << gpu->va_bits);
> > > +	xe_exec_queue_destroy(gpu->fd, exec_queue);
> > > +	xe_vm_destroy(gpu->fd, vm);
> > > +}
> > > +
> > > +static void xe_multigpu_madvise(int src_fd, uint32_t vm, uint64_t addr, uint64_t size,
> > > +				uint64_t ext, uint32_t type, int dst_fd, uint16_t policy,
> > > +				uint16_t instance, uint32_t exec_queue, int local_fd,
> > > +				uint16_t local_vram)
> > > +{
> > > +	int ret;
> > > +
> > > +#define SYSTEM_MEMORY	0
> > Please use DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM.
> > A new define isn't necessary and it's also incorrect.
> Sure, will use that at required places.
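
Right. Just to be explicit, what I have in mind is something like the
sketch below. This is only a rough sketch: I'm assuming here that the
special location value goes in the devmem fd argument of the
xe_vm_madvise() helper from patch 01/10 and that the instance argument
doesn't matter in that case, so please double-check against the uAPI:

	/* Preferred location: system memory, no local define needed */
	ret = xe_vm_madvise(src_fd, vm, addr, size, ext, type,
			    DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, policy, 0);
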
> > 
> > > +	if (src_fd != dst_fd) {
> > > +		ret = xe_vm_madvise(src_fd, vm, addr, size, ext, type, dst_fd, policy, instance);
> > > +		if (ret == -ENOLINK) {
> > > +			igt_info("No fast interconnect between GPU0 and GPU1, falling back to local VRAM\n");
> > > +			ret = xe_vm_madvise(src_fd, vm, addr, size, ext, type, local_fd,
> > > +					    policy, local_vram);
> > Please use DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE

Note that this also means you can skip the last two parameters to the
function AFAICT.
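
I.e. roughly something like this (again only a sketch, with the same
assumption that the special location value is passed in the devmem fd
slot and the instance argument is unused then):

	if (ret == -ENOLINK) {
		igt_info("No fast interconnect between GPU0 and GPU1, falling back to local VRAM\n");
		ret = xe_vm_madvise(src_fd, vm, addr, size, ext, type,
				    DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
				    policy, 0);
	}

and then the local_fd and local_vram parameters (and their call-site
arguments) can be dropped from xe_multigpu_madvise().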

/Thomas

