From: nishit.sharma@intel.com
To: igt-dev@lists.freedesktop.org, nishit.sharma@intel.com,
sai.gowtham.ch@intel.com
Subject: [PATCH i-g-t v14 02/11] lib/xe: Add instance parameter to xe_vm_madvise
Date: Mon, 5 Jan 2026 08:47:41 +0000 [thread overview]
Message-ID: <20260105084750.190346-3-nishit.sharma@intel.com> (raw)
In-Reply-To: <20260105084750.190346-1-nishit.sharma@intel.com>
From: Nishit Sharma <nishit.sharma@intel.com>
tests/intel/xe_exec_system_allocator: Add parameter in madvise call
Add an 'instance' parameter to __xe_vm_madvise() and xe_vm_madvise() to
specify which VRAM instance should be targeted for memory advise
operations.
Compilation issues appear in the xe_exec_system_allocator test after adding
the 'instance' parameter to xe_vm_madvise(). As a fix, 0 is passed as the
instance argument in all xe_vm_madvise() calls in the xe_exec_system_allocator test.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
Reviewed-by: Pravalika Gurram <pravalika.gurram@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
lib/xe/xe_ioctl.c | 13 +++++++++----
lib/xe/xe_ioctl.h | 9 +++++++--
tests/intel/xe_exec_system_allocator.c | 8 ++++----
3 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 319853da0..e3efe12ad 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -714,7 +714,8 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
}
int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
- uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
+ uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy,
+ uint16_t instance)
{
struct drm_xe_madvise madvise = {
.type = type,
@@ -731,6 +732,7 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
madvise.preferred_mem_loc.devmem_fd = op_val;
madvise.preferred_mem_loc.migration_policy = policy;
+ madvise.preferred_mem_loc.region_instance = instance;
igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
madvise.preferred_mem_loc.devmem_fd);
break;
@@ -758,14 +760,17 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
* @type: type of attribute
* @op_val: fd/atomic value/pat index, depending upon type of operation
* @policy: Page migration policy
+ * @instance: vram instance
*
* Function initializes different members of struct drm_xe_madvise and calls
* MADVISE IOCTL .
*
- * Asserts in case of error returned by DRM_IOCTL_XE_MADVISE.
+ * Asserts in case of error returned by DRM_IOCTL_XE_MADVISE
*/
void xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
- uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
+ uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy,
+ uint16_t instance)
{
- igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy), 0);
+ igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy,
+ instance), 0);
}
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index a7fd43c9e..3ea651063 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -104,13 +104,18 @@ int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
uint32_t exec_queue, int64_t timeout);
int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
- uint32_t type, uint32_t op_val, uint16_t policy);
+ uint32_t type, uint32_t op_val, uint16_t policy, uint16_t instance);
void xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
- uint32_t type, uint32_t op_val, uint16_t policy);
+ uint32_t type, uint32_t op_val, uint16_t policy, uint16_t instance);
int xe_vm_number_vmas_in_range(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr);
int xe_vm_vma_attrs(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr,
struct drm_xe_mem_range_attr *mem_attr);
struct drm_xe_mem_range_attr
*xe_vm_get_mem_attr_values_in_range(int fd, uint32_t vm, uint64_t start,
uint64_t range, uint32_t *num_ranges);
+void xe_vm_bind_lr_sync(int fd, uint32_t vm, uint32_t bo,
+ uint64_t offset, uint64_t addr,
+ uint64_t size, uint32_t flags);
+void xe_vm_unbind_lr_sync(int fd, uint32_t vm, uint64_t offset,
+ uint64_t addr, uint64_t size);
#endif /* XE_IOCTL_H */
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
index ffa4ec3f5..28958eccc 100644
--- a/tests/intel/xe_exec_system_allocator.c
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -1164,7 +1164,7 @@ madvise_swizzle_op_exec(int fd, uint32_t vm, struct test_exec_data *data,
xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
preferred_loc,
- 0);
+ 0, 0);
}
static void
@@ -1172,7 +1172,7 @@ xe_vm_madvixe_pat_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
int pat_index)
{
xe_vm_madvise(fd, vm, addr, range, 0,
- DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0);
+ DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0, 0);
}
static void
@@ -1181,7 +1181,7 @@ xe_vm_madvise_atomic_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
{
xe_vm_madvise(fd, vm, addr, range, 0,
DRM_XE_MEM_RANGE_ATTR_ATOMIC,
- mem_attr, 0);
+ mem_attr, 0, 0);
}
static void
@@ -1190,7 +1190,7 @@ xe_vm_madvise_migrate_pages(int fd, uint32_t vm, uint64_t addr, uint64_t range)
xe_vm_madvise(fd, vm, addr, range, 0,
DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
- DRM_XE_MIGRATE_ALL_PAGES);
+ DRM_XE_MIGRATE_ALL_PAGES, 0);
}
static void
--
2.48.1
next prev parent reply other threads:[~2026-01-05 8:47 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-05 8:47 [PATCH i-g-t v14 00/11] Madvise feature in SVM for Multi-GPU configs nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 01/11] drm-uapi/xe: Sync with madvise interface nishit.sharma
2026-04-02 19:23 ` Dixit, Ashutosh
2026-01-05 8:47 ` nishit.sharma [this message]
2026-01-05 8:47 ` [PATCH i-g-t v14 03/11] lib/xe: Add synchronous helpers for VM bind/unbind operations nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 04/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU xGPU memory access test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 05/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU atomic operations nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 06/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU coherency test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 07/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU performance test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 08/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU fault handling test nishit.sharma
2026-01-05 8:53 ` Ch, Sai Gowtham
2026-01-05 8:47 ` [PATCH i-g-t v14 09/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU simultaneous access test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 10/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU conflicting madvise test nishit.sharma
2026-01-05 8:47 ` [PATCH i-g-t v14 11/11] tests/intel/xe_multigpu_svm: Add SVM multi-GPU migration test nishit.sharma
2026-01-05 14:44 ` ✗ Fi.CI.BUILD: failure for Madvise feature in SVM for Multi-GPU configs Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260105084750.190346-3-nishit.sharma@intel.com \
--to=nishit.sharma@intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=sai.gowtham.ch@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox