* [igt-dev] [PATCH v4 01/20] drm-uapi/xe: Extend drm_xe_vm_bind_op
In reply to: [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof
From: Francois Dugast @ 2023-11-29 14:54 UTC
To: igt-dev
Align with commit ("drm/xe/uapi: Extend drm_xe_vm_bind_op")
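As an illustration, a minimal sketch of how a caller could chain an
extension through the new field (the zero extension name is a
placeholder; to_user_pointer() is the IGT helper used elsewhere in this
series, and the xe_user_extension layout is assumed from this uAPI
header):

	struct xe_user_extension ext = {
		.next_extension = 0,	/* last extension in the chain */
		.name = 0,		/* placeholder extension id */
	};
	struct drm_xe_vm_bind_op bind_op = {
		.extensions = to_user_pointer(&ext),
		/* .obj, .range, .addr, .op set up as before */
	};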
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 255b360a1..07ea94ed4 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -614,6 +614,9 @@ struct drm_xe_vm_destroy {
};
struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
/**
* @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
*/
--
2.34.1
* [igt-dev] [PATCH v4 02/20] xe_ioctl: Converge bo_create to the most used version
From: Francois Dugast @ 2023-11-29 14:54 UTC
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Let's unify the calls instead of having two separate
options for the same goal.
v2: Fix some xe_bo_create_flags arguments, remove extra
newline (Kamil Konieczny)
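The conversion pattern, taken from the hunks below, makes the region
choice explicit at each call site:

	/* before: the gt-based variant picked the region internally */
	bo = xe_bo_create(fd, gt, vm, size);

	/* after: the caller passes the region flags explicitly */
	bo = xe_bo_create_flags(fd, vm, size, vram_if_possible(fd, gt));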
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
lib/xe/xe_ioctl.c | 15 ---------------
lib/xe/xe_ioctl.h | 1 -
tests/intel/xe_perf_pmu.c | 4 ++--
tests/intel/xe_spin_batch.c | 2 +-
tests/intel/xe_vm.c | 8 ++++----
5 files changed, 7 insertions(+), 23 deletions(-)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 738c4ffdb..78d431ab2 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -253,21 +253,6 @@ uint32_t xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags)
return handle;
}
-uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size)
-{
- struct drm_xe_gem_create create = {
- .vm_id = vm,
- .size = size,
- .flags = vram_if_possible(fd, gt),
- };
- int err;
-
- err = igt_ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
- igt_assert_eq(err, 0);
-
- return create.handle;
-}
-
uint32_t xe_bind_exec_queue_create(int fd, uint32_t vm, uint64_t ext, bool async)
{
struct drm_xe_engine_class_instance instance = {
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index a9171bcf7..fb191d98f 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -67,7 +67,6 @@ void xe_vm_destroy(int fd, uint32_t vm);
uint32_t __xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags,
uint32_t *handle);
uint32_t xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags);
-uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size);
uint32_t xe_exec_queue_create(int fd, uint32_t vm,
struct drm_xe_engine_class_instance *instance,
uint64_t ext);
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index e9d05cf2b..196e4d2e6 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -103,7 +103,7 @@ static void test_any_engine_busyness(int fd, struct drm_xe_engine_class_instance
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+ bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
spin = xe_bo_map(fd, bo, bo_size);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -223,7 +223,7 @@ static void test_engine_group_busyness(int fd, int gt, int class, const char *na
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, gt, vm, bo_size);
+ bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < num_placements; i++) {
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index 6ab604d9b..261fde9af 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -169,7 +169,7 @@ static void xe_spin_fixed_duration(int fd)
exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
bo_size = ALIGN(sizeof(*spin) + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, 0, vm, bo_size);
+ bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
spin = xe_bo_map(fd, bo, bo_size);
spin_addr = intel_allocator_alloc_with_strategy(ahnd, bo, bo_size, 0,
ALLOC_STRATEGY_LOW_TO_HIGH);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 86664740f..b4577fb6e 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -267,7 +267,7 @@ static void test_partial_unbinds(int fd)
{
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
size_t bo_size = 3 * xe_get_default_alignment(fd);
- uint32_t bo = xe_bo_create(fd, 0, vm, bo_size);
+ uint32_t bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
uint64_t unbind_size = bo_size / 3;
uint64_t addr = 0x1a0000;
@@ -316,7 +316,7 @@ static void unbind_all(int fd, int n_vmas)
};
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create(fd, 0, vm, bo_size);
+ bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
for (i = 0; i < n_vmas; ++i)
xe_vm_bind_async(fd, vm, 0, bo, 0, addr + i * bo_size,
@@ -1577,9 +1577,9 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(map0 != MAP_FAILED);
igt_assert(map1 != MAP_FAILED);
} else {
- bo0 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+ bo0 = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
map0 = xe_bo_map(fd, bo0, bo_size);
- bo1 = xe_bo_create(fd, eci->gt_id, vm, bo_size);
+ bo1 = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
map1 = xe_bo_map(fd, bo1, bo_size);
}
memset(map0, 0, bo_size);
--
2.34.1
* [igt-dev] [PATCH v4 03/20] xe_ioctl: Rename *xe_bo_create_flags to simply xe_bo_create
From: Francois Dugast @ 2023-11-29 14:54 UTC
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Now that we have only one variant, we can unify on the
simplest name.
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
benchmarks/gem_wsim.c | 4 +--
lib/igt_draw.c | 6 ++---
lib/igt_fb.c | 6 ++---
lib/intel_batchbuffer.c | 6 ++---
lib/intel_blt.c | 2 +-
lib/intel_bufops.c | 2 +-
lib/xe/xe_ioctl.c | 8 +++---
lib/xe/xe_ioctl.h | 6 ++---
lib/xe/xe_spin.c | 8 +++---
tests/intel/api_intel_allocator.c | 4 +--
tests/intel/kms_big_fb.c | 22 ++++++++--------
tests/intel/kms_ccs.c | 4 +--
tests/intel/xe_ccs.c | 12 ++++-----
tests/intel/xe_copy_basic.c | 8 +++---
tests/intel/xe_dma_buf_sync.c | 4 +--
tests/intel/xe_drm_fdinfo.c | 6 ++---
tests/intel/xe_evict.c | 40 +++++++++++++++---------------
tests/intel/xe_evict_ccs.c | 6 ++---
tests/intel/xe_exec_balancer.c | 6 ++---
tests/intel/xe_exec_basic.c | 3 +--
tests/intel/xe_exec_compute_mode.c | 4 +--
tests/intel/xe_exec_fault_mode.c | 10 ++++----
tests/intel/xe_exec_reset.c | 16 ++++++------
tests/intel/xe_exec_store.c | 12 ++++-----
tests/intel/xe_exec_threads.c | 12 ++++-----
tests/intel/xe_exercise_blt.c | 4 +--
tests/intel/xe_intel_bb.c | 2 +-
tests/intel/xe_mmap.c | 32 ++++++++++++------------
tests/intel/xe_noexec_ping_pong.c | 4 +--
tests/intel/xe_perf_pmu.c | 4 +--
tests/intel/xe_pm.c | 6 ++---
tests/intel/xe_pm_residency.c | 4 +--
tests/intel/xe_prime_self_import.c | 28 ++++++++++-----------
tests/intel/xe_spin_batch.c | 2 +-
tests/intel/xe_vm.c | 34 ++++++++++++-------------
tests/intel/xe_waitfence.c | 20 +++++++--------
tests/kms_addfb_basic.c | 2 +-
tests/kms_getfb.c | 2 +-
38 files changed, 180 insertions(+), 181 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index b60cde177..cb19ad505 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -1745,8 +1745,8 @@ xe_alloc_step_batch(struct workload *wrk, struct w_step *w)
w->bb_size = ALIGN(PAGE_SIZE + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- w->bb_handle = xe_bo_create_flags(fd, vm->id, w->bb_size,
- visible_vram_if_possible(fd, eq->hwe_list[0].gt_id));
+ w->bb_handle = xe_bo_create(fd, vm->id, w->bb_size,
+ visible_vram_if_possible(fd, eq->hwe_list[0].gt_id));
w->xe.data = xe_bo_map(fd, w->bb_handle, w->bb_size);
w->xe.exec.address =
intel_allocator_alloc_with_strategy(vm->ahnd, w->bb_handle, w->bb_size,
diff --git a/lib/igt_draw.c b/lib/igt_draw.c
index 498c69a56..bad6f455a 100644
--- a/lib/igt_draw.c
+++ b/lib/igt_draw.c
@@ -795,9 +795,9 @@ static void draw_rect_render(int fd, struct cmd_data *cmd_data,
if (is_i915_device(fd))
tmp.handle = gem_create(fd, tmp.size);
else
- tmp.handle = xe_bo_create_flags(fd, 0,
- ALIGN(tmp.size, xe_get_default_alignment(fd)),
- visible_vram_if_possible(fd, 0));
+ tmp.handle = xe_bo_create(fd, 0,
+ ALIGN(tmp.size, xe_get_default_alignment(fd)),
+ visible_vram_if_possible(fd, 0));
tmp.stride = rect->w * pixel_size;
tmp.bpp = buf->bpp;
diff --git a/lib/igt_fb.c b/lib/igt_fb.c
index e70d2e3ce..f96dca7a4 100644
--- a/lib/igt_fb.c
+++ b/lib/igt_fb.c
@@ -1205,8 +1205,8 @@ static int create_bo_for_fb(struct igt_fb *fb, bool prefer_sysmem)
/* If we can't use fences, we won't use ggtt detiling later. */
igt_assert(err == 0 || err == -EOPNOTSUPP);
} else if (is_xe_device(fd)) {
- fb->gem_handle = xe_bo_create_flags(fd, 0, fb->size,
- visible_vram_if_possible(fd, 0));
+ fb->gem_handle = xe_bo_create(fd, 0, fb->size,
+ visible_vram_if_possible(fd, 0));
} else if (is_vc4_device(fd)) {
fb->gem_handle = igt_vc4_create_bo(fd, fb->size);
@@ -2903,7 +2903,7 @@ static void blitcopy(const struct igt_fb *dst_fb,
bb_size = ALIGN(bb_size + xe_cs_prefetch_size(dst_fb->fd),
xe_get_default_alignment(dst_fb->fd));
- xe_bb = xe_bo_create_flags(dst_fb->fd, 0, bb_size, mem_region);
+ xe_bb = xe_bo_create(dst_fb->fd, 0, bb_size, mem_region);
}
for (int i = 0; i < dst_fb->num_planes - dst_cc; i++) {
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 570b35072..c38eda771 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -947,7 +947,7 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
ibb->alignment = xe_get_default_alignment(fd);
size = ALIGN(size, ibb->alignment);
- ibb->handle = xe_bo_create_flags(fd, 0, size, visible_vram_if_possible(fd, 0));
+ ibb->handle = xe_bo_create(fd, 0, size, visible_vram_if_possible(fd, 0));
/* Limit to 48-bit due to MI_* address limitation */
ibb->gtt_size = 1ull << min_t(uint32_t, xe_va_bits(fd), 48);
@@ -1405,8 +1405,8 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
if (ibb->driver == INTEL_DRIVER_I915)
ibb->handle = gem_create(ibb->fd, ibb->size);
else
- ibb->handle = xe_bo_create_flags(ibb->fd, 0, ibb->size,
- visible_vram_if_possible(ibb->fd, 0));
+ ibb->handle = xe_bo_create(ibb->fd, 0, ibb->size,
+ visible_vram_if_possible(ibb->fd, 0));
/* Reacquire offset for RELOC and SIMPLE */
if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE ||
diff --git a/lib/intel_blt.c b/lib/intel_blt.c
index 2edcd72f3..36830fb3e 100644
--- a/lib/intel_blt.c
+++ b/lib/intel_blt.c
@@ -1807,7 +1807,7 @@ blt_create_object(const struct blt_copy_data *blt, uint32_t region,
flags |= DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
size = ALIGN(size, xe_get_default_alignment(blt->fd));
- handle = xe_bo_create_flags(blt->fd, 0, size, flags);
+ handle = xe_bo_create(blt->fd, 0, size, flags);
} else {
igt_assert(__gem_create_in_memory_regions(blt->fd, &handle,
&size, region) == 0);
diff --git a/lib/intel_bufops.c b/lib/intel_bufops.c
index 2c91adb88..6f3a77f47 100644
--- a/lib/intel_bufops.c
+++ b/lib/intel_bufops.c
@@ -920,7 +920,7 @@ static void __intel_buf_init(struct buf_ops *bops,
igt_assert_eq(__gem_create(bops->fd, &size, &buf->handle), 0);
} else {
size = ALIGN(size, xe_get_default_alignment(bops->fd));
- buf->handle = xe_bo_create_flags(bops->fd, 0, size, region);
+ buf->handle = xe_bo_create(bops->fd, 0, size, region);
}
}
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 78d431ab2..63fa2ae25 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -226,8 +226,8 @@ void xe_vm_destroy(int fd, uint32_t vm)
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy), 0);
}
-uint32_t __xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags,
- uint32_t *handle)
+uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
+ uint32_t *handle)
{
struct drm_xe_gem_create create = {
.vm_id = vm,
@@ -244,11 +244,11 @@ uint32_t __xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags
return 0;
}
-uint32_t xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags)
+uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags)
{
uint32_t handle;
- igt_assert_eq(__xe_bo_create_flags(fd, vm, size, flags, &handle), 0);
+ igt_assert_eq(__xe_bo_create(fd, vm, size, flags, &handle), 0);
return handle;
}
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index fb191d98f..1ec29c2c5 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -64,9 +64,9 @@ void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
uint32_t bo, struct drm_xe_sync *sync,
uint32_t num_syncs);
void xe_vm_destroy(int fd, uint32_t vm);
-uint32_t __xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags,
- uint32_t *handle);
-uint32_t xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags);
+uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
+ uint32_t *handle);
+uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags);
uint32_t xe_exec_queue_create(int fd, uint32_t vm,
struct drm_xe_engine_class_instance *instance,
uint64_t ext);
diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
index cfc663acc..828938434 100644
--- a/lib/xe/xe_spin.c
+++ b/lib/xe/xe_spin.c
@@ -219,8 +219,8 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
spin->engine = xe_exec_queue_create_class(fd, spin->vm, DRM_XE_ENGINE_CLASS_COPY);
}
- spin->handle = xe_bo_create_flags(fd, spin->vm, bo_size,
- visible_vram_if_possible(fd, 0));
+ spin->handle = xe_bo_create(fd, spin->vm, bo_size,
+ visible_vram_if_possible(fd, 0));
xe_spin = xe_bo_map(fd, spin->handle, bo_size);
addr = intel_allocator_alloc_with_strategy(ahnd, spin->handle, bo_size, 0, ALLOC_STRATEGY_LOW_TO_HIGH);
xe_vm_bind_sync(fd, spin->vm, spin->handle, 0, addr, bo_size);
@@ -298,8 +298,8 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
vm = xe_vm_create(fd, 0, 0);
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, hwe->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, hwe->gt_id));
spin = xe_bo_map(fd, bo, 0x1000);
xe_vm_bind_sync(fd, vm, bo, 0, addr, bo_size);
diff --git a/tests/intel/api_intel_allocator.c b/tests/intel/api_intel_allocator.c
index f3fcf8a34..158fd86a1 100644
--- a/tests/intel/api_intel_allocator.c
+++ b/tests/intel/api_intel_allocator.c
@@ -468,8 +468,8 @@ static void __simple_allocs(int fd)
size = (rand() % 4 + 1) * 0x1000;
if (is_xe)
- handles[i] = xe_bo_create_flags(fd, 0, size,
- system_memory(fd));
+ handles[i] = xe_bo_create(fd, 0, size,
+ system_memory(fd));
else
handles[i] = gem_create(fd, size);
diff --git a/tests/intel/kms_big_fb.c b/tests/intel/kms_big_fb.c
index 82aff532b..b627ce659 100644
--- a/tests/intel/kms_big_fb.c
+++ b/tests/intel/kms_big_fb.c
@@ -776,10 +776,10 @@ test_size_overflow(data_t *data)
if (is_i915_device(data->drm_fd))
bo = gem_buffer_create_fb_obj(data->drm_fd, (1ULL << 32) - 4096);
else
- bo = xe_bo_create_flags(data->drm_fd, 0,
- ALIGN(((1ULL << 32) - 4096),
- xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ bo = xe_bo_create(data->drm_fd, 0,
+ ALIGN(((1ULL << 32) - 4096),
+ xe_get_default_alignment(data->drm_fd)),
+ vram_if_possible(data->drm_fd, 0));
igt_require(bo);
ret = __kms_addfb(data->drm_fd, bo,
@@ -820,10 +820,10 @@ test_size_offset_overflow(data_t *data)
if (is_i915_device(data->drm_fd))
bo = gem_buffer_create_fb_obj(data->drm_fd, (1ULL << 32) - 4096);
else
- bo = xe_bo_create_flags(data->drm_fd, 0,
- ALIGN(((1ULL << 32) - 4096),
- xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ bo = xe_bo_create(data->drm_fd, 0,
+ ALIGN(((1ULL << 32) - 4096),
+ xe_get_default_alignment(data->drm_fd)),
+ vram_if_possible(data->drm_fd, 0));
igt_require(bo);
offsets[0] = 0;
@@ -885,9 +885,9 @@ test_addfb(data_t *data)
if (is_i915_device(data->drm_fd))
bo = gem_buffer_create_fb_obj(data->drm_fd, size);
else
- bo = xe_bo_create_flags(data->drm_fd, 0,
- ALIGN(size, xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ bo = xe_bo_create(data->drm_fd, 0,
+ ALIGN(size, xe_get_default_alignment(data->drm_fd)),
+ vram_if_possible(data->drm_fd, 0));
igt_require(bo);
if (is_i915_device(data->drm_fd) && intel_display_ver(data->devid) < 4)
diff --git a/tests/intel/kms_ccs.c b/tests/intel/kms_ccs.c
index 7a3fd7cf6..a5a8abb28 100644
--- a/tests/intel/kms_ccs.c
+++ b/tests/intel/kms_ccs.c
@@ -452,8 +452,8 @@ static void test_bad_ccs_plane(data_t *data, int width, int height, int ccs_plan
if (data->flags & TEST_BAD_CCS_HANDLE) {
bad_ccs_bo = is_i915_device(data->drm_fd) ?
gem_create(data->drm_fd, fb.size) :
- xe_bo_create_flags(data->drm_fd, 0, fb.size,
- visible_vram_if_possible(data->drm_fd, 0));
+ xe_bo_create(data->drm_fd, 0, fb.size,
+ visible_vram_if_possible(data->drm_fd, 0));
f.handles[ccs_plane] = bad_ccs_bo;
}
diff --git a/tests/intel/xe_ccs.c b/tests/intel/xe_ccs.c
index 465f67e23..ceecba416 100644
--- a/tests/intel/xe_ccs.c
+++ b/tests/intel/xe_ccs.c
@@ -102,8 +102,8 @@ static void surf_copy(int xe,
igt_assert(mid->compression);
ccscopy = (uint32_t *) malloc(ccssize);
- ccs = xe_bo_create_flags(xe, 0, ccssize, sysmem);
- ccs2 = xe_bo_create_flags(xe, 0, ccssize, sysmem);
+ ccs = xe_bo_create(xe, 0, ccssize, sysmem);
+ ccs2 = xe_bo_create(xe, 0, ccssize, sysmem);
blt_ctrl_surf_copy_init(xe, &surf);
surf.print_bb = param.print_bb;
@@ -111,7 +111,7 @@ static void surf_copy(int xe,
uc_mocs, BLT_INDIRECT_ACCESS);
blt_set_ctrl_surf_object(&surf.dst, ccs, sysmem, ccssize, uc_mocs, DIRECT_ACCESS);
bb_size = xe_get_default_alignment(xe);
- bb1 = xe_bo_create_flags(xe, 0, bb_size, sysmem);
+ bb1 = xe_bo_create(xe, 0, bb_size, sysmem);
blt_set_batch(&surf.bb, bb1, bb_size, sysmem);
blt_ctrl_surf_copy(xe, ctx, NULL, ahnd, &surf);
intel_ctx_xe_sync(ctx, true);
@@ -166,7 +166,7 @@ static void surf_copy(int xe,
blt_set_copy_object(&blt.dst, dst);
blt_set_object_ext(&ext.src, mid->compression_type, mid->x2, mid->y2, SURFACE_TYPE_2D);
blt_set_object_ext(&ext.dst, 0, dst->x2, dst->y2, SURFACE_TYPE_2D);
- bb2 = xe_bo_create_flags(xe, 0, bb_size, sysmem);
+ bb2 = xe_bo_create(xe, 0, bb_size, sysmem);
blt_set_batch(&blt.bb, bb2, bb_size, sysmem);
blt_block_copy(xe, ctx, NULL, ahnd, &blt, &ext);
intel_ctx_xe_sync(ctx, true);
@@ -297,7 +297,7 @@ static void block_copy(int xe,
uint8_t uc_mocs = intel_get_uc_mocs_index(xe);
int result;
- bb = xe_bo_create_flags(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1);
if (!blt_uses_extended_block_copy(xe))
pext = NULL;
@@ -418,7 +418,7 @@ static void block_multicopy(int xe,
uint8_t uc_mocs = intel_get_uc_mocs_index(xe);
int result;
- bb = xe_bo_create_flags(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1);
if (!blt_uses_extended_block_copy(xe))
pext3 = NULL;
diff --git a/tests/intel/xe_copy_basic.c b/tests/intel/xe_copy_basic.c
index 191c29155..715f7d3b5 100644
--- a/tests/intel/xe_copy_basic.c
+++ b/tests/intel/xe_copy_basic.c
@@ -52,7 +52,7 @@ mem_copy(int fd, uint32_t src_handle, uint32_t dst_handle, const intel_ctx_t *ct
uint32_t bb;
int result;
- bb = xe_bo_create_flags(fd, 0, bb_size, region);
+ bb = xe_bo_create(fd, 0, bb_size, region);
blt_mem_init(fd, &mem);
blt_set_mem_object(&mem.src, src_handle, size, 0, width, height,
@@ -102,7 +102,7 @@ mem_set(int fd, uint32_t dst_handle, const intel_ctx_t *ctx, uint32_t size,
uint32_t bb;
uint8_t *result;
- bb = xe_bo_create_flags(fd, 0, bb_size, region);
+ bb = xe_bo_create(fd, 0, bb_size, region);
blt_mem_init(fd, &mem);
blt_set_mem_object(&mem.dst, dst_handle, size, 0, width, height, region,
dst_mocs, M_LINEAR, COMPRESSION_DISABLED);
@@ -132,8 +132,8 @@ static void copy_test(int fd, uint32_t size, enum blt_cmd_type cmd, uint32_t reg
uint32_t bo_size = ALIGN(size, xe_get_default_alignment(fd));
intel_ctx_t *ctx;
- src_handle = xe_bo_create_flags(fd, 0, bo_size, region);
- dst_handle = xe_bo_create_flags(fd, 0, bo_size, region);
+ src_handle = xe_bo_create(fd, 0, bo_size, region);
+ dst_handle = xe_bo_create(fd, 0, bo_size, region);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
exec_queue = xe_exec_queue_create(fd, vm, &inst, 0);
ctx = intel_ctx_xe(fd, vm, exec_queue, 0, 0, 0);
diff --git a/tests/intel/xe_dma_buf_sync.c b/tests/intel/xe_dma_buf_sync.c
index 0d835dddb..ac9d9d767 100644
--- a/tests/intel/xe_dma_buf_sync.c
+++ b/tests/intel/xe_dma_buf_sync.c
@@ -119,8 +119,8 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd[0]),
xe_get_default_alignment(fd[0]));
for (i = 0; i < n_bo; ++i) {
- bo[i] = xe_bo_create_flags(fd[0], 0, bo_size,
- visible_vram_if_possible(fd[0], hwe0->gt_id));
+ bo[i] = xe_bo_create(fd[0], 0, bo_size,
+ visible_vram_if_possible(fd[0], hwe0->gt_id));
dma_buf_fd[i] = prime_handle_to_fd(fd[0], bo[i]);
import_bo[i] = prime_fd_to_handle(fd[1], dma_buf_fd[i]);
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 4ef30cf49..8f737a533 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -85,7 +85,7 @@ static void test_active(int fd, struct drm_xe_engine_class_instance *eci)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].active;
- bo = xe_bo_create_flags(fd, vm, bo_size, region);
+ bo = xe_bo_create(fd, vm, bo_size, region);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < N_EXEC_QUEUES; i++) {
@@ -185,7 +185,7 @@ static void test_shared(int xe)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].shared;
- bo = xe_bo_create_flags(xe, 0, BO_SIZE, region);
+ bo = xe_bo_create(xe, 0, BO_SIZE, region);
flink.handle = bo;
ret = igt_ioctl(xe, DRM_IOCTL_GEM_FLINK, &flink);
@@ -232,7 +232,7 @@ static void test_total_resident(int xe)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].shared;
- handle = xe_bo_create_flags(xe, vm, BO_SIZE, region);
+ handle = xe_bo_create(xe, vm, BO_SIZE, region);
xe_vm_bind_sync(xe, vm, handle, 0, addr, BO_SIZE);
ret = igt_parse_drm_fdinfo(xe, &info, NULL, 0, NULL, 0);
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index 6d953e58b..a9d501d5f 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -99,18 +99,18 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
i < n_execs / 8 ? 0 : vm;
if (flags & MULTI_VM) {
- __bo = bo[i] = xe_bo_create_flags(fd, 0,
- bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ __bo = bo[i] = xe_bo_create(fd, 0,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id));
} else if (flags & THREADED) {
- __bo = bo[i] = xe_bo_create_flags(fd, vm,
- bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ __bo = bo[i] = xe_bo_create(fd, vm,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id));
} else {
- __bo = bo[i] = xe_bo_create_flags(fd, _vm,
- bo_size,
- visible_vram_memory(fd, eci->gt_id) |
- system_memory(fd));
+ __bo = bo[i] = xe_bo_create(fd, _vm,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id) |
+ system_memory(fd));
}
} else {
__bo = bo[i % (n_execs / 2)];
@@ -275,18 +275,18 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
i < n_execs / 8 ? 0 : vm;
if (flags & MULTI_VM) {
- __bo = bo[i] = xe_bo_create_flags(fd, 0,
- bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ __bo = bo[i] = xe_bo_create(fd, 0,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id));
} else if (flags & THREADED) {
- __bo = bo[i] = xe_bo_create_flags(fd, vm,
- bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ __bo = bo[i] = xe_bo_create(fd, vm,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id));
} else {
- __bo = bo[i] = xe_bo_create_flags(fd, _vm,
- bo_size,
- visible_vram_memory(fd, eci->gt_id) |
- system_memory(fd));
+ __bo = bo[i] = xe_bo_create(fd, _vm,
+ bo_size,
+ visible_vram_memory(fd, eci->gt_id) |
+ system_memory(fd));
}
} else {
__bo = bo[i % (n_execs / 2)];
diff --git a/tests/intel/xe_evict_ccs.c b/tests/intel/xe_evict_ccs.c
index d7244f620..35a588521 100644
--- a/tests/intel/xe_evict_ccs.c
+++ b/tests/intel/xe_evict_ccs.c
@@ -82,7 +82,7 @@ static void copy_obj(struct blt_copy_data *blt,
w = src_obj->x2;
h = src_obj->y2;
- bb = xe_bo_create_flags(fd, 0, bb_size, visible_vram_memory(fd, 0));
+ bb = xe_bo_create(fd, 0, bb_size, visible_vram_memory(fd, 0));
blt->color_depth = CD_32bit;
blt->print_bb = params.print_bb;
@@ -274,8 +274,8 @@ static void evict_single(int fd, int child, const struct config *config)
}
if (config->flags & TEST_SIMPLE) {
- big_obj = xe_bo_create_flags(fd, vm, kb_left * SZ_1K,
- vram_memory(fd, 0));
+ big_obj = xe_bo_create(fd, vm, kb_left * SZ_1K,
+ vram_memory(fd, 0));
break;
}
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index 8a0165b8c..da34e117d 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -70,7 +70,7 @@ static void test_all_active(int fd, int gt, int class)
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < num_placements; i++) {
@@ -224,7 +224,7 @@ test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
}
memset(data, 0, bo_size);
} else {
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
}
@@ -452,7 +452,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
igt_assert(data);
}
} else {
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index a401f0165..841696b68 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -140,8 +140,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & DEFER_ALLOC)
bo_flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
- bo = xe_bo_create_flags(fd, n_vm == 1 ? vm[0] : 0,
- bo_size, bo_flags);
+ bo = xe_bo_create(fd, n_vm == 1 ? vm[0] : 0, bo_size, bo_flags);
if (!(flags & DEFER_BIND))
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index 20d3fc6e8..beb962f79 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -141,8 +141,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(data);
}
} else {
- bo = xe_bo_create_flags(fd, flags & VM_FOR_BO ? vm : 0,
- bo_size, visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, flags & VM_FOR_BO ? vm : 0,
+ bo_size, visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 92d552f97..903ad430d 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -151,12 +151,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
}
} else {
if (flags & PREFETCH)
- bo = xe_bo_create_flags(fd, 0, bo_size,
- all_memory_regions(fd) |
- visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, 0, bo_size,
+ all_memory_regions(fd) |
+ visible_vram_if_possible(fd, 0));
else
- bo = xe_bo_create_flags(fd, 0, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, 0, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 195e62911..704690e83 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -50,8 +50,8 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
spin = xe_bo_map(fd, bo, bo_size);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -181,7 +181,7 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -367,8 +367,8 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -534,8 +534,8 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
memset(data, 0, bo_size);
@@ -661,7 +661,7 @@ static void submit_jobs(struct gt_thread_data *t)
uint32_t bo;
uint32_t *data;
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
data = xe_bo_map(fd, bo, bo_size);
data[0] = MI_BATCH_BUFFER_END;
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 9c14bfd14..bcc4de8d0 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -81,8 +81,8 @@ static void store(int fd)
xe_get_default_alignment(fd));
hw_engine = xe_hw_engine(fd, 1);
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, hw_engine->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, hw_engine->gt_id));
xe_vm_bind_async(fd, vm, hw_engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
data = xe_bo_map(fd, bo, bo_size);
@@ -150,8 +150,8 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
sync[0].handle = syncobj_create(fd, 0);
for (i = 0; i < count; i++) {
- bo[i] = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo[i] = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
dst_offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
bo_size, 0,
@@ -235,8 +235,8 @@ static void store_all(int fd, int gt, int class)
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, 0));
data = xe_bo_map(fd, bo, bo_size);
xe_for_each_hw_engine(fd, hwe) {
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index bb979b18c..a9b0c0b09 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -106,8 +106,8 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
igt_assert(data);
}
} else {
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
@@ -307,8 +307,8 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
igt_assert(data);
}
} else {
- bo = xe_bo_create_flags(fd, 0, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, 0, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
@@ -510,8 +510,8 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
igt_assert(data);
}
} else {
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exercise_blt.c b/tests/intel/xe_exercise_blt.c
index fd310138d..9c69be3ef 100644
--- a/tests/intel/xe_exercise_blt.c
+++ b/tests/intel/xe_exercise_blt.c
@@ -125,7 +125,7 @@ static void fast_copy_emit(int xe, const intel_ctx_t *ctx,
uint32_t bb, width = param.width, height = param.height;
int result;
- bb = xe_bo_create_flags(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1);
blt_copy_init(xe, &bltinit);
src = blt_create_object(&bltinit, region1, width, height, bpp, 0,
@@ -184,7 +184,7 @@ static void fast_copy(int xe, const intel_ctx_t *ctx,
uint32_t width = param.width, height = param.height;
int result;
- bb = xe_bo_create_flags(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1);
blt_copy_init(xe, &blt);
src = blt_create_object(&blt, region1, width, height, bpp, 0,
diff --git a/tests/intel/xe_intel_bb.c b/tests/intel/xe_intel_bb.c
index d66996cd5..a3a315297 100644
--- a/tests/intel/xe_intel_bb.c
+++ b/tests/intel/xe_intel_bb.c
@@ -396,7 +396,7 @@ static void create_in_region(struct buf_ops *bops, uint64_t region)
intel_bb_set_debug(ibb, true);
size = xe_min_page_size(xe, system_memory(xe));
- handle = xe_bo_create_flags(xe, 0, size, system_memory(xe));
+ handle = xe_bo_create(xe, 0, size, system_memory(xe));
intel_buf_init_full(bops, handle, &buf,
width/4, height, 32, 0,
I915_TILING_NONE, 0,
diff --git a/tests/intel/xe_mmap.c b/tests/intel/xe_mmap.c
index 7e7e43c00..a805eabda 100644
--- a/tests/intel/xe_mmap.c
+++ b/tests/intel/xe_mmap.c
@@ -52,7 +52,7 @@ test_mmap(int fd, uint32_t flags)
igt_require_f(flags, "Device doesn't support such memory region\n");
- bo = xe_bo_create_flags(fd, 0, 4096, flags);
+ bo = xe_bo_create(fd, 0, 4096, flags);
map = xe_bo_map(fd, bo, 4096);
strcpy(map, "Write some data to the BO!");
@@ -72,8 +72,8 @@ static void test_bad_flags(int fd)
{
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
- .handle = xe_bo_create_flags(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ .handle = xe_bo_create(fd, 0, size,
+ visible_vram_if_possible(fd, 0)),
.flags = -1u,
};
@@ -92,8 +92,8 @@ static void test_bad_extensions(int fd)
uint64_t size = xe_get_default_alignment(fd);
struct xe_user_extension ext;
struct drm_xe_gem_mmap_offset mmo = {
- .handle = xe_bo_create_flags(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ .handle = xe_bo_create(fd, 0, size,
+ visible_vram_if_possible(fd, 0)),
};
mmo.extensions = to_user_pointer(&ext);
@@ -113,8 +113,8 @@ static void test_bad_object(int fd)
{
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
- .handle = xe_bo_create_flags(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ .handle = xe_bo_create(fd, 0, size,
+ visible_vram_if_possible(fd, 0)),
};
mmo.handle = 0xdeadbeef;
@@ -159,13 +159,13 @@ static void test_small_bar(int fd)
uint32_t *map;
/* 2BIG invalid case */
- igt_assert_neq(__xe_bo_create_flags(fd, 0, visible_size + 4096,
- visible_vram_memory(fd, 0), &bo),
+ igt_assert_neq(__xe_bo_create(fd, 0, visible_size + 4096,
+ visible_vram_memory(fd, 0), &bo),
0);
/* Normal operation */
- bo = xe_bo_create_flags(fd, 0, visible_size / 4,
- visible_vram_memory(fd, 0));
+ bo = xe_bo_create(fd, 0, visible_size / 4,
+ visible_vram_memory(fd, 0));
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
@@ -176,9 +176,9 @@ static void test_small_bar(int fd)
gem_close(fd, bo);
/* Normal operation with system memory spilling */
- bo = xe_bo_create_flags(fd, 0, visible_size,
- visible_vram_memory(fd, 0) |
- system_memory(fd));
+ bo = xe_bo_create(fd, 0, visible_size,
+ visible_vram_memory(fd, 0) |
+ system_memory(fd));
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
@@ -189,8 +189,8 @@ static void test_small_bar(int fd)
gem_close(fd, bo);
/* Bogus operation with SIGBUS */
- bo = xe_bo_create_flags(fd, 0, visible_size + 4096,
- vram_memory(fd, 0));
+ bo = xe_bo_create(fd, 0, visible_size + 4096,
+ vram_memory(fd, 0));
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index 9c2a70ff3..88ef39783 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -70,8 +70,8 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
(unsigned long) bo_size,
(unsigned int) vm[i]);
- bo[i][j] = xe_bo_create_flags(fd, vm[i], bo_size,
- vram_memory(fd, 0));
+ bo[i][j] = xe_bo_create(fd, vm[i], bo_size,
+ vram_memory(fd, 0));
xe_vm_bind(fd, vm[i], bo[i][j], 0, 0x40000 + j*bo_size,
bo_size, NULL, 0);
}
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index 196e4d2e6..9f1627727 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -103,7 +103,7 @@ static void test_any_engine_busyness(int fd, struct drm_xe_engine_class_instance
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
spin = xe_bo_map(fd, bo, bo_size);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -223,7 +223,7 @@ static void test_engine_group_busyness(int fd, int gt, int class, const char *na
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < num_placements; i++) {
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index 9423984cc..9bfe1acad 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -271,8 +271,8 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
if (check_rpm && runtime_usage_available(device.pci_xe))
rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
- bo = xe_bo_create_flags(device.fd_xe, vm, bo_size,
- visible_vram_if_possible(device.fd_xe, eci->gt_id));
+ bo = xe_bo_create(device.fd_xe, vm, bo_size,
+ visible_vram_if_possible(device.fd_xe, eci->gt_id));
data = xe_bo_map(device.fd_xe, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -409,7 +409,7 @@ static void test_vram_d3cold_threshold(device_t device, int sysfs_fd)
threshold = vram_used_mb + (SIZE / 1024 /1024);
igt_require(threshold < vram_total_mb);
- bo = xe_bo_create_flags(device.fd_xe, 0, SIZE, flags);
+ bo = xe_bo_create(device.fd_xe, 0, SIZE, flags);
map = xe_bo_map(device.fd_xe, bo, SIZE);
memset(map, 0, SIZE);
munmap(map, SIZE);
diff --git a/tests/intel/xe_pm_residency.c b/tests/intel/xe_pm_residency.c
index c87eeef3c..cc133f5fb 100644
--- a/tests/intel/xe_pm_residency.c
+++ b/tests/intel/xe_pm_residency.c
@@ -100,8 +100,8 @@ static void exec_load(int fd, struct drm_xe_engine_class_instance *hwe, unsigned
exec_queue = xe_exec_queue_create(fd, vm, hwe, 0);
bo_size = xe_get_default_alignment(fd);
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, hwe->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, hwe->gt_id));
data = xe_bo_map(fd, bo, bo_size);
syncobj = syncobj_create(fd, 0);
diff --git a/tests/intel/xe_prime_self_import.c b/tests/intel/xe_prime_self_import.c
index 536230f9f..378368eaa 100644
--- a/tests/intel/xe_prime_self_import.c
+++ b/tests/intel/xe_prime_self_import.c
@@ -105,7 +105,7 @@ static void test_with_fd_dup(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create_flags(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
dma_buf_fd1 = prime_handle_to_fd(fd1, handle);
gem_close(fd1, handle);
@@ -138,8 +138,8 @@ static void test_with_two_bos(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle1 = xe_bo_create_flags(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
- handle2 = xe_bo_create_flags(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle1 = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle2 = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
dma_buf_fd = prime_handle_to_fd(fd1, handle1);
handle_import = prime_fd_to_handle(fd2, dma_buf_fd);
@@ -174,8 +174,8 @@ static void test_with_one_bo_two_files(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle_orig = xe_bo_create_flags(fd1, 0, bo_size,
- visible_vram_if_possible(fd1, 0));
+ handle_orig = xe_bo_create(fd1, 0, bo_size,
+ visible_vram_if_possible(fd1, 0));
dma_buf_fd1 = prime_handle_to_fd(fd1, handle_orig);
flink_name = gem_flink(fd1, handle_orig);
@@ -207,7 +207,7 @@ static void test_with_one_bo(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create_flags(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
dma_buf_fd = prime_handle_to_fd(fd1, handle);
handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
@@ -293,8 +293,8 @@ static void *thread_fn_reimport_vs_close(void *p)
fds[0] = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create_flags(fds[0], 0, bo_size,
- visible_vram_if_possible(fds[0], 0));
+ handle = xe_bo_create(fds[0], 0, bo_size,
+ visible_vram_if_possible(fds[0], 0));
fds[1] = prime_handle_to_fd(fds[0], handle);
pthread_barrier_init(&g_barrier, NULL, num_threads);
@@ -336,8 +336,8 @@ static void *thread_fn_export_vs_close(void *p)
igt_until_timeout(g_time_out) {
/* We want to race gem close against prime export on handle one.*/
- handle = xe_bo_create_flags(fd, 0, bo_size,
- visible_vram_if_possible(fd, 0));
+ handle = xe_bo_create(fd, 0, bo_size,
+ visible_vram_if_possible(fd, 0));
if (handle != 1)
gem_close(fd, handle);
@@ -433,8 +433,8 @@ static void test_llseek_size(void)
for (i = 0; i < 10; i++) {
int bufsz = xe_get_default_alignment(fd) << i;
- handle = xe_bo_create_flags(fd, 0, bufsz,
- visible_vram_if_possible(fd, 0));
+ handle = xe_bo_create(fd, 0, bufsz,
+ visible_vram_if_possible(fd, 0));
dma_buf_fd = prime_handle_to_fd(fd, handle);
gem_close(fd, handle);
@@ -462,8 +462,8 @@ static void test_llseek_bad(void)
fd = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create_flags(fd, 0, bo_size,
- visible_vram_if_possible(fd, 0));
+ handle = xe_bo_create(fd, 0, bo_size,
+ visible_vram_if_possible(fd, 0));
dma_buf_fd = prime_handle_to_fd(fd, handle);
gem_close(fd, handle);
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index 261fde9af..c1b161f9c 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -169,7 +169,7 @@ static void xe_spin_fixed_duration(int fd)
exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
bo_size = ALIGN(sizeof(*spin) + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
spin = xe_bo_map(fd, bo, bo_size);
spin_addr = intel_allocator_alloc_with_strategy(ahnd, bo, bo_size, 0,
ALLOC_STRATEGY_LOW_TO_HIGH);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index b4577fb6e..52241314c 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -51,8 +51,8 @@ write_dwords(int fd, uint32_t vm, int n_dwords, uint64_t *addrs)
batch_size = (n_dwords * 4 + 1) * sizeof(uint32_t);
batch_size = ALIGN(batch_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- batch_bo = xe_bo_create_flags(fd, vm, batch_size,
- visible_vram_if_possible(fd, 0));
+ batch_bo = xe_bo_create(fd, vm, batch_size,
+ visible_vram_if_possible(fd, 0));
batch_map = xe_bo_map(fd, batch_bo, batch_size);
for (i = 0; i < n_dwords; i++) {
@@ -116,7 +116,7 @@ __test_bind_one_bo(int fd, uint32_t vm, int n_addrs, uint64_t *addrs)
vms = malloc(sizeof(*vms) * n_addrs);
igt_assert(vms);
}
- bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
map = xe_bo_map(fd, bo, bo_size);
memset(map, 0, bo_size);
@@ -267,7 +267,7 @@ static void test_partial_unbinds(int fd)
{
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
size_t bo_size = 3 * xe_get_default_alignment(fd);
- uint32_t bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
+ uint32_t bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
uint64_t unbind_size = bo_size / 3;
uint64_t addr = 0x1a0000;
@@ -316,7 +316,7 @@ static void unbind_all(int fd, int n_vmas)
};
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
for (i = 0; i < n_vmas; ++i)
xe_vm_bind_async(fd, vm, 0, bo, 0, addr + i * bo_size,
@@ -421,8 +421,8 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
addr_stride = addr_stride + bo_size;
for (i = 0; i < n_bo; ++i) {
- bo[i] = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo[i] = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data[i] = xe_bo_map(fd, bo[i], bo_size);
}
@@ -600,8 +600,8 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
bo_size = sizeof(*data) * N_EXEC_QUEUES;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < N_EXEC_QUEUES; i++) {
@@ -781,8 +781,8 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
data = xe_bo_map(fd, bo, bo_size);
if (flags & BIND_ARRAY_BIND_EXEC_QUEUE_FLAG)
@@ -979,8 +979,8 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
igt_skip_on(xe_visible_vram_size(fd, 0) && bo_size >
xe_visible_vram_size(fd, 0));
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
map = xe_bo_map(fd, bo, bo_size);
}
@@ -1273,8 +1273,8 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
MAP_ANONYMOUS, -1, 0);
igt_assert(map != MAP_FAILED);
} else {
- bo = xe_bo_create_flags(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size,
+ visible_vram_if_possible(fd, eci->gt_id));
map = xe_bo_map(fd, bo, bo_size);
}
memset(map, 0, bo_size);
@@ -1577,9 +1577,9 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(map0 != MAP_FAILED);
igt_assert(map1 != MAP_FAILED);
} else {
- bo0 = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo0 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
map0 = xe_bo_map(fd, bo0, bo_size);
- bo1 = xe_bo_create_flags(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo1 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
map1 = xe_bo_map(fd, bo1, bo_size);
}
memset(map0, 0, bo_size);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index 08d588add..eaef0185f 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -96,19 +96,19 @@ waitfence(int fd, enum waittype wt)
int64_t timeout;
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo_1 = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo_1 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo_1, 0, 0x200000, 0x40000, 1);
- bo_2 = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo_2 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo_2, 0, 0xc0000000, 0x40000, 2);
- bo_3 = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo_3 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo_3, 0, 0x180000000, 0x40000, 3);
- bo_4 = xe_bo_create_flags(fd, vm, 0x10000, MY_FLAG);
+ bo_4 = xe_bo_create(fd, vm, 0x10000, MY_FLAG);
do_bind(fd, vm, bo_4, 0, 0x140000000, 0x10000, 4);
- bo_5 = xe_bo_create_flags(fd, vm, 0x100000, MY_FLAG);
+ bo_5 = xe_bo_create(fd, vm, 0x100000, MY_FLAG);
do_bind(fd, vm, bo_5, 0, 0x100000000, 0x100000, 5);
- bo_6 = xe_bo_create_flags(fd, vm, 0x1c0000, MY_FLAG);
+ bo_6 = xe_bo_create(fd, vm, 0x1c0000, MY_FLAG);
do_bind(fd, vm, bo_6, 0, 0xc0040000, 0x1c0000, 6);
- bo_7 = xe_bo_create_flags(fd, vm, 0x10000, MY_FLAG);
+ bo_7 = xe_bo_create(fd, vm, 0x10000, MY_FLAG);
do_bind(fd, vm, bo_7, 0, 0xeffff0000, 0x10000, 7);
if (wt == RELTIME) {
@@ -173,7 +173,7 @@ invalid_flag(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
@@ -198,7 +198,7 @@ invalid_ops(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
@@ -223,7 +223,7 @@ invalid_engine(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create_flags(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
diff --git a/tests/kms_addfb_basic.c b/tests/kms_addfb_basic.c
index 337fef77e..1e34eaa54 100644
--- a/tests/kms_addfb_basic.c
+++ b/tests/kms_addfb_basic.c
@@ -307,7 +307,7 @@ static void invalid_tests(int fd)
handle = gem_create_in_memory_regions(fd, size, REGION_SMEM);
} else {
igt_require(xe_has_vram(fd));
- handle = xe_bo_create_flags(fd, 0, size, system_memory(fd));
+ handle = xe_bo_create(fd, 0, size, system_memory(fd));
}
f.handles[0] = handle;
diff --git a/tests/kms_getfb.c b/tests/kms_getfb.c
index e6b8dd873..2d93a032d 100644
--- a/tests/kms_getfb.c
+++ b/tests/kms_getfb.c
@@ -210,7 +210,7 @@ static void get_ccs_fb(int fd, struct drm_mode_fb_cmd2 *ret)
if (is_i915_device(fd))
add.handles[0] = gem_buffer_create_fb_obj(fd, size);
else
- add.handles[0] = xe_bo_create_flags(fd, 0, size, vram_if_possible(fd, 0));
+ add.handles[0] = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0));
igt_require(add.handles[0] != 0);
if (!HAS_FLATCCS(devid))
--
2.34.1
* [igt-dev] [PATCH v4 04/20] xe_query: Add missing include
From: Francois Dugast @ 2023-11-29 14:54 UTC
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
When trying to use xe_for_each_mem_region from a caller
that does not include igt_aux.h, the following build error
occurs:
../lib/xe/xe_query.h:76:38: error: implicit declaration of function ‘igt_fls’ [-Werror=implicit-function-declaration]
76 | for (uint64_t __i = 0; __i < igt_fls(__memreg); __i++) \
So, to avoid a dependency chain, let's add the include directly
in the file that uses the helper.
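A minimal sketch of an affected caller (hypothetical function; before
this patch it would have needed its own "igt_aux.h" include to get
igt_fls()):

	#include "xe/xe_query.h"	/* now pulls in igt_aux.h itself */

	static void print_regions(int fd, uint64_t memreg)
	{
		uint64_t region;

		xe_for_each_mem_region(fd, memreg, region)
			igt_info("region: 0x%llx\n", (unsigned long long)region);
	}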
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
lib/xe/xe_query.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 38e9aa440..7b3fc3100 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -11,6 +11,8 @@
#include <stdint.h>
#include <xe_drm.h>
+
+#include "igt_aux.h"
#include "igt_list.h"
#include "igt_sizes.h"
--
2.34.1
* [igt-dev] [PATCH v4 05/20] xe_query: Kill visible_vram_if_possible
From: Francois Dugast @ 2023-11-29 14:54 UTC
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Let the caller set the flag and have __xe_bo_create clear it
if not needed.
Although the current helper makes the code cleaner, the
goal is to split placement and flags into two different
arguments of xe_bo_create. So, the flag decision cannot
be hidden inside the helper.
v2: Fix one comment (Kamil Konieczny)
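The resulting pattern, as in the hunks below: callers that previously
relied on visible_vram_if_possible() now request CPU visibility
explicitly, presumably letting __xe_bo_create clear the flag when no
VRAM region is selected (see the vram_selected() helper added below):

	/* before */
	bo = xe_bo_create(fd, 0, size, visible_vram_if_possible(fd, gt));

	/* after */
	bo = xe_bo_create(fd, 0, size, vram_if_possible(fd, gt) |
			  DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);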
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
benchmarks/gem_wsim.c | 3 ++-
lib/igt_draw.c | 3 ++-
lib/igt_fb.c | 3 ++-
lib/intel_batchbuffer.c | 6 ++++--
lib/xe/xe_ioctl.c | 19 +++++++++++++++++++
lib/xe/xe_query.c | 26 --------------------------
lib/xe/xe_query.h | 1 -
lib/xe/xe_spin.c | 7 ++++---
tests/intel/kms_ccs.c | 3 ++-
tests/intel/xe_dma_buf_sync.c | 3 ++-
tests/intel/xe_exec_balancer.c | 9 ++++++---
tests/intel/xe_exec_basic.c | 2 +-
tests/intel/xe_exec_compute_mode.c | 3 ++-
tests/intel/xe_exec_fault_mode.c | 6 ++++--
tests/intel/xe_exec_reset.c | 14 +++++++++-----
tests/intel/xe_exec_store.c | 9 ++++++---
tests/intel/xe_exec_threads.c | 9 ++++++---
tests/intel/xe_mmap.c | 9 ++++++---
tests/intel/xe_pm.c | 3 ++-
tests/intel/xe_pm_residency.c | 3 ++-
tests/intel/xe_prime_self_import.c | 27 ++++++++++++++++++---------
tests/intel/xe_vm.c | 21 ++++++++++++++-------
22 files changed, 113 insertions(+), 76 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index cb19ad505..47692e94f 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -1746,7 +1746,8 @@ xe_alloc_step_batch(struct workload *wrk, struct w_step *w)
w->bb_size = ALIGN(PAGE_SIZE + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
w->bb_handle = xe_bo_create(fd, vm->id, w->bb_size,
- visible_vram_if_possible(fd, eq->hwe_list[0].gt_id));
+ vram_if_possible(fd, eq->hwe_list[0].gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
w->xe.data = xe_bo_map(fd, w->bb_handle, w->bb_size);
w->xe.exec.address =
intel_allocator_alloc_with_strategy(vm->ahnd, w->bb_handle, w->bb_size,
diff --git a/lib/igt_draw.c b/lib/igt_draw.c
index bad6f455a..4a8dd7bd9 100644
--- a/lib/igt_draw.c
+++ b/lib/igt_draw.c
@@ -797,7 +797,8 @@ static void draw_rect_render(int fd, struct cmd_data *cmd_data,
else
tmp.handle = xe_bo_create(fd, 0,
ALIGN(tmp.size, xe_get_default_alignment(fd)),
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
tmp.stride = rect->w * pixel_size;
tmp.bpp = buf->bpp;
diff --git a/lib/igt_fb.c b/lib/igt_fb.c
index f96dca7a4..0a6aa27c8 100644
--- a/lib/igt_fb.c
+++ b/lib/igt_fb.c
@@ -1206,7 +1206,8 @@ static int create_bo_for_fb(struct igt_fb *fb, bool prefer_sysmem)
igt_assert(err == 0 || err == -EOPNOTSUPP);
} else if (is_xe_device(fd)) {
fb->gem_handle = xe_bo_create(fd, 0, fb->size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0)
+ | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else if (is_vc4_device(fd)) {
fb->gem_handle = igt_vc4_create_bo(fd, fb->size);
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index c38eda771..ae7a43605 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -947,7 +947,8 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
ibb->alignment = xe_get_default_alignment(fd);
size = ALIGN(size, ibb->alignment);
- ibb->handle = xe_bo_create(fd, 0, size, visible_vram_if_possible(fd, 0));
+ ibb->handle = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
/* Limit to 48-bit due to MI_* address limitation */
ibb->gtt_size = 1ull << min_t(uint32_t, xe_va_bits(fd), 48);
@@ -1406,7 +1407,8 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
ibb->handle = gem_create(ibb->fd, ibb->size);
else
ibb->handle = xe_bo_create(ibb->fd, 0, ibb->size,
- visible_vram_if_possible(ibb->fd, 0));
+ vram_if_possible(ibb->fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
/* Reacquire offset for RELOC and SIMPLE */
if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE ||
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 63fa2ae25..3b8d2e1e5 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -226,6 +226,18 @@ void xe_vm_destroy(int fd, uint32_t vm)
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy), 0);
}
+static bool vram_selected(int fd, uint32_t selected_regions)
+{
+ uint64_t regions = all_memory_regions(fd) & selected_regions;
+ uint64_t region;
+
+ xe_for_each_mem_region(fd, regions, region)
+ if (xe_mem_region(fd, region)->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)
+ return true;
+
+ return false;
+}
+
uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
uint32_t *handle)
{
@@ -236,6 +248,13 @@ uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
};
int err;
+ /*
+ * In case vram_if_possible returned system_memory,
+ * visible VRAM cannot be requested through flags
+ */
+ if (!vram_selected(fd, flags))
+ create.flags &= ~DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+
err = igt_ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
if (err)
return err;
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index afd443be3..760a150db 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -442,32 +442,6 @@ uint64_t vram_if_possible(int fd, int gt)
return vram_memory(fd, gt) ?: system_memory(fd);
}
-/**
- * visible_vram_if_possible:
- * @fd: xe device fd
- * @gt: gt id
- *
- * Returns vram memory bitmask for xe device @fd and @gt id or system memory if
- * there's no vram memory available for @gt. Also attaches the
- * DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM to ensure that CPU access is possible
- * when using vram.
- */
-uint64_t visible_vram_if_possible(int fd, int gt)
-{
- uint64_t regions = all_memory_regions(fd);
- uint64_t system_memory = regions & 0x1;
- uint64_t vram = regions & (0x2 << gt);
-
- /*
- * TODO: Keep it backwards compat for now. Fixup once the kernel side
- * has landed.
- */
- if (__xe_visible_vram_size(fd, gt))
- return vram ? vram | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM : system_memory;
- else
- return vram ? vram : system_memory; /* older kernel */
-}
-
/**
* xe_hw_engines:
* @fd: xe device fd
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 7b3fc3100..4dd0ad573 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -82,7 +82,6 @@ uint64_t system_memory(int fd);
uint64_t vram_memory(int fd, int gt);
uint64_t visible_vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
-uint64_t visible_vram_if_possible(int fd, int gt);
struct drm_xe_engine_class_instance *xe_hw_engines(int fd);
struct drm_xe_engine_class_instance *xe_hw_engine(int fd, int idx);
struct drm_xe_query_mem_region *xe_mem_region(int fd, uint64_t region);
diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
index 828938434..270b58bf5 100644
--- a/lib/xe/xe_spin.c
+++ b/lib/xe/xe_spin.c
@@ -220,7 +220,8 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
}
spin->handle = xe_bo_create(fd, spin->vm, bo_size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_spin = xe_bo_map(fd, spin->handle, bo_size);
addr = intel_allocator_alloc_with_strategy(ahnd, spin->handle, bo_size, 0, ALLOC_STRATEGY_LOW_TO_HIGH);
xe_vm_bind_sync(fd, spin->vm, spin->handle, 0, addr, bo_size);
@@ -298,8 +299,8 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
vm = xe_vm_create(fd, 0, 0);
- bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, hwe->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, hwe->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
spin = xe_bo_map(fd, bo, 0x1000);
xe_vm_bind_sync(fd, vm, bo, 0, addr, bo_size);
diff --git a/tests/intel/kms_ccs.c b/tests/intel/kms_ccs.c
index a5a8abb28..7a99da14e 100644
--- a/tests/intel/kms_ccs.c
+++ b/tests/intel/kms_ccs.c
@@ -453,7 +453,8 @@ static void test_bad_ccs_plane(data_t *data, int width, int height, int ccs_plan
bad_ccs_bo = is_i915_device(data->drm_fd) ?
gem_create(data->drm_fd, fb.size) :
xe_bo_create(data->drm_fd, 0, fb.size,
- visible_vram_if_possible(data->drm_fd, 0));
+ vram_if_possible(data->drm_fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
f.handles[ccs_plane] = bad_ccs_bo;
}
diff --git a/tests/intel/xe_dma_buf_sync.c b/tests/intel/xe_dma_buf_sync.c
index ac9d9d767..9318647af 100644
--- a/tests/intel/xe_dma_buf_sync.c
+++ b/tests/intel/xe_dma_buf_sync.c
@@ -120,7 +120,8 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
xe_get_default_alignment(fd[0]));
for (i = 0; i < n_bo; ++i) {
bo[i] = xe_bo_create(fd[0], 0, bo_size,
- visible_vram_if_possible(fd[0], hwe0->gt_id));
+ vram_if_possible(fd[0], hwe0->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd[i] = prime_handle_to_fd(fd[0], bo[i]);
import_bo[i] = prime_fd_to_handle(fd[1], dma_buf_fd[i]);
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index da34e117d..388bb6185 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -70,7 +70,8 @@ static void test_all_active(int fd, int gt, int class)
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < num_placements; i++) {
@@ -224,7 +225,8 @@ test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
}
memset(data, 0, bo_size);
} else {
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
@@ -452,7 +454,8 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
igt_assert(data);
}
} else {
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index 841696b68..ca287b2e5 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -136,7 +136,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
} else {
uint32_t bo_flags;
- bo_flags = visible_vram_if_possible(fd, eci->gt_id);
+ bo_flags = vram_if_possible(fd, eci->gt_id) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
if (flags & DEFER_ALLOC)
bo_flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index beb962f79..07a27fd29 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -142,7 +142,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
}
} else {
bo = xe_bo_create(fd, flags & VM_FOR_BO ? vm : 0,
- bo_size, visible_vram_if_possible(fd, eci->gt_id));
+ bo_size, vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 903ad430d..bfd61c4ea 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -153,10 +153,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & PREFETCH)
bo = xe_bo_create(fd, 0, bo_size,
all_memory_regions(fd) |
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
else
bo = xe_bo_create(fd, 0, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 704690e83..3affb19ae 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -51,7 +51,8 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
spin = xe_bo_map(fd, bo, bo_size);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -181,7 +182,8 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -368,7 +370,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -535,7 +537,8 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
memset(data, 0, bo_size);
@@ -661,7 +664,8 @@ static void submit_jobs(struct gt_thread_data *t)
uint32_t bo;
uint32_t *data;
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
data[0] = MI_BATCH_BUFFER_END;
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index bcc4de8d0..884183202 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -82,7 +82,8 @@ static void store(int fd)
hw_engine = xe_hw_engine(fd, 1);
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, hw_engine->gt_id));
+ vram_if_possible(fd, hw_engine->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_vm_bind_async(fd, vm, hw_engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
data = xe_bo_map(fd, bo, bo_size);
@@ -151,7 +152,8 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
for (i = 0; i < count; i++) {
bo[i] = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
dst_offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
bo_size, 0,
@@ -236,7 +238,8 @@ static void store_all(int fd, int gt, int class)
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
xe_for_each_hw_engine(fd, hwe) {
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index a9b0c0b09..ebc41dadd 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -107,7 +107,8 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, gt));
+ vram_if_possible(fd, gt) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
@@ -308,7 +309,8 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, 0, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
@@ -511,7 +513,8 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
memset(data, 0, bo_size);
diff --git a/tests/intel/xe_mmap.c b/tests/intel/xe_mmap.c
index a805eabda..a4b53ad48 100644
--- a/tests/intel/xe_mmap.c
+++ b/tests/intel/xe_mmap.c
@@ -73,7 +73,8 @@ static void test_bad_flags(int fd)
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
.flags = -1u,
};
@@ -93,7 +94,8 @@ static void test_bad_extensions(int fd)
struct xe_user_extension ext;
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
};
mmo.extensions = to_user_pointer(&ext);
@@ -114,7 +116,8 @@ static void test_bad_object(int fd)
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- visible_vram_if_possible(fd, 0)),
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
};
mmo.handle = 0xdeadbeef;
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index 9bfe1acad..9fd3527f7 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -272,7 +272,8 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
bo = xe_bo_create(device.fd_xe, vm, bo_size,
- visible_vram_if_possible(device.fd_xe, eci->gt_id));
+ vram_if_possible(device.fd_xe, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(device.fd_xe, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
diff --git a/tests/intel/xe_pm_residency.c b/tests/intel/xe_pm_residency.c
index cc133f5fb..40a1693b8 100644
--- a/tests/intel/xe_pm_residency.c
+++ b/tests/intel/xe_pm_residency.c
@@ -101,7 +101,8 @@ static void exec_load(int fd, struct drm_xe_engine_class_instance *hwe, unsigned
bo_size = xe_get_default_alignment(fd);
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, hwe->gt_id));
+ vram_if_possible(fd, hwe->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
syncobj = syncobj_create(fd, 0);
diff --git a/tests/intel/xe_prime_self_import.c b/tests/intel/xe_prime_self_import.c
index 378368eaa..2c2f2898c 100644
--- a/tests/intel/xe_prime_self_import.c
+++ b/tests/intel/xe_prime_self_import.c
@@ -105,7 +105,8 @@ static void test_with_fd_dup(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd1 = prime_handle_to_fd(fd1, handle);
gem_close(fd1, handle);
@@ -138,8 +139,10 @@ static void test_with_two_bos(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle1 = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
- handle2 = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle1 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ handle2 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd1, handle1);
handle_import = prime_fd_to_handle(fd2, dma_buf_fd);
@@ -175,7 +178,8 @@ static void test_with_one_bo_two_files(void)
fd2 = drm_open_driver(DRIVER_XE);
handle_orig = xe_bo_create(fd1, 0, bo_size,
- visible_vram_if_possible(fd1, 0));
+ vram_if_possible(fd1, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd1 = prime_handle_to_fd(fd1, handle_orig);
flink_name = gem_flink(fd1, handle_orig);
@@ -207,7 +211,8 @@ static void test_with_one_bo(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create(fd1, 0, bo_size, visible_vram_if_possible(fd1, 0));
+ handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd1, handle);
handle_import1 = prime_fd_to_handle(fd2, dma_buf_fd);
@@ -294,7 +299,8 @@ static void *thread_fn_reimport_vs_close(void *p)
fds[0] = drm_open_driver(DRIVER_XE);
handle = xe_bo_create(fds[0], 0, bo_size,
- visible_vram_if_possible(fds[0], 0));
+ vram_if_possible(fds[0], 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
fds[1] = prime_handle_to_fd(fds[0], handle);
pthread_barrier_init(&g_barrier, NULL, num_threads);
@@ -337,7 +343,8 @@ static void *thread_fn_export_vs_close(void *p)
igt_until_timeout(g_time_out) {
/* We want to race gem close against prime export on handle one.*/
handle = xe_bo_create(fd, 0, bo_size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
if (handle != 1)
gem_close(fd, handle);
@@ -434,7 +441,8 @@ static void test_llseek_size(void)
int bufsz = xe_get_default_alignment(fd) << i;
handle = xe_bo_create(fd, 0, bufsz,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd, handle);
gem_close(fd, handle);
@@ -463,7 +471,8 @@ static void test_llseek_bad(void)
fd = drm_open_driver(DRIVER_XE);
handle = xe_bo_create(fd, 0, bo_size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd, handle);
gem_close(fd, handle);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 52241314c..7db15d4e9 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -52,7 +52,8 @@ write_dwords(int fd, uint32_t vm, int n_dwords, uint64_t *addrs)
batch_size = ALIGN(batch_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
batch_bo = xe_bo_create(fd, vm, batch_size,
- visible_vram_if_possible(fd, 0));
+ vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
batch_map = xe_bo_map(fd, batch_bo, batch_size);
for (i = 0; i < n_dwords; i++) {
@@ -116,7 +117,8 @@ __test_bind_one_bo(int fd, uint32_t vm, int n_addrs, uint64_t *addrs)
vms = malloc(sizeof(*vms) * n_addrs);
igt_assert(vms);
}
- bo = xe_bo_create(fd, vm, bo_size, visible_vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
memset(map, 0, bo_size);
@@ -422,7 +424,8 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
for (i = 0; i < n_bo; ++i) {
bo[i] = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data[i] = xe_bo_map(fd, bo[i], bo_size);
}
@@ -601,7 +604,8 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < N_EXEC_QUEUES; i++) {
@@ -782,7 +786,8 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
if (flags & BIND_ARRAY_BIND_EXEC_QUEUE_FLAG)
@@ -980,7 +985,8 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
xe_visible_vram_size(fd, 0));
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
}
@@ -1274,7 +1280,8 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(map != MAP_FAILED);
} else {
bo = xe_bo_create(fd, vm, bo_size,
- visible_vram_if_possible(fd, eci->gt_id));
+ vram_if_possible(fd, eci->gt_id) |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
}
memset(map, 0, bo_size);
--
2.34.1
^ permalink raw reply related [flat|nested] 23+ messages in thread

* [igt-dev] [PATCH v4 06/20] drm-uapi/xe: Separate bo_create placement from flags
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (4 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 05/20] xe_query: Kill visible_vram_if_possible Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 07/20] xe: s/hw_engine/engine Francois Dugast
` (14 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Align with kernel commit ("drm/xe/uapi: Separate bo_create placement from flags")
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
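Note: with placement split out, call sites now pass the region mask
and the GEM creation flags as separate arguments. A minimal sketch
(variable names are placeholders, following the conversions below):

	/* CPU-visible VRAM if available; flags are their own argument. */
	bo = xe_bo_create(fd, vm, bo_size,
			  vram_if_possible(fd, gt_id),               /* placement */
			  DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM); /* flags */

	/* Plain system-memory allocation, no flags. */
	bo = xe_bo_create(fd, 0, bo_size, system_memory(fd), 0);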
benchmarks/gem_wsim.c | 2 +-
include/drm-uapi/xe_drm.h | 12 +++++-----
lib/igt_draw.c | 2 +-
lib/igt_fb.c | 6 ++---
lib/intel_batchbuffer.c | 4 ++--
lib/intel_blt.c | 4 ++--
lib/intel_bufops.c | 2 +-
lib/xe/xe_ioctl.c | 12 +++++-----
lib/xe/xe_ioctl.h | 7 +++---
lib/xe/xe_query.c | 21 -----------------
lib/xe/xe_query.h | 1 -
lib/xe/xe_spin.c | 4 ++--
tests/intel/api_intel_allocator.c | 2 +-
tests/intel/kms_big_fb.c | 6 ++---
tests/intel/kms_ccs.c | 2 +-
tests/intel/xe_ccs.c | 12 +++++-----
tests/intel/xe_copy_basic.c | 8 +++----
tests/intel/xe_create.c | 4 ++--
tests/intel/xe_dma_buf_sync.c | 2 +-
tests/intel/xe_drm_fdinfo.c | 6 ++---
tests/intel/xe_evict.c | 22 +++++++++++-------
tests/intel/xe_evict_ccs.c | 5 +++--
tests/intel/xe_exec_balancer.c | 6 ++---
tests/intel/xe_exec_basic.c | 5 +++--
tests/intel/xe_exec_compute_mode.c | 2 +-
tests/intel/xe_exec_fault_mode.c | 4 ++--
tests/intel/xe_exec_reset.c | 11 ++++-----
tests/intel/xe_exec_store.c | 6 ++---
tests/intel/xe_exec_threads.c | 6 ++---
tests/intel/xe_exercise_blt.c | 4 ++--
tests/intel/xe_intel_bb.c | 2 +-
tests/intel/xe_mmap.c | 36 +++++++++++++++++-------------
tests/intel/xe_noexec_ping_pong.c | 2 +-
tests/intel/xe_perf_pmu.c | 4 ++--
tests/intel/xe_pm.c | 10 ++++-----
tests/intel/xe_pm_residency.c | 2 +-
tests/intel/xe_prime_self_import.c | 18 +++++++--------
tests/intel/xe_spin_batch.c | 2 +-
tests/intel/xe_vm.c | 22 +++++++++---------
tests/intel/xe_waitfence.c | 22 +++++++++---------
tests/kms_addfb_basic.c | 2 +-
tests/kms_getfb.c | 2 +-
42 files changed, 153 insertions(+), 161 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index 47692e94f..eb9fd6266 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -1746,7 +1746,7 @@ xe_alloc_step_batch(struct workload *wrk, struct w_step *w)
w->bb_size = ALIGN(PAGE_SIZE + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
w->bb_handle = xe_bo_create(fd, vm->id, w->bb_size,
- vram_if_possible(fd, eq->hwe_list[0].gt_id) |
+ vram_if_possible(fd, eq->hwe_list[0].gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
w->xe.data = xe_bo_map(fd, w->bb_handle, w->bb_size);
w->xe.exec.address =
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 07ea94ed4..26c1a8604 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -500,8 +500,11 @@ struct drm_xe_gem_create {
*/
__u64 size;
-#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
-#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
+ /** @placement: A mask of memory instances of where BO can be placed. */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
/*
* When using VRAM as a possible placement, ensure that the corresponding VRAM
* allocation will always use the CPU accessible part of VRAM. This is important
@@ -517,7 +520,7 @@ struct drm_xe_gem_create {
* display surfaces, therefore the kernel requires setting this flag for such
* objects, otherwise an error is thrown on small-bar systems.
*/
-#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@@ -541,9 +544,6 @@ struct drm_xe_gem_create {
*/
__u32 handle;
- /** @pad: MBZ */
- __u32 pad;
-
/** @reserved: Reserved */
__u64 reserved[2];
};
diff --git a/lib/igt_draw.c b/lib/igt_draw.c
index 4a8dd7bd9..a55a07635 100644
--- a/lib/igt_draw.c
+++ b/lib/igt_draw.c
@@ -797,7 +797,7 @@ static void draw_rect_render(int fd, struct cmd_data *cmd_data,
else
tmp.handle = xe_bo_create(fd, 0,
ALIGN(tmp.size, xe_get_default_alignment(fd)),
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
tmp.stride = rect->w * pixel_size;
diff --git a/lib/igt_fb.c b/lib/igt_fb.c
index 0a6aa27c8..9c1257801 100644
--- a/lib/igt_fb.c
+++ b/lib/igt_fb.c
@@ -1206,8 +1206,8 @@ static int create_bo_for_fb(struct igt_fb *fb, bool prefer_sysmem)
igt_assert(err == 0 || err == -EOPNOTSUPP);
} else if (is_xe_device(fd)) {
fb->gem_handle = xe_bo_create(fd, 0, fb->size,
- vram_if_possible(fd, 0)
- | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ vram_if_possible(fd, 0),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else if (is_vc4_device(fd)) {
fb->gem_handle = igt_vc4_create_bo(fd, fb->size);
@@ -2904,7 +2904,7 @@ static void blitcopy(const struct igt_fb *dst_fb,
bb_size = ALIGN(bb_size + xe_cs_prefetch_size(dst_fb->fd),
xe_get_default_alignment(dst_fb->fd));
- xe_bb = xe_bo_create(dst_fb->fd, 0, bb_size, mem_region);
+ xe_bb = xe_bo_create(dst_fb->fd, 0, bb_size, mem_region, 0);
}
for (int i = 0; i < dst_fb->num_planes - dst_cc; i++) {
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index ae7a43605..ef55b6330 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -947,7 +947,7 @@ __intel_bb_create(int fd, uint32_t ctx, uint32_t vm, const intel_ctx_cfg_t *cfg,
ibb->alignment = xe_get_default_alignment(fd);
size = ALIGN(size, ibb->alignment);
- ibb->handle = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0) |
+ ibb->handle = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
/* Limit to 48-bit due to MI_* address limitation */
@@ -1407,7 +1407,7 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
ibb->handle = gem_create(ibb->fd, ibb->size);
else
ibb->handle = xe_bo_create(ibb->fd, 0, ibb->size,
- vram_if_possible(ibb->fd, 0) |
+ vram_if_possible(ibb->fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
/* Reacquire offset for RELOC and SIMPLE */
diff --git a/lib/intel_blt.c b/lib/intel_blt.c
index 36830fb3e..2ab4f69cf 100644
--- a/lib/intel_blt.c
+++ b/lib/intel_blt.c
@@ -1801,13 +1801,13 @@ blt_create_object(const struct blt_copy_data *blt, uint32_t region,
obj->size = size;
if (blt->driver == INTEL_DRIVER_XE) {
- uint64_t flags = region;
+ uint64_t flags = 0;
if (create_mapping && region != system_memory(blt->fd))
flags |= DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
size = ALIGN(size, xe_get_default_alignment(blt->fd));
- handle = xe_bo_create(blt->fd, 0, size, flags);
+ handle = xe_bo_create(blt->fd, 0, size, region, flags);
} else {
igt_assert(__gem_create_in_memory_regions(blt->fd, &handle,
&size, region) == 0);
diff --git a/lib/intel_bufops.c b/lib/intel_bufops.c
index 6f3a77f47..5582481f6 100644
--- a/lib/intel_bufops.c
+++ b/lib/intel_bufops.c
@@ -920,7 +920,7 @@ static void __intel_buf_init(struct buf_ops *bops,
igt_assert_eq(__gem_create(bops->fd, &size, &buf->handle), 0);
} else {
size = ALIGN(size, xe_get_default_alignment(bops->fd));
- buf->handle = xe_bo_create(bops->fd, 0, size, region);
+ buf->handle = xe_bo_create(bops->fd, 0, size, region, 0);
}
}
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 3b8d2e1e5..c6d7af878 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -238,12 +238,13 @@ static bool vram_selected(int fd, uint32_t selected_regions)
return false;
}
-uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
- uint32_t *handle)
+uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t placement,
+ uint32_t flags, uint32_t *handle)
{
struct drm_xe_gem_create create = {
.vm_id = vm,
.size = size,
+ .placement = placement,
.flags = flags,
};
int err;
@@ -252,7 +253,7 @@ uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
* In case vram_if_possible returned system_memory,
* visible VRAM cannot be requested through flags
*/
- if (!vram_selected(fd, flags))
+ if (!vram_selected(fd, placement))
create.flags &= ~DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
err = igt_ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
@@ -263,11 +264,12 @@ uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
return 0;
}
-uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags)
+uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t placement,
+ uint32_t flags)
{
uint32_t handle;
- igt_assert_eq(__xe_bo_create(fd, vm, size, flags, &handle), 0);
+ igt_assert_eq(__xe_bo_create(fd, vm, size, placement, flags, &handle), 0);
return handle;
}
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 1ec29c2c5..bc609442a 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -64,9 +64,10 @@ void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
uint32_t bo, struct drm_xe_sync *sync,
uint32_t num_syncs);
void xe_vm_destroy(int fd, uint32_t vm);
-uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags,
- uint32_t *handle);
-uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t flags);
+uint32_t __xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t placement,
+ uint32_t flags, uint32_t *handle);
+uint32_t xe_bo_create(int fd, uint32_t vm, uint64_t size, uint32_t placement,
+ uint32_t flags);
uint32_t xe_exec_queue_create(int fd, uint32_t vm,
struct drm_xe_engine_class_instance *instance,
uint64_t ext);
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index 760a150db..fa17b46b6 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -408,27 +408,6 @@ static uint64_t __xe_visible_vram_size(int fd, int gt)
return xe_dev->visible_vram_size[gt];
}
-/**
- * visible_vram_memory:
- * @fd: xe device fd
- * @gt: gt id
- *
- * Returns vram memory bitmask for xe device @fd and @gt id, with
- * DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM also set, to ensure that CPU access is
- * possible.
- */
-uint64_t visible_vram_memory(int fd, int gt)
-{
- /*
- * TODO: Keep it backwards compat for now. Fixup once the kernel side
- * has landed.
- */
- if (__xe_visible_vram_size(fd, gt))
- return vram_memory(fd, gt) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
- else
- return vram_memory(fd, gt); /* older kernel */
-}
-
/**
* vram_if_possible:
* @fd: xe device fd
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 4dd0ad573..be92ec5ed 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -80,7 +80,6 @@ unsigned int xe_number_gt(int fd);
uint64_t all_memory_regions(int fd);
uint64_t system_memory(int fd);
uint64_t vram_memory(int fd, int gt);
-uint64_t visible_vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
struct drm_xe_engine_class_instance *xe_hw_engines(int fd);
struct drm_xe_engine_class_instance *xe_hw_engine(int fd, int idx);
diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
index 270b58bf5..91bc6664d 100644
--- a/lib/xe/xe_spin.c
+++ b/lib/xe/xe_spin.c
@@ -220,7 +220,7 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
}
spin->handle = xe_bo_create(fd, spin->vm, bo_size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_spin = xe_bo_map(fd, spin->handle, bo_size);
addr = intel_allocator_alloc_with_strategy(ahnd, spin->handle, bo_size, 0, ALLOC_STRATEGY_LOW_TO_HIGH);
@@ -299,7 +299,7 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
vm = xe_vm_create(fd, 0, 0);
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, hwe->gt_id) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, hwe->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
spin = xe_bo_map(fd, bo, 0x1000);
diff --git a/tests/intel/api_intel_allocator.c b/tests/intel/api_intel_allocator.c
index 158fd86a1..2d01da7d0 100644
--- a/tests/intel/api_intel_allocator.c
+++ b/tests/intel/api_intel_allocator.c
@@ -469,7 +469,7 @@ static void __simple_allocs(int fd)
size = (rand() % 4 + 1) * 0x1000;
if (is_xe)
handles[i] = xe_bo_create(fd, 0, size,
- system_memory(fd));
+ system_memory(fd), 0);
else
handles[i] = gem_create(fd, size);
diff --git a/tests/intel/kms_big_fb.c b/tests/intel/kms_big_fb.c
index b627ce659..3ee9753a0 100644
--- a/tests/intel/kms_big_fb.c
+++ b/tests/intel/kms_big_fb.c
@@ -779,7 +779,7 @@ test_size_overflow(data_t *data)
bo = xe_bo_create(data->drm_fd, 0,
ALIGN(((1ULL << 32) - 4096),
xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ vram_if_possible(data->drm_fd, 0), 0);
igt_require(bo);
ret = __kms_addfb(data->drm_fd, bo,
@@ -823,7 +823,7 @@ test_size_offset_overflow(data_t *data)
bo = xe_bo_create(data->drm_fd, 0,
ALIGN(((1ULL << 32) - 4096),
xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ vram_if_possible(data->drm_fd, 0), 0);
igt_require(bo);
offsets[0] = 0;
@@ -887,7 +887,7 @@ test_addfb(data_t *data)
else
bo = xe_bo_create(data->drm_fd, 0,
ALIGN(size, xe_get_default_alignment(data->drm_fd)),
- vram_if_possible(data->drm_fd, 0));
+ vram_if_possible(data->drm_fd, 0), 0);
igt_require(bo);
if (is_i915_device(data->drm_fd) && intel_display_ver(data->devid) < 4)
diff --git a/tests/intel/kms_ccs.c b/tests/intel/kms_ccs.c
index 7a99da14e..74bc78477 100644
--- a/tests/intel/kms_ccs.c
+++ b/tests/intel/kms_ccs.c
@@ -453,7 +453,7 @@ static void test_bad_ccs_plane(data_t *data, int width, int height, int ccs_plan
bad_ccs_bo = is_i915_device(data->drm_fd) ?
gem_create(data->drm_fd, fb.size) :
xe_bo_create(data->drm_fd, 0, fb.size,
- vram_if_possible(data->drm_fd, 0) |
+ vram_if_possible(data->drm_fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
f.handles[ccs_plane] = bad_ccs_bo;
}
diff --git a/tests/intel/xe_ccs.c b/tests/intel/xe_ccs.c
index ceecba416..d742d726c 100644
--- a/tests/intel/xe_ccs.c
+++ b/tests/intel/xe_ccs.c
@@ -102,8 +102,8 @@ static void surf_copy(int xe,
igt_assert(mid->compression);
ccscopy = (uint32_t *) malloc(ccssize);
- ccs = xe_bo_create(xe, 0, ccssize, sysmem);
- ccs2 = xe_bo_create(xe, 0, ccssize, sysmem);
+ ccs = xe_bo_create(xe, 0, ccssize, sysmem, 0);
+ ccs2 = xe_bo_create(xe, 0, ccssize, sysmem, 0);
blt_ctrl_surf_copy_init(xe, &surf);
surf.print_bb = param.print_bb;
@@ -111,7 +111,7 @@ static void surf_copy(int xe,
uc_mocs, BLT_INDIRECT_ACCESS);
blt_set_ctrl_surf_object(&surf.dst, ccs, sysmem, ccssize, uc_mocs, DIRECT_ACCESS);
bb_size = xe_get_default_alignment(xe);
- bb1 = xe_bo_create(xe, 0, bb_size, sysmem);
+ bb1 = xe_bo_create(xe, 0, bb_size, sysmem, 0);
blt_set_batch(&surf.bb, bb1, bb_size, sysmem);
blt_ctrl_surf_copy(xe, ctx, NULL, ahnd, &surf);
intel_ctx_xe_sync(ctx, true);
@@ -166,7 +166,7 @@ static void surf_copy(int xe,
blt_set_copy_object(&blt.dst, dst);
blt_set_object_ext(&ext.src, mid->compression_type, mid->x2, mid->y2, SURFACE_TYPE_2D);
blt_set_object_ext(&ext.dst, 0, dst->x2, dst->y2, SURFACE_TYPE_2D);
- bb2 = xe_bo_create(xe, 0, bb_size, sysmem);
+ bb2 = xe_bo_create(xe, 0, bb_size, sysmem, 0);
blt_set_batch(&blt.bb, bb2, bb_size, sysmem);
blt_block_copy(xe, ctx, NULL, ahnd, &blt, &ext);
intel_ctx_xe_sync(ctx, true);
@@ -297,7 +297,7 @@ static void block_copy(int xe,
uint8_t uc_mocs = intel_get_uc_mocs_index(xe);
int result;
- bb = xe_bo_create(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1, 0);
if (!blt_uses_extended_block_copy(xe))
pext = NULL;
@@ -418,7 +418,7 @@ static void block_multicopy(int xe,
uint8_t uc_mocs = intel_get_uc_mocs_index(xe);
int result;
- bb = xe_bo_create(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1, 0);
if (!blt_uses_extended_block_copy(xe))
pext3 = NULL;
diff --git a/tests/intel/xe_copy_basic.c b/tests/intel/xe_copy_basic.c
index 715f7d3b5..a51cc4c0d 100644
--- a/tests/intel/xe_copy_basic.c
+++ b/tests/intel/xe_copy_basic.c
@@ -52,7 +52,7 @@ mem_copy(int fd, uint32_t src_handle, uint32_t dst_handle, const intel_ctx_t *ct
uint32_t bb;
int result;
- bb = xe_bo_create(fd, 0, bb_size, region);
+ bb = xe_bo_create(fd, 0, bb_size, region, 0);
blt_mem_init(fd, &mem);
blt_set_mem_object(&mem.src, src_handle, size, 0, width, height,
@@ -102,7 +102,7 @@ mem_set(int fd, uint32_t dst_handle, const intel_ctx_t *ctx, uint32_t size,
uint32_t bb;
uint8_t *result;
- bb = xe_bo_create(fd, 0, bb_size, region);
+ bb = xe_bo_create(fd, 0, bb_size, region, 0);
blt_mem_init(fd, &mem);
blt_set_mem_object(&mem.dst, dst_handle, size, 0, width, height, region,
dst_mocs, M_LINEAR, COMPRESSION_DISABLED);
@@ -132,8 +132,8 @@ static void copy_test(int fd, uint32_t size, enum blt_cmd_type cmd, uint32_t reg
uint32_t bo_size = ALIGN(size, xe_get_default_alignment(fd));
intel_ctx_t *ctx;
- src_handle = xe_bo_create(fd, 0, bo_size, region);
- dst_handle = xe_bo_create(fd, 0, bo_size, region);
+ src_handle = xe_bo_create(fd, 0, bo_size, region, 0);
+ dst_handle = xe_bo_create(fd, 0, bo_size, region, 0);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
exec_queue = xe_exec_queue_create(fd, vm, &inst, 0);
ctx = intel_ctx_xe(fd, vm, exec_queue, 0, 0, 0);
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index f4633cfb3..a5e5b9892 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -18,13 +18,13 @@
#define PAGE_SIZE 0x1000
-static int __create_bo(int fd, uint32_t vm, uint64_t size, uint32_t flags,
+static int __create_bo(int fd, uint32_t vm, uint64_t size, uint32_t placement,
uint32_t *handlep)
{
struct drm_xe_gem_create create = {
.vm_id = vm,
.size = size,
- .flags = flags,
+ .placement = placement,
};
int ret = 0;
diff --git a/tests/intel/xe_dma_buf_sync.c b/tests/intel/xe_dma_buf_sync.c
index 9318647af..aeb4c4995 100644
--- a/tests/intel/xe_dma_buf_sync.c
+++ b/tests/intel/xe_dma_buf_sync.c
@@ -120,7 +120,7 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
xe_get_default_alignment(fd[0]));
for (i = 0; i < n_bo; ++i) {
bo[i] = xe_bo_create(fd[0], 0, bo_size,
- vram_if_possible(fd[0], hwe0->gt_id) |
+ vram_if_possible(fd[0], hwe0->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd[i] = prime_handle_to_fd(fd[0], bo[i]);
import_bo[i] = prime_fd_to_handle(fd[1], dma_buf_fd[i]);
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 8f737a533..6bca5a6f1 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -85,7 +85,7 @@ static void test_active(int fd, struct drm_xe_engine_class_instance *eci)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].active;
- bo = xe_bo_create(fd, vm, bo_size, region);
+ bo = xe_bo_create(fd, vm, bo_size, region, 0);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < N_EXEC_QUEUES; i++) {
@@ -185,7 +185,7 @@ static void test_shared(int xe)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].shared;
- bo = xe_bo_create(xe, 0, BO_SIZE, region);
+ bo = xe_bo_create(xe, 0, BO_SIZE, region, 0);
flink.handle = bo;
ret = igt_ioctl(xe, DRM_IOCTL_GEM_FLINK, &flink);
@@ -232,7 +232,7 @@ static void test_total_resident(int xe)
igt_assert_f(ret != 0, "failed with err:%d\n", errno);
pre_size = info.region_mem[memregion->instance + 1].shared;
- handle = xe_bo_create(xe, vm, BO_SIZE, region);
+ handle = xe_bo_create(xe, vm, BO_SIZE, region, 0);
xe_vm_bind_sync(xe, vm, handle, 0, addr, BO_SIZE);
ret = igt_parse_drm_fdinfo(xe, &info, NULL, 0, NULL, 0);
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index a9d501d5f..436a2be02 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -101,16 +101,19 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & MULTI_VM) {
__bo = bo[i] = xe_bo_create(fd, 0,
bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ vram_memory(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else if (flags & THREADED) {
__bo = bo[i] = xe_bo_create(fd, vm,
bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ vram_memory(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else {
__bo = bo[i] = xe_bo_create(fd, _vm,
bo_size,
- visible_vram_memory(fd, eci->gt_id) |
- system_memory(fd));
+ vram_memory(fd, eci->gt_id) |
+ system_memory(fd),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
}
} else {
__bo = bo[i % (n_execs / 2)];
@@ -277,16 +280,19 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & MULTI_VM) {
__bo = bo[i] = xe_bo_create(fd, 0,
bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ vram_memory(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else if (flags & THREADED) {
__bo = bo[i] = xe_bo_create(fd, vm,
bo_size,
- visible_vram_memory(fd, eci->gt_id));
+ vram_memory(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
} else {
__bo = bo[i] = xe_bo_create(fd, _vm,
bo_size,
- visible_vram_memory(fd, eci->gt_id) |
- system_memory(fd));
+ vram_memory(fd, eci->gt_id) |
+ system_memory(fd),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
}
} else {
__bo = bo[i % (n_execs / 2)];
diff --git a/tests/intel/xe_evict_ccs.c b/tests/intel/xe_evict_ccs.c
index 35a588521..0c775e5f4 100644
--- a/tests/intel/xe_evict_ccs.c
+++ b/tests/intel/xe_evict_ccs.c
@@ -82,7 +82,8 @@ static void copy_obj(struct blt_copy_data *blt,
w = src_obj->x2;
h = src_obj->y2;
- bb = xe_bo_create(fd, 0, bb_size, visible_vram_memory(fd, 0));
+ bb = xe_bo_create(fd, 0, bb_size, vram_memory(fd, 0),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
blt->color_depth = CD_32bit;
blt->print_bb = params.print_bb;
@@ -275,7 +276,7 @@ static void evict_single(int fd, int child, const struct config *config)
if (config->flags & TEST_SIMPLE) {
big_obj = xe_bo_create(fd, vm, kb_left * SZ_1K,
- vram_memory(fd, 0));
+ vram_memory(fd, 0), 0);
break;
}
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index 388bb6185..fa3d7a338 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -70,7 +70,7 @@ static void test_all_active(int fd, int gt, int class)
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
@@ -225,7 +225,7 @@ test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
}
memset(data, 0, bo_size);
} else {
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
@@ -454,7 +454,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
igt_assert(data);
}
} else {
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index ca287b2e5..23acdd434 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -136,11 +136,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
} else {
uint32_t bo_flags;
- bo_flags = vram_if_possible(fd, eci->gt_id) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
if (flags & DEFER_ALLOC)
bo_flags |= DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING;
- bo = xe_bo_create(fd, n_vm == 1 ? vm[0] : 0, bo_size, bo_flags);
+ bo = xe_bo_create(fd, n_vm == 1 ? vm[0] : 0, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
if (!(flags & DEFER_BIND))
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index 07a27fd29..98a98256e 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -142,7 +142,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
}
} else {
bo = xe_bo_create(fd, flags & VM_FOR_BO ? vm : 0,
- bo_size, vram_if_possible(fd, eci->gt_id) |
+ bo_size, vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index bfd61c4ea..3eb448ef4 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -153,11 +153,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & PREFETCH)
bo = xe_bo_create(fd, 0, bo_size,
all_memory_regions(fd) |
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
else
bo = xe_bo_create(fd, 0, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 3affb19ae..d8b8e0355 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -51,7 +51,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
spin = xe_bo_map(fd, bo, bo_size);
@@ -182,7 +182,7 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
@@ -370,7 +370,8 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) | DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+ vram_if_possible(fd, eci->gt_id),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < n_exec_queues; i++) {
@@ -537,7 +538,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
memset(data, 0, bo_size);
@@ -664,7 +665,7 @@ static void submit_jobs(struct gt_thread_data *t)
uint32_t bo;
uint32_t *data;
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
data[0] = MI_BATCH_BUFFER_END;
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 884183202..9ee5edeb4 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -82,7 +82,7 @@ static void store(int fd)
hw_engine = xe_hw_engine(fd, 1);
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, hw_engine->gt_id) |
+ vram_if_possible(fd, hw_engine->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
xe_vm_bind_async(fd, vm, hw_engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
@@ -152,7 +152,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
for (i = 0; i < count; i++) {
bo[i] = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
bo_map[i] = xe_bo_map(fd, bo[i], bo_size);
dst_offset[i] = intel_allocator_alloc_with_strategy(ahnd, bo[i],
@@ -238,7 +238,7 @@ static void store_all(int fd, int gt, int class)
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index ebc41dadd..f37fc612a 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -107,7 +107,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, gt) |
+ vram_if_possible(fd, gt),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
@@ -309,7 +309,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, 0, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
@@ -513,7 +513,7 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
}
} else {
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
}
diff --git a/tests/intel/xe_exercise_blt.c b/tests/intel/xe_exercise_blt.c
index 9c69be3ef..655e9a3ea 100644
--- a/tests/intel/xe_exercise_blt.c
+++ b/tests/intel/xe_exercise_blt.c
@@ -125,7 +125,7 @@ static void fast_copy_emit(int xe, const intel_ctx_t *ctx,
uint32_t bb, width = param.width, height = param.height;
int result;
- bb = xe_bo_create(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1, 0);
blt_copy_init(xe, &bltinit);
src = blt_create_object(&bltinit, region1, width, height, bpp, 0,
@@ -184,7 +184,7 @@ static void fast_copy(int xe, const intel_ctx_t *ctx,
uint32_t width = param.width, height = param.height;
int result;
- bb = xe_bo_create(xe, 0, bb_size, region1);
+ bb = xe_bo_create(xe, 0, bb_size, region1, 0);
blt_copy_init(xe, &blt);
src = blt_create_object(&blt, region1, width, height, bpp, 0,
diff --git a/tests/intel/xe_intel_bb.c b/tests/intel/xe_intel_bb.c
index a3a315297..00bd17d4c 100644
--- a/tests/intel/xe_intel_bb.c
+++ b/tests/intel/xe_intel_bb.c
@@ -396,7 +396,7 @@ static void create_in_region(struct buf_ops *bops, uint64_t region)
intel_bb_set_debug(ibb, true);
size = xe_min_page_size(xe, system_memory(xe));
- handle = xe_bo_create(xe, 0, size, system_memory(xe));
+ handle = xe_bo_create(xe, 0, size, system_memory(xe), 0);
intel_buf_init_full(bops, handle, &buf,
width/4, height, 32, 0,
I915_TILING_NONE, 0,
diff --git a/tests/intel/xe_mmap.c b/tests/intel/xe_mmap.c
index a4b53ad48..965644e22 100644
--- a/tests/intel/xe_mmap.c
+++ b/tests/intel/xe_mmap.c
@@ -45,14 +45,14 @@
* @vram-system: system vram
*/
static void
-test_mmap(int fd, uint32_t flags)
+test_mmap(int fd, uint32_t placement, uint32_t flags)
{
uint32_t bo;
void *map;
- igt_require_f(flags, "Device doesn't support such memory region\n");
+ igt_require_f(placement, "Device doesn't support such memory region\n");
- bo = xe_bo_create(fd, 0, 4096, flags);
+ bo = xe_bo_create(fd, 0, 4096, placement, flags);
map = xe_bo_map(fd, bo, 4096);
strcpy(map, "Write some data to the BO!");
@@ -73,7 +73,7 @@ static void test_bad_flags(int fd)
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
.flags = -1u,
};
@@ -94,7 +94,7 @@ static void test_bad_extensions(int fd)
struct xe_user_extension ext;
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
};
@@ -116,7 +116,7 @@ static void test_bad_object(int fd)
uint64_t size = xe_get_default_alignment(fd);
struct drm_xe_gem_mmap_offset mmo = {
.handle = xe_bo_create(fd, 0, size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM),
};
@@ -163,12 +163,14 @@ static void test_small_bar(int fd)
/* 2BIG invalid case */
igt_assert_neq(__xe_bo_create(fd, 0, visible_size + 4096,
- visible_vram_memory(fd, 0), &bo),
+ vram_memory(fd, 0),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
+ &bo),
0);
/* Normal operation */
- bo = xe_bo_create(fd, 0, visible_size / 4,
- visible_vram_memory(fd, 0));
+ bo = xe_bo_create(fd, 0, visible_size / 4, vram_memory(fd, 0),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
@@ -180,8 +182,9 @@ static void test_small_bar(int fd)
/* Normal operation with system memory spilling */
bo = xe_bo_create(fd, 0, visible_size,
- visible_vram_memory(fd, 0) |
- system_memory(fd));
+ vram_memory(fd, 0) |
+ system_memory(fd),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
@@ -192,8 +195,7 @@ static void test_small_bar(int fd)
gem_close(fd, bo);
/* Bogus operation with SIGBUS */
- bo = xe_bo_create(fd, 0, visible_size + 4096,
- vram_memory(fd, 0));
+ bo = xe_bo_create(fd, 0, visible_size + 4096, vram_memory(fd, 0), 0);
mmo = xe_bo_mmap_offset(fd, bo);
map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
@@ -210,13 +212,15 @@ igt_main
fd = drm_open_driver(DRIVER_XE);
igt_subtest("system")
- test_mmap(fd, system_memory(fd));
+ test_mmap(fd, system_memory(fd), 0);
igt_subtest("vram")
- test_mmap(fd, visible_vram_memory(fd, 0));
+ test_mmap(fd, vram_memory(fd, 0),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
igt_subtest("vram-system")
- test_mmap(fd, visible_vram_memory(fd, 0) | system_memory(fd));
+ test_mmap(fd, vram_memory(fd, 0) | system_memory(fd),
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
igt_subtest("bad-flags")
test_bad_flags(fd);
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index 88ef39783..5e3349247 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -71,7 +71,7 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
(unsigned int) vm[i]);
bo[i][j] = xe_bo_create(fd, vm[i], bo_size,
- vram_memory(fd, 0));
+ vram_memory(fd, 0), 0);
xe_vm_bind(fd, vm[i], bo[i][j], 0, 0x40000 + j*bo_size,
bo_size, NULL, 0);
}
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index 9f1627727..e825ff5a3 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -103,7 +103,7 @@ static void test_any_engine_busyness(int fd, struct drm_xe_engine_class_instance
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), 0);
spin = xe_bo_map(fd, bo, bo_size);
exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
@@ -223,7 +223,7 @@ static void test_engine_group_busyness(int fd, int gt, int class, const char *na
bo_size = sizeof(*data) * num_placements;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, gt), 0);
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < num_placements; i++) {
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index 9fd3527f7..2e5c61b59 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -272,7 +272,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
bo = xe_bo_create(device.fd_xe, vm, bo_size,
- vram_if_possible(device.fd_xe, eci->gt_id) |
+ vram_if_possible(device.fd_xe, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(device.fd_xe, bo, bo_size);
@@ -381,15 +381,15 @@ static void test_vram_d3cold_threshold(device_t device, int sysfs_fd)
.data = 0,
};
uint64_t vram_used_mb = 0, vram_total_mb = 0, threshold;
- uint32_t bo, flags;
+ uint32_t bo, placement;
int handle, i;
bool active;
void *map;
igt_require(xe_has_vram(device.fd_xe));
- flags = vram_memory(device.fd_xe, 0);
- igt_require_f(flags, "Device doesn't support vram memory region\n");
+ placement = vram_memory(device.fd_xe, 0);
+ igt_require_f(placement, "Device doesn't support vram memory region\n");
igt_assert_eq(igt_ioctl(device.fd_xe, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
igt_assert_neq(query.size, 0);
@@ -410,7 +410,7 @@ static void test_vram_d3cold_threshold(device_t device, int sysfs_fd)
threshold = vram_used_mb + (SIZE / 1024 /1024);
igt_require(threshold < vram_total_mb);
- bo = xe_bo_create(device.fd_xe, 0, SIZE, flags);
+ bo = xe_bo_create(device.fd_xe, 0, SIZE, placement, 0);
map = xe_bo_map(device.fd_xe, bo, SIZE);
memset(map, 0, SIZE);
munmap(map, SIZE);
diff --git a/tests/intel/xe_pm_residency.c b/tests/intel/xe_pm_residency.c
index 40a1693b8..6c9a95429 100644
--- a/tests/intel/xe_pm_residency.c
+++ b/tests/intel/xe_pm_residency.c
@@ -101,7 +101,7 @@ static void exec_load(int fd, struct drm_xe_engine_class_instance *hwe, unsigned
bo_size = xe_get_default_alignment(fd);
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, hwe->gt_id) |
+ vram_if_possible(fd, hwe->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
syncobj = syncobj_create(fd, 0);
diff --git a/tests/intel/xe_prime_self_import.c b/tests/intel/xe_prime_self_import.c
index 2c2f2898c..9a263d326 100644
--- a/tests/intel/xe_prime_self_import.c
+++ b/tests/intel/xe_prime_self_import.c
@@ -105,7 +105,7 @@ static void test_with_fd_dup(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd1 = prime_handle_to_fd(fd1, handle);
@@ -139,9 +139,9 @@ static void test_with_two_bos(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle1 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ handle1 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
- handle2 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ handle2 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd1, handle1);
@@ -178,7 +178,7 @@ static void test_with_one_bo_two_files(void)
fd2 = drm_open_driver(DRIVER_XE);
handle_orig = xe_bo_create(fd1, 0, bo_size,
- vram_if_possible(fd1, 0) |
+ vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd1 = prime_handle_to_fd(fd1, handle_orig);
@@ -211,7 +211,7 @@ static void test_with_one_bo(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
- handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0) |
+ handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd1, handle);
@@ -299,7 +299,7 @@ static void *thread_fn_reimport_vs_close(void *p)
fds[0] = drm_open_driver(DRIVER_XE);
handle = xe_bo_create(fds[0], 0, bo_size,
- vram_if_possible(fds[0], 0) |
+ vram_if_possible(fds[0], 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
fds[1] = prime_handle_to_fd(fds[0], handle);
@@ -343,7 +343,7 @@ static void *thread_fn_export_vs_close(void *p)
igt_until_timeout(g_time_out) {
/* We want to race gem close against prime export on handle one.*/
handle = xe_bo_create(fd, 0, bo_size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
if (handle != 1)
gem_close(fd, handle);
@@ -441,7 +441,7 @@ static void test_llseek_size(void)
int bufsz = xe_get_default_alignment(fd) << i;
handle = xe_bo_create(fd, 0, bufsz,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd, handle);
@@ -471,7 +471,7 @@ static void test_llseek_bad(void)
fd = drm_open_driver(DRIVER_XE);
handle = xe_bo_create(fd, 0, bo_size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
dma_buf_fd = prime_handle_to_fd(fd, handle);
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index c1b161f9c..6abe700da 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -169,7 +169,7 @@ static void xe_spin_fixed_duration(int fd)
exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
bo_size = ALIGN(sizeof(*spin) + xe_cs_prefetch_size(fd), xe_get_default_alignment(fd));
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0), 0);
spin = xe_bo_map(fd, bo, bo_size);
spin_addr = intel_allocator_alloc_with_strategy(ahnd, bo, bo_size, 0,
ALLOC_STRATEGY_LOW_TO_HIGH);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 7db15d4e9..a28cf31a0 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -52,7 +52,7 @@ write_dwords(int fd, uint32_t vm, int n_dwords, uint64_t *addrs)
batch_size = ALIGN(batch_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
batch_bo = xe_bo_create(fd, vm, batch_size,
- vram_if_possible(fd, 0) |
+ vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
batch_map = xe_bo_map(fd, batch_bo, batch_size);
@@ -117,7 +117,7 @@ __test_bind_one_bo(int fd, uint32_t vm, int n_addrs, uint64_t *addrs)
vms = malloc(sizeof(*vms) * n_addrs);
igt_assert(vms);
}
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0) |
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
memset(map, 0, bo_size);
@@ -269,7 +269,7 @@ static void test_partial_unbinds(int fd)
{
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
size_t bo_size = 3 * xe_get_default_alignment(fd);
- uint32_t bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
+ uint32_t bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0), 0);
uint64_t unbind_size = bo_size / 3;
uint64_t addr = 0x1a0000;
@@ -318,7 +318,7 @@ static void unbind_all(int fd, int n_vmas)
};
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0));
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, 0), 0);
for (i = 0; i < n_vmas; ++i)
xe_vm_bind_async(fd, vm, 0, bo, 0, addr + i * bo_size,
@@ -424,7 +424,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
for (i = 0; i < n_bo; ++i) {
bo[i] = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data[i] = xe_bo_map(fd, bo[i], bo_size);
}
@@ -604,7 +604,7 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
@@ -786,7 +786,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
xe_get_default_alignment(fd));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
@@ -985,7 +985,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
xe_visible_vram_size(fd, 0));
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
}
@@ -1280,7 +1280,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(map != MAP_FAILED);
} else {
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, eci->gt_id) |
+ vram_if_possible(fd, eci->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
map = xe_bo_map(fd, bo, bo_size);
}
@@ -1584,9 +1584,9 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(map0 != MAP_FAILED);
igt_assert(map1 != MAP_FAILED);
} else {
- bo0 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo0 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), 0);
map0 = xe_bo_map(fd, bo0, bo_size);
- bo1 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id));
+ bo1 = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), 0);
map1 = xe_bo_map(fd, bo1, bo_size);
}
memset(map0, 0, bo_size);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index eaef0185f..0169f4b9c 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -22,8 +22,6 @@
* Description: Test waitfences functionality
*/
-#define MY_FLAG vram_if_possible(fd, 0)
-
uint64_t wait_fence = 0;
static void do_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
@@ -96,19 +94,19 @@ waitfence(int fd, enum waittype wt)
int64_t timeout;
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo_1 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo_1 = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_1, 0, 0x200000, 0x40000, 1);
- bo_2 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo_2 = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_2, 0, 0xc0000000, 0x40000, 2);
- bo_3 = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo_3 = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_3, 0, 0x180000000, 0x40000, 3);
- bo_4 = xe_bo_create(fd, vm, 0x10000, MY_FLAG);
+ bo_4 = xe_bo_create(fd, vm, 0x10000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_4, 0, 0x140000000, 0x10000, 4);
- bo_5 = xe_bo_create(fd, vm, 0x100000, MY_FLAG);
+ bo_5 = xe_bo_create(fd, vm, 0x100000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_5, 0, 0x100000000, 0x100000, 5);
- bo_6 = xe_bo_create(fd, vm, 0x1c0000, MY_FLAG);
+ bo_6 = xe_bo_create(fd, vm, 0x1c0000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_6, 0, 0xc0040000, 0x1c0000, 6);
- bo_7 = xe_bo_create(fd, vm, 0x10000, MY_FLAG);
+ bo_7 = xe_bo_create(fd, vm, 0x10000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo_7, 0, 0xeffff0000, 0x10000, 7);
if (wt == RELTIME) {
@@ -173,7 +171,7 @@ invalid_flag(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
@@ -198,7 +196,7 @@ invalid_ops(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
@@ -223,7 +221,7 @@ invalid_engine(int fd)
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- bo = xe_bo_create(fd, vm, 0x40000, MY_FLAG);
+ bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
do_bind(fd, vm, bo, 0, 0x200000, 0x40000, 1);
diff --git a/tests/kms_addfb_basic.c b/tests/kms_addfb_basic.c
index 1e34eaa54..41d36732a 100644
--- a/tests/kms_addfb_basic.c
+++ b/tests/kms_addfb_basic.c
@@ -307,7 +307,7 @@ static void invalid_tests(int fd)
handle = gem_create_in_memory_regions(fd, size, REGION_SMEM);
} else {
igt_require(xe_has_vram(fd));
- handle = xe_bo_create(fd, 0, size, system_memory(fd));
+ handle = xe_bo_create(fd, 0, size, system_memory(fd), 0);
}
f.handles[0] = handle;
diff --git a/tests/kms_getfb.c b/tests/kms_getfb.c
index 2d93a032d..bb9679bc9 100644
--- a/tests/kms_getfb.c
+++ b/tests/kms_getfb.c
@@ -210,7 +210,7 @@ static void get_ccs_fb(int fd, struct drm_mode_fb_cmd2 *ret)
if (is_i915_device(fd))
add.handles[0] = gem_buffer_create_fb_obj(fd, size);
else
- add.handles[0] = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0));
+ add.handles[0] = xe_bo_create(fd, 0, size, vram_if_possible(fd, 0), 0);
igt_require(add.handles[0] != 0);
if (!HAS_FLATCCS(devid))
--
2.34.1
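A minimal sketch of the call convention after this change, using only helpers that appear in the hunks above: the placement mask says where the BO may live, while the separate flags argument carries creation behavior. The wrapper function name below is hypothetical, for illustration only.

	/* Hypothetical helper, assuming fd is an open xe device and vm a valid VM id. */
	static uint32_t create_visible_bo(int fd, uint32_t vm, uint64_t size)
	{
		/*
		 * Placement: VRAM on GT0 if present, allowed to spill to
		 * system memory. Flag: the BO must land in the CPU-visible
		 * part of VRAM.
		 */
		return xe_bo_create(fd, vm, size,
				    vram_memory(fd, 0) | system_memory(fd),
				    DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
	}

When no creation behavior is needed, the flags argument is simply 0, as in the waitfence and spin_batch hunks above.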
^ permalink raw reply related [flat|nested] 23+ messages in thread

* [igt-dev] [PATCH v4 07/20] xe: s/hw_engine/engine
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (5 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 06/20] drm-uapi/xe: Separate bo_create placement from flags Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 08/20] drm-uapi/xe: Make DRM_XE_DEVICE_QUERY_ENGINES future proof Francois Dugast
` (13 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
The hw_engine name is redundant now that exec_queue exists as its own concept, so rename hw_engine to engine throughout the library and tests.
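The rename is mechanical; as a sketch, an iteration loop only swaps the macro and helper names, while the engine struct and loop body stay the same:

	struct drm_xe_engine_class_instance *hwe;

	/* was: xe_for_each_hw_engine(fd, hwe) */
	xe_for_each_engine(fd, hwe) {
		/* hwe still points at a struct drm_xe_engine_class_instance */
	}

	/* was: xe_hw_engine(fd, 0) */
	hwe = xe_engine(fd, 0);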
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Kamil Konieczny <kamil.konieczny@linux.intel.com>
---
benchmarks/gem_wsim.c | 8 ++---
lib/xe/xe_query.c | 36 ++++++++++----------
lib/xe/xe_query.h | 22 ++++++------
tests/intel/xe_create.c | 4 +--
tests/intel/xe_dma_buf_sync.c | 2 +-
tests/intel/xe_drm_fdinfo.c | 2 +-
tests/intel/xe_evict.c | 2 +-
tests/intel/xe_exec_balancer.c | 28 ++++++++--------
tests/intel/xe_exec_basic.c | 12 +++----
tests/intel/xe_exec_compute_mode.c | 8 ++---
tests/intel/xe_exec_fault_mode.c | 8 ++---
tests/intel/xe_exec_reset.c | 44 ++++++++++++------------
tests/intel/xe_exec_store.c | 18 +++++-----
tests/intel/xe_exec_threads.c | 24 ++++++-------
tests/intel/xe_huc_copy.c | 2 +-
tests/intel/xe_intel_bb.c | 2 +-
tests/intel/xe_noexec_ping_pong.c | 2 +-
tests/intel/xe_perf_pmu.c | 6 ++--
tests/intel/xe_pm.c | 14 ++++----
tests/intel/xe_pm_residency.c | 2 +-
tests/intel/xe_query.c | 6 ++--
tests/intel/xe_spin_batch.c | 10 +++---
tests/intel/xe_vm.c | 54 +++++++++++++++---------------
tests/intel/xe_waitfence.c | 2 +-
24 files changed, 159 insertions(+), 159 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index eb9fd6266..abbe49a06 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -543,7 +543,7 @@ static struct intel_engine_data *query_engines(void)
if (is_xe) {
struct drm_xe_engine_class_instance *hwe;
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
engines.engines[engines.nengines].class = hwe->engine_class;
engines.engines[engines.nengines].instance = hwe->engine_instance;
engines.nengines++;
@@ -670,7 +670,7 @@ xe_get_engine(enum intel_engine_id engine)
igt_assert(0);
};
- xe_for_each_hw_engine(fd, hwe1) {
+ xe_for_each_engine(fd, hwe1) {
if (hwe.engine_class == hwe1->engine_class &&
hwe.engine_instance == hwe1->engine_instance) {
hwe = *hwe1;
@@ -689,8 +689,8 @@ xe_get_default_engine(void)
struct drm_xe_engine_class_instance default_hwe, *hwe;
/* select RCS0 | CCS0 or first available engine */
- default_hwe = *xe_hw_engine(fd, 0);
- xe_for_each_hw_engine(fd, hwe) {
+ default_hwe = *xe_engine(fd, 0);
+ xe_for_each_engine(fd, hwe) {
if ((hwe->engine_class == DRM_XE_ENGINE_CLASS_RENDER ||
hwe->engine_class == DRM_XE_ENGINE_CLASS_COMPUTE) &&
hwe->engine_instance == 0) {
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index fa17b46b6..ef7aaa6a1 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -75,7 +75,7 @@ static uint64_t __memory_regions(const struct drm_xe_query_gt_list *gt_list)
static struct drm_xe_engine_class_instance *
xe_query_engines_new(int fd, unsigned int *num_engines)
{
- struct drm_xe_engine_class_instance *hw_engines;
+ struct drm_xe_engine_class_instance *engines;
struct drm_xe_device_query query = {
.extensions = 0,
.query = DRM_XE_DEVICE_QUERY_ENGINES,
@@ -86,15 +86,15 @@ xe_query_engines_new(int fd, unsigned int *num_engines)
igt_assert(num_engines);
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
- hw_engines = malloc(query.size);
- igt_assert(hw_engines);
+ engines = malloc(query.size);
+ igt_assert(engines);
- query.data = to_user_pointer(hw_engines);
+ query.data = to_user_pointer(engines);
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
- *num_engines = query.size / sizeof(*hw_engines);
+ *num_engines = query.size / sizeof(*engines);
- return hw_engines;
+ return engines;
}
static struct drm_xe_query_mem_regions *xe_query_mem_regions_new(int fd)
@@ -221,7 +221,7 @@ static void xe_device_free(struct xe_device *xe_dev)
{
free(xe_dev->config);
free(xe_dev->gt_list);
- free(xe_dev->hw_engines);
+ free(xe_dev->engines);
free(xe_dev->mem_regions);
free(xe_dev->vram_size);
free(xe_dev);
@@ -253,7 +253,7 @@ struct xe_device *xe_device_get(int fd)
xe_dev->dev_id = xe_dev->config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
xe_dev->gt_list = xe_query_gt_list_new(fd);
xe_dev->memory_regions = __memory_regions(xe_dev->gt_list);
- xe_dev->hw_engines = xe_query_engines_new(fd, &xe_dev->number_hw_engines);
+ xe_dev->engines = xe_query_engines_new(fd, &xe_dev->number_engines);
xe_dev->mem_regions = xe_query_mem_regions_new(fd);
xe_dev->vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->vram_size));
xe_dev->visible_vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->visible_vram_size));
@@ -422,29 +422,29 @@ uint64_t vram_if_possible(int fd, int gt)
}
/**
- * xe_hw_engines:
+ * xe_engines:
* @fd: xe device fd
*
* Returns engines array of xe device @fd.
*/
-xe_dev_FN(xe_hw_engines, hw_engines, struct drm_xe_engine_class_instance *);
+xe_dev_FN(xe_engines, engines, struct drm_xe_engine_class_instance *);
/**
- * xe_hw_engine:
+ * xe_engine:
* @fd: xe device fd
* @idx: engine index
*
* Returns engine instance of xe device @fd and @idx.
*/
-struct drm_xe_engine_class_instance *xe_hw_engine(int fd, int idx)
+struct drm_xe_engine_class_instance *xe_engine(int fd, int idx)
{
struct xe_device *xe_dev;
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
- igt_assert(idx >= 0 && idx < xe_dev->number_hw_engines);
+ igt_assert(idx >= 0 && idx < xe_dev->number_engines);
- return &xe_dev->hw_engines[idx];
+ return &xe_dev->engines[idx];
}
/**
@@ -529,12 +529,12 @@ uint32_t xe_min_page_size(int fd, uint64_t region)
xe_dev_FN(xe_config, config, struct drm_xe_query_config *);
/**
- * xe_number_hw_engines:
+ * xe_number_engines:
* @fd: xe device fd
*
* Returns number of hw engines of xe device @fd.
*/
-xe_dev_FN(xe_number_hw_engines, number_hw_engines, unsigned int);
+xe_dev_FN(xe_number_engines, number_engines, unsigned int);
/**
* xe_has_vram:
@@ -657,8 +657,8 @@ bool xe_has_engine_class(int fd, uint16_t engine_class)
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
- for (int i = 0; i < xe_dev->number_hw_engines; i++)
- if (xe_dev->hw_engines[i].engine_class == engine_class)
+ for (int i = 0; i < xe_dev->number_engines; i++)
+ if (xe_dev->engines[i].engine_class == engine_class)
return true;
return false;
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index be92ec5ed..bf9f2b955 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -32,11 +32,11 @@ struct xe_device {
/** @gt_list: bitmask of all memory regions */
uint64_t memory_regions;
- /** @hw_engines: array of hardware engines */
- struct drm_xe_engine_class_instance *hw_engines;
+ /** @engines: array of hardware engines */
+ struct drm_xe_engine_class_instance *engines;
- /** @number_hw_engines: length of hardware engines array */
- unsigned int number_hw_engines;
+ /** @number_engines: length of hardware engines array */
+ unsigned int number_engines;
/** @mem_regions: regions memory information and usage */
struct drm_xe_query_mem_regions *mem_regions;
@@ -60,10 +60,10 @@ struct xe_device {
uint16_t dev_id;
};
-#define xe_for_each_hw_engine(__fd, __hwe) \
- for (int __i = 0; __i < xe_number_hw_engines(__fd) && \
- (__hwe = xe_hw_engine(__fd, __i)); ++__i)
-#define xe_for_each_hw_engine_class(__class) \
+#define xe_for_each_engine(__fd, __hwe) \
+ for (int __i = 0; __i < xe_number_engines(__fd) && \
+ (__hwe = xe_engine(__fd, __i)); ++__i)
+#define xe_for_each_engine_class(__class) \
for (__class = 0; __class < DRM_XE_ENGINE_CLASS_COMPUTE + 1; \
++__class)
#define xe_for_each_gt(__fd, __gt) \
@@ -81,14 +81,14 @@ uint64_t all_memory_regions(int fd);
uint64_t system_memory(int fd);
uint64_t vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
-struct drm_xe_engine_class_instance *xe_hw_engines(int fd);
-struct drm_xe_engine_class_instance *xe_hw_engine(int fd, int idx);
+struct drm_xe_engine_class_instance *xe_engines(int fd);
+struct drm_xe_engine_class_instance *xe_engine(int fd, int idx);
struct drm_xe_query_mem_region *xe_mem_region(int fd, uint64_t region);
const char *xe_region_name(uint64_t region);
uint16_t xe_region_class(int fd, uint64_t region);
uint32_t xe_min_page_size(int fd, uint64_t region);
struct drm_xe_query_config *xe_config(int fd);
-unsigned int xe_number_hw_engines(int fd);
+unsigned int xe_number_engines(int fd);
bool xe_has_vram(int fd);
uint64_t xe_vram_size(int fd, int gt);
uint64_t xe_visible_vram_size(int fd, int gt);
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index a5e5b9892..16a371964 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -139,7 +139,7 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
int nproc = sysconf(_SC_NPROCESSORS_ONLN), seconds;
fd = drm_reopen_driver(fd);
- num_engines = xe_number_hw_engines(fd);
+ num_engines = xe_number_engines(fd);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
exec_queues_per_process = max_t(uint32_t, 1, MAXEXECQUEUES / nproc);
@@ -156,7 +156,7 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
for (i = 0; i < exec_queues_per_process; i++) {
idx = rand() % num_engines;
- hwe = xe_hw_engine(fd, idx);
+ hwe = xe_engine(fd, idx);
err = __xe_exec_queue_create(fd, vm, hwe, 0, &exec_queue);
igt_debug("[%2d] Create exec_queue: err=%d, exec_queue=%u [idx = %d]\n",
n, err, exec_queue, i);
diff --git a/tests/intel/xe_dma_buf_sync.c b/tests/intel/xe_dma_buf_sync.c
index aeb4c4995..dfa957243 100644
--- a/tests/intel/xe_dma_buf_sync.c
+++ b/tests/intel/xe_dma_buf_sync.c
@@ -229,7 +229,7 @@ igt_main
igt_fixture {
fd = drm_open_driver(DRIVER_XE);
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
if (hwe0 == NULL) {
hwe0 = hwe;
} else {
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 6bca5a6f1..d50cc6df1 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -313,7 +313,7 @@ igt_main
igt_describe("Create and compare active memory consumption by client");
igt_subtest("drm-active")
- test_active(xe, xe_hw_engine(xe, 0));
+ test_active(xe, xe_engine(xe, 0));
igt_fixture {
drm_close_driver(xe);
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index 436a2be02..2e2960b9b 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -759,7 +759,7 @@ igt_main
vram_size = xe_visible_vram_size(fd, 0);
igt_assert(vram_size);
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COPY)
break;
}
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index fa3d7a338..ea06c23cd 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -57,7 +57,7 @@ static void test_all_active(int fd, int gt, int class)
struct drm_xe_engine_class_instance eci[MAX_INSTANCE];
int i, num_placements = 0;
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -199,7 +199,7 @@ test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -426,7 +426,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -632,25 +632,25 @@ igt_main
igt_subtest("virtual-all-active")
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_all_active(fd, gt, class);
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("once-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_exec(fd, gt, class, 1, 1,
s->flags);
igt_subtest_f("twice-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_exec(fd, gt, class, 1, 2,
s->flags);
igt_subtest_f("many-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_exec(fd, gt, class, 1,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024,
@@ -658,7 +658,7 @@ igt_main
igt_subtest_f("many-execqueues-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_exec(fd, gt, class, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024,
@@ -666,23 +666,23 @@ igt_main
igt_subtest_f("no-exec-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_exec(fd, gt, class, 1, 0,
s->flags);
igt_subtest_f("once-cm-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_cm(fd, gt, class, 1, 1, s->flags);
igt_subtest_f("twice-cm-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_cm(fd, gt, class, 1, 2, s->flags);
igt_subtest_f("many-cm-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_cm(fd, gt, class, 1,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024,
@@ -690,7 +690,7 @@ igt_main
igt_subtest_f("many-execqueues-cm-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_cm(fd, gt, class, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024,
@@ -698,7 +698,7 @@ igt_main
igt_subtest_f("no-exec-cm-%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_cm(fd, gt, class, 1, 0, s->flags);
}
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index 23acdd434..46b9dc2e0 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -336,36 +336,36 @@ igt_main
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("once-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 1, 1, s->flags);
igt_subtest_f("twice-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 2, 1, s->flags);
igt_subtest_f("many-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024, 1,
s->flags);
igt_subtest_f("many-execqueues-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024, 1,
s->flags);
igt_subtest_f("many-execqueues-many-vm-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 1024, 16,
s->flags);
igt_subtest_f("no-exec-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 0, 1, s->flags);
}
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index 98a98256e..a9f69deef 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -321,15 +321,15 @@ igt_main
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("once-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 1, s->flags);
igt_subtest_f("twice-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 2, s->flags);
igt_subtest_f("many-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1,
s->flags & (REBIND | INVALIDATE) ?
64 : 128,
@@ -339,7 +339,7 @@ igt_main
continue;
igt_subtest_f("many-execqueues-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 128,
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 3eb448ef4..4c85fce76 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -386,22 +386,22 @@ igt_main
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("once-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 1, s->flags);
igt_subtest_f("twice-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 2, s->flags);
igt_subtest_f("many-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1,
s->flags & (REBIND | INVALIDATE) ?
64 : 128,
s->flags);
igt_subtest_f("many-execqueues-%s", s->name)
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 16,
s->flags & (REBIND | INVALIDATE) ?
64 : 128,
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index d8b8e0355..988e63438 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -168,7 +168,7 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
if (flags & CLOSE_FD)
fd = drm_open_driver(DRIVER_XE);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -790,106 +790,106 @@ igt_main
fd = drm_open_driver(DRIVER_XE);
igt_subtest("spin")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_spin(fd, hwe);
igt_subtest("cancel")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(fd, hwe, 1, 1, CANCEL);
igt_subtest("execqueue-reset")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(fd, hwe, 2, 2, EXEC_QUEUE_RESET);
igt_subtest("cat-error")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(fd, hwe, 2, 2, CAT_ERROR);
igt_subtest("gt-reset")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(fd, hwe, 2, 2, GT_RESET);
igt_subtest("close-fd-no-exec")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(-1, hwe, 16, 0, CLOSE_FD);
igt_subtest("close-fd")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(-1, hwe, 16, 256, CLOSE_FD);
igt_subtest("close-execqueues-close-fd")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_legacy_mode(-1, hwe, 16, 256, CLOSE_FD |
CLOSE_EXEC_QUEUES);
igt_subtest("cm-execqueue-reset")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(fd, hwe, 2, 2, EXEC_QUEUE_RESET);
igt_subtest("cm-cat-error")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(fd, hwe, 2, 2, CAT_ERROR);
igt_subtest("cm-gt-reset")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(fd, hwe, 2, 2, GT_RESET);
igt_subtest("cm-close-fd-no-exec")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(-1, hwe, 16, 0, CLOSE_FD);
igt_subtest("cm-close-fd")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(-1, hwe, 16, 256, CLOSE_FD);
igt_subtest("cm-close-execqueues-close-fd")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_compute_mode(-1, hwe, 16, 256, CLOSE_FD |
CLOSE_EXEC_QUEUES);
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("%s-cancel", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(fd, gt, class, 1, 1,
CANCEL | s->flags);
igt_subtest_f("%s-execqueue-reset", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(fd, gt, class, MAX_INSTANCE + 1,
MAX_INSTANCE + 1,
EXEC_QUEUE_RESET | s->flags);
igt_subtest_f("%s-cat-error", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(fd, gt, class, MAX_INSTANCE + 1,
MAX_INSTANCE + 1,
CAT_ERROR | s->flags);
igt_subtest_f("%s-gt-reset", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(fd, gt, class, MAX_INSTANCE + 1,
MAX_INSTANCE + 1,
GT_RESET | s->flags);
igt_subtest_f("%s-close-fd-no-exec", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(-1, gt, class, 16, 0,
CLOSE_FD | s->flags);
igt_subtest_f("%s-close-fd", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(-1, gt, class, 16, 256,
CLOSE_FD | s->flags);
igt_subtest_f("%s-close-execqueues-close-fd", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
test_balancer(-1, gt, class, 16, 256, CLOSE_FD |
CLOSE_EXEC_QUEUES | s->flags);
}
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 9ee5edeb4..0b7b3d3e9 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -63,7 +63,7 @@ static void store(int fd)
.syncs = to_user_pointer(&sync),
};
struct data *data;
- struct drm_xe_engine_class_instance *hw_engine;
+ struct drm_xe_engine_class_instance *engine;
uint32_t vm;
uint32_t exec_queue;
uint32_t syncobj;
@@ -80,16 +80,16 @@ static void store(int fd)
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
- hw_engine = xe_hw_engine(fd, 1);
+ engine = xe_engine(fd, 1);
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, hw_engine->gt_id),
+ vram_if_possible(fd, engine->gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
- xe_vm_bind_async(fd, vm, hw_engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
+ xe_vm_bind_async(fd, vm, engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
data = xe_bo_map(fd, bo, bo_size);
store_dword_batch(data, addr, value);
- exec_queue = xe_exec_queue_create(fd, vm, hw_engine, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, engine, 0);
exec.exec_queue_id = exec_queue;
exec.address = data->addr;
sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
@@ -242,7 +242,7 @@ static void store_all(int fd, int gt, int class)
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
data = xe_bo_map(fd, bo, bo_size);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
eci[num_placements++] = *hwe;
@@ -309,16 +309,16 @@ igt_main
igt_subtest("basic-all") {
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
store_all(fd, gt, class);
}
igt_subtest("cachelines")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
store_cachelines(fd, hwe, 0);
igt_subtest("page-sized")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
store_cachelines(fd, hwe, PAGES);
igt_fixture {
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index f37fc612a..8a01b150d 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -81,7 +81,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
owns_vm = true;
}
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -969,22 +969,22 @@ static void threads(int fd, int flags)
uint64_t userptr = 0x00007000eadbe000;
pthread_mutex_t mutex;
pthread_cond_t cond;
- int n_hw_engines = 0, class;
+ int n_engines = 0, class;
uint64_t i = 0;
uint32_t vm_legacy_mode = 0, vm_compute_mode = 0;
bool go = false;
int n_threads = 0;
int gt;
- xe_for_each_hw_engine(fd, hwe)
- ++n_hw_engines;
+ xe_for_each_engine(fd, hwe)
+ ++n_engines;
if (flags & BALANCER) {
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class) {
+ xe_for_each_engine_class(class) {
int num_placements = 0;
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class ||
hwe->gt_id != gt)
continue;
@@ -992,11 +992,11 @@ static void threads(int fd, int flags)
}
if (num_placements > 1)
- n_hw_engines += 2;
+ n_engines += 2;
}
}
- threads_data = calloc(n_hw_engines, sizeof(*threads_data));
+ threads_data = calloc(n_engines, sizeof(*threads_data));
igt_assert(threads_data);
pthread_mutex_init(&mutex, 0);
@@ -1012,7 +1012,7 @@ static void threads(int fd, int flags)
0);
}
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
threads_data[i].mutex = &mutex;
threads_data[i].cond = &cond;
#define ADDRESS_SHIFT 39
@@ -1045,10 +1045,10 @@ static void threads(int fd, int flags)
if (flags & BALANCER) {
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class) {
+ xe_for_each_engine_class(class) {
int num_placements = 0;
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class ||
hwe->gt_id != gt)
continue;
@@ -1123,7 +1123,7 @@ static void threads(int fd, int flags)
pthread_cond_broadcast(&cond);
pthread_mutex_unlock(&mutex);
- for (i = 0; i < n_hw_engines; ++i)
+ for (i = 0; i < n_engines; ++i)
pthread_join(threads_data[i].thread, NULL);
if (vm_legacy_mode)
diff --git a/tests/intel/xe_huc_copy.c b/tests/intel/xe_huc_copy.c
index eda9e5216..dbc5afc17 100644
--- a/tests/intel/xe_huc_copy.c
+++ b/tests/intel/xe_huc_copy.c
@@ -158,7 +158,7 @@ test_huc_copy(int fd)
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE &&
!(tested_gts & BIT(hwe->gt_id))) {
tested_gts |= BIT(hwe->gt_id);
diff --git a/tests/intel/xe_intel_bb.c b/tests/intel/xe_intel_bb.c
index 00bd17d4c..e7a566f62 100644
--- a/tests/intel/xe_intel_bb.c
+++ b/tests/intel/xe_intel_bb.c
@@ -192,7 +192,7 @@ static void simple_bb(struct buf_ops *bops, bool new_context)
if (new_context) {
vm = xe_vm_create(xe, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- ctx = xe_exec_queue_create(xe, vm, xe_hw_engine(xe, 0), 0);
+ ctx = xe_exec_queue_create(xe, vm, xe_engine(xe, 0), 0);
intel_bb_destroy(ibb);
ibb = intel_bb_create_with_context(xe, ctx, vm, NULL, PAGE_SIZE);
intel_bb_out(ibb, MI_BATCH_BUFFER_END);
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index 5e3349247..e27cc4582 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -98,7 +98,7 @@ igt_simple_main
fd = drm_open_driver(DRIVER_XE);
- test_ping_pong(fd, xe_hw_engine(fd, 0));
+ test_ping_pong(fd, xe_engine(fd, 0));
drm_close_driver(fd);
}
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index e825ff5a3..ba5488c48 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -209,7 +209,7 @@ static void test_engine_group_busyness(int fd, int gt, int class, const char *na
config = engine_group_get_config(gt, class);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
@@ -315,13 +315,13 @@ igt_main
for (const struct section *s = sections; s->name; s++) {
igt_subtest_f("%s", s->name)
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
if (class == s->class)
test_engine_group_busyness(fd, gt, class, s->name);
}
igt_subtest("any-engine-group-busy")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_any_engine_busyness(fd, hwe);
igt_fixture {
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index 2e5c61b59..d78ca31a8 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -471,7 +471,7 @@ igt_main
igt_device_get_pci_slot_name(device.fd_xe, device.pci_slot_name);
/* Always perform initial once-basic exec checking for health */
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 1, 1, NO_SUSPEND, NO_RPM);
igt_pm_get_d3cold_allowed(device.pci_slot_name, &d3cold_allowed);
@@ -486,7 +486,7 @@ igt_main
}
igt_subtest_f("%s-basic-exec", s->name) {
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 1, 2, s->state,
NO_RPM);
}
@@ -494,13 +494,13 @@ igt_main
igt_subtest_f("%s-exec-after", s->name) {
igt_system_suspend_autoresume(s->state,
SUSPEND_TEST_NONE);
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 1, 2, NO_SUSPEND,
NO_RPM);
}
igt_subtest_f("%s-multiple-execs", s->name) {
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 16, 32, s->state,
NO_RPM);
}
@@ -508,7 +508,7 @@ igt_main
for (const struct d_state *d = d_states; d->name; d++) {
igt_subtest_f("%s-%s-basic-exec", s->name, d->name) {
igt_assert(setup_d3(device, d->state));
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 1, 2, s->state,
NO_RPM);
}
@@ -523,14 +523,14 @@ igt_main
igt_subtest_f("%s-basic-exec", d->name) {
igt_assert(setup_d3(device, d->state));
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 1, 1,
NO_SUSPEND, d->state);
}
igt_subtest_f("%s-multiple-execs", d->name) {
igt_assert(setup_d3(device, d->state));
- xe_for_each_hw_engine(device.fd_xe, hwe)
+ xe_for_each_engine(device.fd_xe, hwe)
test_exec(device, hwe, 16, 32,
NO_SUSPEND, d->state);
}
diff --git a/tests/intel/xe_pm_residency.c b/tests/intel/xe_pm_residency.c
index 6c9a95429..4f590c83c 100644
--- a/tests/intel/xe_pm_residency.c
+++ b/tests/intel/xe_pm_residency.c
@@ -346,7 +346,7 @@ igt_main
igt_describe("Validate idle residency on exec");
igt_subtest("idle-residency-on-exec") {
xe_for_each_gt(fd, gt) {
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (gt == hwe->gt_id && !hwe->engine_instance)
idle_residency_on_exec(fd, hwe);
}
diff --git a/tests/intel/xe_query.c b/tests/intel/xe_query.c
index 4a23dcb60..48042337a 100644
--- a/tests/intel/xe_query.c
+++ b/tests/intel/xe_query.c
@@ -181,7 +181,7 @@ test_query_engines(int fd)
struct drm_xe_engine_class_instance *hwe;
int i = 0;
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
igt_assert(hwe);
igt_info("engine %d: %s, engine instance: %d, tile: TILE-%d\n", i++,
xe_engine_class_string(hwe->engine_class), hwe->engine_instance,
@@ -602,7 +602,7 @@ static void test_query_engine_cycles(int fd)
igt_require(query_engine_cycles_supported(fd));
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
igt_assert(hwe);
__engine_cycles(fd, hwe);
}
@@ -626,7 +626,7 @@ static void test_engine_cycles_invalid(int fd)
igt_require(query_engine_cycles_supported(fd));
/* get one engine */
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
break;
/* sanity check engine selection is valid */
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index 6abe700da..2e2a0ed0e 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -72,8 +72,8 @@ static void spin_basic_all(int fd)
vm = xe_vm_create(fd, 0, 0);
ahnd = intel_allocator_open(fd, vm, INTEL_ALLOCATOR_RELOC);
- spin = malloc(sizeof(*spin) * xe_number_hw_engines(fd));
- xe_for_each_hw_engine(fd, hwe) {
+ spin = malloc(sizeof(*spin) * xe_number_engines(fd));
+ xe_for_each_engine(fd, hwe) {
igt_debug("Run on engine: %s:%d\n",
xe_engine_class_string(hwe->engine_class), hwe->engine_instance);
spin[i] = igt_spin_new(fd, .ahnd = ahnd, .vm = vm, .hwe = hwe);
@@ -104,7 +104,7 @@ static void spin_all(int fd, int gt, int class)
ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
if (hwe->engine_class != class || hwe->gt_id != gt)
continue;
eci[num_placements++] = *hwe;
@@ -217,7 +217,7 @@ igt_main
spin_basic(fd);
igt_subtest("spin-batch")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
spin(fd, hwe);
igt_subtest("spin-basic-all")
@@ -225,7 +225,7 @@ igt_main
igt_subtest("spin-all") {
xe_for_each_gt(fd, gt)
- xe_for_each_hw_engine_class(class)
+ xe_for_each_engine_class(class)
spin_all(fd, gt, class);
}
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index a28cf31a0..f5099f398 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -1860,7 +1860,7 @@ igt_main
igt_fixture {
fd = drm_open_driver(DRIVER_XE);
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COPY) {
hwe_non_copy = hwe;
break;
@@ -1892,45 +1892,45 @@ igt_main
userptr_invalid(fd);
igt_subtest("shared-pte-page")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
shared_pte_page(fd, hwe, 4,
xe_get_default_alignment(fd));
igt_subtest("shared-pde-page")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
shared_pte_page(fd, hwe, 4, 0x1000ul * 512);
igt_subtest("shared-pde2-page")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
shared_pte_page(fd, hwe, 4, 0x1000ul * 512 * 512);
igt_subtest("shared-pde3-page")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
shared_pte_page(fd, hwe, 4, 0x1000ul * 512 * 512 * 512);
igt_subtest("bind-execqueues-independent")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_execqueues_independent(fd, hwe, 0);
igt_subtest("bind-execqueues-conflict")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_execqueues_independent(fd, hwe, CONFLICT);
igt_subtest("bind-array-twice")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_array(fd, hwe, 2, 0);
igt_subtest("bind-array-many")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_array(fd, hwe, 16, 0);
igt_subtest("bind-array-exec_queue-twice")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_array(fd, hwe, 2,
BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
igt_subtest("bind-array-exec_queue-many")
- xe_for_each_hw_engine(fd, hwe)
+ xe_for_each_engine(fd, hwe)
test_bind_array(fd, hwe, 16,
BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
@@ -1938,41 +1938,41 @@ igt_main
bind_size = bind_size << 1) {
igt_subtest_f("large-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size, 0);
break;
}
igt_subtest_f("large-split-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_SPLIT);
break;
}
igt_subtest_f("large-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED);
break;
}
igt_subtest_f("large-split-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_SPLIT |
LARGE_BIND_FLAG_MISALIGNED);
break;
}
igt_subtest_f("large-userptr-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_USERPTR);
break;
}
igt_subtest_f("large-userptr-split-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_SPLIT |
LARGE_BIND_FLAG_USERPTR);
@@ -1980,7 +1980,7 @@ igt_main
}
igt_subtest_f("large-userptr-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED |
LARGE_BIND_FLAG_USERPTR);
@@ -1988,7 +1988,7 @@ igt_main
}
igt_subtest_f("large-userptr-split-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_SPLIT |
LARGE_BIND_FLAG_MISALIGNED |
@@ -1999,13 +1999,13 @@ igt_main
bind_size = (0x1ull << 21) + (0x1ull << 20);
igt_subtest_f("mixed-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size, 0);
break;
}
igt_subtest_f("mixed-misaligned-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED);
break;
@@ -2013,14 +2013,14 @@ igt_main
bind_size = (0x1ull << 30) + (0x1ull << 29) + (0x1ull << 20);
igt_subtest_f("mixed-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size, 0);
break;
}
bind_size = (0x1ull << 30) + (0x1ull << 29) + (0x1ull << 20);
igt_subtest_f("mixed-misaligned-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED);
break;
@@ -2028,7 +2028,7 @@ igt_main
bind_size = (0x1ull << 21) + (0x1ull << 20);
igt_subtest_f("mixed-userptr-binds-%lld", (long long) bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_USERPTR);
break;
@@ -2036,7 +2036,7 @@ igt_main
igt_subtest_f("mixed-userptr-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED |
LARGE_BIND_FLAG_USERPTR);
@@ -2045,7 +2045,7 @@ igt_main
bind_size = (0x1ull << 30) + (0x1ull << 29) + (0x1ull << 20);
igt_subtest_f("mixed-userptr-binds-%lld", (long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_USERPTR);
break;
@@ -2054,7 +2054,7 @@ igt_main
bind_size = (0x1ull << 30) + (0x1ull << 29) + (0x1ull << 20);
igt_subtest_f("mixed-userptr-misaligned-binds-%lld",
(long long)bind_size)
- xe_for_each_hw_engine(fd, hwe) {
+ xe_for_each_engine(fd, hwe) {
test_large_binds(fd, hwe, 4, 16, bind_size,
LARGE_BIND_FLAG_MISALIGNED |
LARGE_BIND_FLAG_USERPTR);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index 0169f4b9c..ad8adc2b0 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -114,7 +114,7 @@ waitfence(int fd, enum waittype wt)
igt_debug("wait type: RELTIME - timeout: %ld, timeout left: %ld\n",
MS_TO_NS(10), timeout);
} else if (wt == ENGINE) {
- eci = xe_hw_engine(fd, 1);
+ eci = xe_engine(fd, 1);
clock_gettime(CLOCK_MONOTONIC, &ts);
current = ts.tv_sec * 1e9 + ts.tv_nsec;
timeout = current + MS_TO_NS(10);
--
2.34.1
^ permalink raw reply related [flat|nested] 23+ messages in thread

* [igt-dev] [PATCH v4 08/20] drm-uapi/xe: Make DRM_XE_DEVICE_QUERY_ENGINES future proof
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (6 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 07/20] xe: s/hw_engine/engine Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 09/20] drm-uapi/xe: Reject bo creation of unaligned size Francois Dugast
` (12 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev; +Cc: Rodrigo Vivi
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Align with kernel commit ("drm/xe: Make DRM_XE_DEVICE_QUERY_ENGINES future proof")
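DRM_XE_DEVICE_QUERY_ENGINES now returns an array of a wrapper struct with reserved space for future growth, so callers reach one level deeper for the class:instance pair. A minimal sketch of the adjusted access pattern, mirroring the gem_wsim and xe_query hunks below:

	struct drm_xe_query_engine_info *engine;
	struct drm_xe_engine_class_instance eci;

	engine = xe_engine(fd, 0);	/* now returns the wrapper struct */
	eci = engine->instance;		/* class:instance moved into .instance */

	if (eci.engine_class == DRM_XE_ENGINE_CLASS_RENDER)
		igt_info("engine 0 is a render engine\n");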
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
benchmarks/gem_wsim.c | 2 +-
include/drm-uapi/xe_drm.h | 24 +++++++++++++++++++++++-
lib/xe/xe_query.c | 16 ++++++++--------
lib/xe/xe_query.h | 8 ++++----
tests/intel/xe_create.c | 7 ++++---
tests/intel/xe_drm_fdinfo.c | 5 +++--
tests/intel/xe_exec_store.c | 8 ++++----
tests/intel/xe_intel_bb.c | 3 ++-
tests/intel/xe_noexec_ping_pong.c | 5 +++--
tests/intel/xe_waitfence.c | 6 +++---
10 files changed, 55 insertions(+), 29 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index abbe49a06..514fa4ba7 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -689,7 +689,7 @@ xe_get_default_engine(void)
struct drm_xe_engine_class_instance default_hwe, *hwe;
/* select RCS0 | CCS0 or first available engine */
- default_hwe = *xe_engine(fd, 0);
+ default_hwe = xe_engine(fd, 0)->instance;
xe_for_each_engine(fd, hwe) {
if ((hwe->engine_class == DRM_XE_ENGINE_CLASS_RENDER ||
hwe->engine_class == DRM_XE_ENGINE_CLASS_COMPUTE) &&
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 26c1a8604..187e02e30 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -124,7 +124,14 @@ struct xe_user_extension {
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
-/** struct drm_xe_engine_class_instance - instance of an engine class */
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of @drm_xe_query_engine_info, but it is also
+ * used as the input for engine selection in both @drm_xe_exec_queue_create
+ * and @drm_xe_query_engine_cycles.
+ *
+ */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER 0
#define DRM_XE_ENGINE_CLASS_COPY 1
@@ -145,6 +152,21 @@ struct drm_xe_engine_class_instance {
__u16 pad;
};
+/**
+ * struct drm_xe_query_engine_info - describe hardware engine
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engine_info in .data.
+ */
+struct drm_xe_query_engine_info {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[5];
+};
+
/**
* enum drm_xe_memory_class - Supported memory classes.
*/
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index ef7aaa6a1..f9dec1f7a 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -72,10 +72,10 @@ static uint64_t __memory_regions(const struct drm_xe_query_gt_list *gt_list)
return regions;
}
-static struct drm_xe_engine_class_instance *
-xe_query_engines_new(int fd, unsigned int *num_engines)
+static struct drm_xe_query_engine_info *
+xe_query_engines(int fd, unsigned int *num_engines)
{
- struct drm_xe_engine_class_instance *engines;
+ struct drm_xe_query_engine_info *engines;
struct drm_xe_device_query query = {
.extensions = 0,
.query = DRM_XE_DEVICE_QUERY_ENGINES,
@@ -253,7 +253,7 @@ struct xe_device *xe_device_get(int fd)
xe_dev->dev_id = xe_dev->config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
xe_dev->gt_list = xe_query_gt_list_new(fd);
xe_dev->memory_regions = __memory_regions(xe_dev->gt_list);
- xe_dev->engines = xe_query_engines_new(fd, &xe_dev->number_engines);
+ xe_dev->engines = xe_query_engines(fd, &xe_dev->number_engines);
xe_dev->mem_regions = xe_query_mem_regions_new(fd);
xe_dev->vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->vram_size));
xe_dev->visible_vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->visible_vram_size));
@@ -427,16 +427,16 @@ uint64_t vram_if_possible(int fd, int gt)
*
* Returns engines array of xe device @fd.
*/
-xe_dev_FN(xe_engines, engines, struct drm_xe_engine_class_instance *);
+xe_dev_FN(xe_engines, engines, struct drm_xe_query_engine_info *);
/**
* xe_engine:
* @fd: xe device fd
* @idx: engine index
*
- * Returns engine instance of xe device @fd and @idx.
+ * Returns engine info of xe device @fd and @idx.
*/
-struct drm_xe_engine_class_instance *xe_engine(int fd, int idx)
+struct drm_xe_query_engine_info *xe_engine(int fd, int idx)
{
struct xe_device *xe_dev;
@@ -658,7 +658,7 @@ bool xe_has_engine_class(int fd, uint16_t engine_class)
igt_assert(xe_dev);
for (int i = 0; i < xe_dev->number_engines; i++)
- if (xe_dev->engines[i].engine_class == engine_class)
+ if (xe_dev->engines[i].instance.engine_class == engine_class)
return true;
return false;
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index bf9f2b955..fede00036 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -33,7 +33,7 @@ struct xe_device {
uint64_t memory_regions;
/** @engines: array of hardware engines */
- struct drm_xe_engine_class_instance *engines;
+ struct drm_xe_query_engine_info *engines;
/** @number_engines: length of hardware engines array */
unsigned int number_engines;
@@ -62,7 +62,7 @@ struct xe_device {
#define xe_for_each_engine(__fd, __hwe) \
for (int __i = 0; __i < xe_number_engines(__fd) && \
- (__hwe = xe_engine(__fd, __i)); ++__i)
+ (__hwe = &xe_engine(__fd, __i)->instance); ++__i)
#define xe_for_each_engine_class(__class) \
for (__class = 0; __class < DRM_XE_ENGINE_CLASS_COMPUTE + 1; \
++__class)
@@ -81,8 +81,8 @@ uint64_t all_memory_regions(int fd);
uint64_t system_memory(int fd);
uint64_t vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
-struct drm_xe_engine_class_instance *xe_engines(int fd);
-struct drm_xe_engine_class_instance *xe_engine(int fd, int idx);
+struct drm_xe_query_engine_info *xe_engines(int fd);
+struct drm_xe_query_engine_info *xe_engine(int fd, int idx);
struct drm_xe_query_mem_region *xe_mem_region(int fd, uint64_t region);
const char *xe_region_name(uint64_t region);
uint16_t xe_region_class(int fd, uint64_t region);
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index 16a371964..03feecd25 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -148,7 +148,7 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
igt_nsec_elapsed(&tv);
igt_fork(n, nproc) {
- struct drm_xe_engine_class_instance *hwe;
+ struct drm_xe_query_engine_info *engine;
uint32_t exec_queue, exec_queues[exec_queues_per_process];
int idx, err, i;
@@ -156,8 +156,9 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
for (i = 0; i < exec_queues_per_process; i++) {
idx = rand() % num_engines;
- hwe = xe_engine(fd, idx);
- err = __xe_exec_queue_create(fd, vm, hwe, 0, &exec_queue);
+ engine = xe_engine(fd, idx);
+ err = __xe_exec_queue_create(fd, vm, &engine->instance,
+ 0, &exec_queue);
igt_debug("[%2d] Create exec_queue: err=%d, exec_queue=%u [idx = %d]\n",
n, err, exec_queue, i);
if (err)
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index d50cc6df1..cec3e0825 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -40,7 +40,7 @@ IGT_TEST_DESCRIPTION("Read and verify drm client memory consumption using fdinfo
#define BO_SIZE (65536)
/* Subtests */
-static void test_active(int fd, struct drm_xe_engine_class_instance *eci)
+static void test_active(int fd, struct drm_xe_query_engine_info *engine)
{
struct drm_xe_query_mem_region *memregion;
uint64_t memreg = all_memory_regions(fd), region;
@@ -89,7 +89,8 @@ static void test_active(int fd, struct drm_xe_engine_class_instance *eci)
data = xe_bo_map(fd, bo, bo_size);
for (i = 0; i < N_EXEC_QUEUES; i++) {
- exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
+ exec_queues[i] = xe_exec_queue_create(fd, vm,
+ &engine->instance, 0);
bind_exec_queues[i] = xe_bind_exec_queue_create(fd, vm, 0, true);
syncobjs[i] = syncobj_create(fd, 0);
}
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 0b7b3d3e9..48e843af5 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -63,7 +63,7 @@ static void store(int fd)
.syncs = to_user_pointer(&sync),
};
struct data *data;
- struct drm_xe_engine_class_instance *engine;
+ struct drm_xe_query_engine_info *engine;
uint32_t vm;
uint32_t exec_queue;
uint32_t syncobj;
@@ -82,14 +82,14 @@ static void store(int fd)
engine = xe_engine(fd, 1);
bo = xe_bo_create(fd, vm, bo_size,
- vram_if_possible(fd, engine->gt_id),
+ vram_if_possible(fd, engine->instance.gt_id),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
- xe_vm_bind_async(fd, vm, engine->gt_id, bo, 0, addr, bo_size, &sync, 1);
+ xe_vm_bind_async(fd, vm, engine->instance.gt_id, bo, 0, addr, bo_size, &sync, 1);
data = xe_bo_map(fd, bo, bo_size);
store_dword_batch(data, addr, value);
- exec_queue = xe_exec_queue_create(fd, vm, engine, 0);
+ exec_queue = xe_exec_queue_create(fd, vm, &engine->instance, 0);
exec.exec_queue_id = exec_queue;
exec.address = data->addr;
sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
diff --git a/tests/intel/xe_intel_bb.c b/tests/intel/xe_intel_bb.c
index e7a566f62..b64812f9d 100644
--- a/tests/intel/xe_intel_bb.c
+++ b/tests/intel/xe_intel_bb.c
@@ -192,7 +192,8 @@ static void simple_bb(struct buf_ops *bops, bool new_context)
if (new_context) {
vm = xe_vm_create(xe, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
- ctx = xe_exec_queue_create(xe, vm, xe_engine(xe, 0), 0);
+ ctx = xe_exec_queue_create(xe, vm, &xe_engine(xe, 0)->instance,
+ 0);
intel_bb_destroy(ibb);
ibb = intel_bb_create_with_context(xe, ctx, vm, NULL, PAGE_SIZE);
intel_bb_out(ibb, MI_BATCH_BUFFER_END);
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index e27cc4582..585af413d 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -43,7 +43,7 @@
* there is work queued on one of the VM's compute exec_queues.
*/
-static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
+static void test_ping_pong(int fd, struct drm_xe_query_engine_info *engine)
{
size_t vram_size = xe_vram_size(fd, 0);
size_t align = xe_get_default_alignment(fd);
@@ -75,7 +75,8 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
xe_vm_bind(fd, vm[i], bo[i][j], 0, 0x40000 + j*bo_size,
bo_size, NULL, 0);
}
- exec_queues[i] = xe_exec_queue_create(fd, vm[i], eci, 0);
+ exec_queues[i] = xe_exec_queue_create(fd, vm[i],
+ &engine->instance, 0);
}
igt_info("Now sleeping for %ds.\n", SECONDS_TO_WAIT);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index ad8adc2b0..bab2bed42 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -81,7 +81,7 @@ enum waittype {
static void
waitfence(int fd, enum waittype wt)
{
- struct drm_xe_engine_class_instance *eci = NULL;
+ struct drm_xe_query_engine_info *engine = NULL;
struct timespec ts;
int64_t current, signalled;
uint32_t bo_1;
@@ -114,11 +114,11 @@ waitfence(int fd, enum waittype wt)
igt_debug("wait type: RELTIME - timeout: %ld, timeout left: %ld\n",
MS_TO_NS(10), timeout);
} else if (wt == ENGINE) {
- eci = xe_engine(fd, 1);
+ engine = xe_engine(fd, 1);
clock_gettime(CLOCK_MONOTONIC, &ts);
current = ts.tv_sec * 1e9 + ts.tv_nsec;
timeout = current + MS_TO_NS(10);
- signalled = wait_with_eci_abstime(fd, &wait_fence, 7, eci, timeout);
+ signalled = wait_with_eci_abstime(fd, &wait_fence, 7, &engine->instance, timeout);
igt_debug("wait type: ENGINE ABSTIME - timeout: %" PRId64
", signalled: %" PRId64
", elapsed: %" PRId64 "\n",
--
2.34.1
* [igt-dev] [PATCH v4 09/20] drm-uapi/xe: Reject bo creation of unaligned size
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (7 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 08/20] drm-uapi/xe: Make DRM_XE_DEVICE_QUERY_ENGINES future proof Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 10/20] drm-uapi/xe: Align on a common way to return arrays (memory regions) Francois Dugast
` (11 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with kernel commit ("drm/xe/uapi: Reject bo creation of unaligned size")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
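With this change tests can no longer hardcode a 4 KiB size; allocations
must be rounded up to the region's min_page_size. A minimal sketch of the
pattern the converted tests follow (assumes fd is an open xe device and
uses IGT's ALIGN round-up macro):

    size_t align = xe_get_default_alignment(fd);
    uint64_t bo_size = ALIGN(8192, align); /* round the requested size up */
    uint32_t bo;

    bo = xe_bo_create(fd, 0, bo_size, vram_if_possible(fd, 0),
                      DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);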
include/drm-uapi/xe_drm.h | 17 +++++++++--------
tests/intel/xe_mmap.c | 22 ++++++++++++----------
tests/intel/xe_prime_self_import.c | 28 +++++++++++++++++++++++++---
tests/intel/xe_vm.c | 13 ++++++-------
4 files changed, 52 insertions(+), 28 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 187e02e30..4b944b32a 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -206,11 +206,13 @@ struct drm_xe_query_mem_region {
*
* When the kernel allocates memory for this region, the
* underlying pages will be at least @min_page_size in size.
- *
- * Important note: When userspace allocates a GTT address which
- * can point to memory allocated from this region, it must also
- * respect this minimum alignment. This is enforced by the
- * kernel.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLs will return %-EINVAL if alignment restrictions are
+ * not met.
*/
__u32 min_page_size;
/**
@@ -516,9 +518,8 @@ struct drm_xe_gem_create {
__u64 extensions;
/**
- * @size: Requested size for the object
- *
- * The (page-aligned) allocated size for the object will be returned.
+ * @size: Size of the object to be created, must match region
+ * (system or vram) minimum alignment (&min_page_size).
*/
__u64 size;
diff --git a/tests/intel/xe_mmap.c b/tests/intel/xe_mmap.c
index 965644e22..d6c8d5114 100644
--- a/tests/intel/xe_mmap.c
+++ b/tests/intel/xe_mmap.c
@@ -47,17 +47,18 @@
static void
test_mmap(int fd, uint32_t placement, uint32_t flags)
{
+ size_t bo_size = xe_get_default_alignment(fd);
uint32_t bo;
void *map;
igt_require_f(placement, "Device doesn't support such memory region\n");
- bo = xe_bo_create(fd, 0, 4096, placement, flags);
+ bo = xe_bo_create(fd, 0, bo_size, placement, flags);
- map = xe_bo_map(fd, bo, 4096);
+ map = xe_bo_map(fd, bo, bo_size);
strcpy(map, "Write some data to the BO!");
- munmap(map, 4096);
+ munmap(map, bo_size);
gem_close(fd, bo);
}
@@ -156,13 +157,14 @@ static void trap_sigbus(uint32_t *ptr)
*/
static void test_small_bar(int fd)
{
+ size_t page_size = xe_get_default_alignment(fd);
uint32_t visible_size = xe_visible_vram_size(fd, 0);
uint32_t bo;
uint64_t mmo;
uint32_t *map;
/* 2BIG invalid case */
- igt_assert_neq(__xe_bo_create(fd, 0, visible_size + 4096,
+ igt_assert_neq(__xe_bo_create(fd, 0, visible_size + page_size,
vram_memory(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
&bo),
@@ -172,12 +174,12 @@ static void test_small_bar(int fd)
bo = xe_bo_create(fd, 0, visible_size / 4, vram_memory(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
mmo = xe_bo_mmap_offset(fd, bo);
- map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
+ map = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
map[0] = 0xdeadbeaf;
- munmap(map, 4096);
+ munmap(map, page_size);
gem_close(fd, bo);
/* Normal operation with system memory spilling */
@@ -186,18 +188,18 @@ static void test_small_bar(int fd)
system_memory(fd),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
mmo = xe_bo_mmap_offset(fd, bo);
- map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
+ map = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
map[0] = 0xdeadbeaf;
- munmap(map, 4096);
+ munmap(map, page_size);
gem_close(fd, bo);
/* Bogus operation with SIGBUS */
- bo = xe_bo_create(fd, 0, visible_size + 4096, vram_memory(fd, 0), 0);
+ bo = xe_bo_create(fd, 0, visible_size + page_size, vram_memory(fd, 0), 0);
mmo = xe_bo_mmap_offset(fd, bo);
- map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo);
+ map = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, mmo);
igt_assert(map != MAP_FAILED);
trap_sigbus(map);
diff --git a/tests/intel/xe_prime_self_import.c b/tests/intel/xe_prime_self_import.c
index 9a263d326..8e7290e9e 100644
--- a/tests/intel/xe_prime_self_import.c
+++ b/tests/intel/xe_prime_self_import.c
@@ -59,15 +59,20 @@ IGT_TEST_DESCRIPTION("Check whether prime import/export works on the same"
static char counter;
static int g_time_out = 5;
static pthread_barrier_t g_barrier;
-static size_t bo_size;
+
+static size_t get_min_bo_size(int fd1, int fd2)
+{
+ return 4 * max(xe_get_default_alignment(fd1),
+ xe_get_default_alignment(fd2));
+}
static void
check_bo(int fd1, uint32_t handle1, int fd2, uint32_t handle2)
{
+ size_t bo_size = get_min_bo_size(fd1, fd2);
char *ptr1, *ptr2;
int i;
-
ptr1 = xe_bo_map(fd1, handle1, bo_size);
ptr2 = xe_bo_map(fd2, handle2, bo_size);
@@ -97,6 +102,7 @@ check_bo(int fd1, uint32_t handle1, int fd2, uint32_t handle2)
static void test_with_fd_dup(void)
{
int fd1, fd2;
+ size_t bo_size;
uint32_t handle, handle_import;
int dma_buf_fd1, dma_buf_fd2;
@@ -105,6 +111,8 @@ static void test_with_fd_dup(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
+ bo_size = get_min_bo_size(fd1, fd2);
+
handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
@@ -131,6 +139,7 @@ static void test_with_fd_dup(void)
static void test_with_two_bos(void)
{
int fd1, fd2;
+ size_t bo_size;
uint32_t handle1, handle2, handle_import;
int dma_buf_fd;
@@ -139,6 +148,8 @@ static void test_with_two_bos(void)
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
+ bo_size = get_min_bo_size(fd1, fd2);
+
handle1 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
handle2 = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
@@ -171,12 +182,15 @@ static void test_with_two_bos(void)
static void test_with_one_bo_two_files(void)
{
int fd1, fd2;
+ size_t bo_size;
uint32_t handle_import, handle_open, handle_orig, flink_name;
int dma_buf_fd1, dma_buf_fd2;
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
+ bo_size = get_min_bo_size(fd1, fd2);
+
handle_orig = xe_bo_create(fd1, 0, bo_size,
vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
@@ -205,12 +219,15 @@ static void test_with_one_bo_two_files(void)
static void test_with_one_bo(void)
{
int fd1, fd2;
+ size_t bo_size;
uint32_t handle, handle_import1, handle_import2, handle_selfimport;
int dma_buf_fd;
fd1 = drm_open_driver(DRIVER_XE);
fd2 = drm_open_driver(DRIVER_XE);
+ bo_size = get_min_bo_size(fd1, fd2);
+
handle = xe_bo_create(fd1, 0, bo_size, vram_if_possible(fd1, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
@@ -279,6 +296,7 @@ static void *thread_fn_reimport_vs_close(void *p)
pthread_t *threads;
int r, i, num_threads;
int fds[2];
+ size_t bo_size;
int obj_count;
void *status;
uint32_t handle;
@@ -298,6 +316,8 @@ static void *thread_fn_reimport_vs_close(void *p)
fds[0] = drm_open_driver(DRIVER_XE);
+ bo_size = xe_get_default_alignment(fds[0]);
+
handle = xe_bo_create(fds[0], 0, bo_size,
vram_if_possible(fds[0], 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
@@ -336,6 +356,7 @@ static void *thread_fn_export_vs_close(void *p)
struct drm_prime_handle prime_h2f;
struct drm_gem_close close_bo;
int fd = (uintptr_t)p;
+ size_t bo_size = xe_get_default_alignment(fd);
uint32_t handle;
pthread_barrier_wait(&g_barrier);
@@ -463,6 +484,7 @@ static void test_llseek_size(void)
static void test_llseek_bad(void)
{
int fd;
+ size_t bo_size;
uint32_t handle;
int dma_buf_fd;
@@ -470,6 +492,7 @@ static void test_llseek_bad(void)
fd = drm_open_driver(DRIVER_XE);
+ bo_size = 4 * xe_get_default_alignment(fd);
handle = xe_bo_create(fd, 0, bo_size,
vram_if_possible(fd, 0),
DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
@@ -510,7 +533,6 @@ igt_main
igt_fixture {
fd = drm_open_driver(DRIVER_XE);
- bo_size = xe_get_default_alignment(fd);
}
for (i = 0; i < ARRAY_SIZE(tests); i++) {
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index f5099f398..0cc1c0de2 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -1312,11 +1312,10 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
t.fd = fd;
t.vm = vm;
-#define PAGE_SIZE 4096
- t.addr = addr + PAGE_SIZE / 2;
+ t.addr = addr + page_size / 2;
t.eci = eci;
t.exit = &exit;
- t.map = map + PAGE_SIZE / 2;
+ t.map = map + page_size / 2;
t.barrier = &barrier;
pthread_barrier_init(&barrier, NULL, 2);
pthread_create(&t.thread, 0, hammer_thread, &t);
@@ -1369,8 +1368,8 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert_eq(data->data, 0xc0ffee);
}
if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
- memset(map, 0, PAGE_SIZE / 2);
- memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+ memset(map, 0, page_size / 2);
+ memset(map + page_size, 0, bo_size - page_size);
} else {
memset(map, 0, bo_size);
}
@@ -1419,8 +1418,8 @@ try_again_after_invalidate:
}
}
if (flags & MAP_FLAG_HAMMER_FIRST_PAGE) {
- memset(map, 0, PAGE_SIZE / 2);
- memset(map + PAGE_SIZE, 0, bo_size - PAGE_SIZE);
+ memset(map, 0, page_size / 2);
+ memset(map + page_size, 0, bo_size - page_size);
} else {
memset(map, 0, bo_size);
}
--
2.34.1
* [igt-dev] [PATCH v4 10/20] drm-uapi/xe: Align on a common way to return arrays (memory regions)
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (8 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 09/20] drm-uapi/xe: Reject bo creation of unaligned size Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 11/20] drm-uapi/xe: Align on a common way to return arrays (gt) Francois Dugast
` (10 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Align on a common way to return
arrays (memory regions)")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
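Only the member names change; the two-step query pattern stays the same.
A sketch under the new names (assumes fd is an open xe device and
DRM_XE_DEVICE_QUERY_MEM_REGIONS as the query id):

    struct drm_xe_query_mem_regions *mem_regions;
    struct drm_xe_device_query query = {
        .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
    };

    igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
    mem_regions = malloc(query.size);
    query.data = to_user_pointer(mem_regions);
    igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);

    for (int i = 0; i < mem_regions->num_mem_regions; i++)
        igt_info("region %d: class %u\n", i,
                 mem_regions->mem_regions[i].mem_class);
    free(mem_regions);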
include/drm-uapi/xe_drm.h | 12 +++++------
lib/xe/xe_query.c | 32 ++++++++++++++---------------
lib/xe/xe_query.h | 2 +-
lib/xe/xe_util.c | 6 +++---
tests/intel/xe_create.c | 2 +-
tests/intel/xe_drm_fdinfo.c | 8 ++++----
tests/intel/xe_pm.c | 8 ++++----
tests/intel/xe_query.c | 40 ++++++++++++++++++-------------------
tests/kms_plane.c | 2 +-
9 files changed, 56 insertions(+), 56 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 4b944b32a..a6dc63c21 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -182,10 +182,10 @@ enum drm_xe_memory_class {
};
/**
- * struct drm_xe_query_mem_region - Describes some region as known to
+ * struct drm_xe_mem_region - Describes some region as known to
* the driver.
*/
-struct drm_xe_query_mem_region {
+struct drm_xe_mem_region {
/**
* @mem_class: The memory class describing this region.
*
@@ -322,12 +322,12 @@ struct drm_xe_query_engine_cycles {
* struct drm_xe_query_mem_regions in .data.
*/
struct drm_xe_query_mem_regions {
- /** @num_regions: number of memory regions returned in @regions */
- __u32 num_regions;
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
/** @pad: MBZ */
__u32 pad;
- /** @regions: The returned regions for this device */
- struct drm_xe_query_mem_region regions[];
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
};
/**
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index f9dec1f7a..d00051bd9 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -134,8 +134,8 @@ static uint64_t gt_vram_size(const struct drm_xe_query_mem_regions *mem_regions,
{
int region_idx = ffs(native_region_for_gt(gt_list, gt)) - 1;
- if (XE_IS_CLASS_VRAM(&mem_regions->regions[region_idx]))
- return mem_regions->regions[region_idx].total_size;
+ if (XE_IS_CLASS_VRAM(&mem_regions->mem_regions[region_idx]))
+ return mem_regions->mem_regions[region_idx].total_size;
return 0;
}
@@ -145,16 +145,16 @@ static uint64_t gt_visible_vram_size(const struct drm_xe_query_mem_regions *mem_
{
int region_idx = ffs(native_region_for_gt(gt_list, gt)) - 1;
- if (XE_IS_CLASS_VRAM(&mem_regions->regions[region_idx]))
- return mem_regions->regions[region_idx].cpu_visible_size;
+ if (XE_IS_CLASS_VRAM(&mem_regions->mem_regions[region_idx]))
+ return mem_regions->mem_regions[region_idx].cpu_visible_size;
return 0;
}
static bool __mem_has_vram(struct drm_xe_query_mem_regions *mem_regions)
{
- for (int i = 0; i < mem_regions->num_regions; i++)
- if (XE_IS_CLASS_VRAM(&mem_regions->regions[i]))
+ for (int i = 0; i < mem_regions->num_mem_regions; i++)
+ if (XE_IS_CLASS_VRAM(&mem_regions->mem_regions[i]))
return true;
return false;
@@ -164,9 +164,9 @@ static uint32_t __mem_default_alignment(struct drm_xe_query_mem_regions *mem_reg
{
uint32_t alignment = XE_DEFAULT_ALIGNMENT;
- for (int i = 0; i < mem_regions->num_regions; i++)
- if (alignment < mem_regions->regions[i].min_page_size)
- alignment = mem_regions->regions[i].min_page_size;
+ for (int i = 0; i < mem_regions->num_mem_regions; i++)
+ if (alignment < mem_regions->mem_regions[i].min_page_size)
+ alignment = mem_regions->mem_regions[i].min_page_size;
return alignment;
}
@@ -454,16 +454,16 @@ struct drm_xe_query_engine_info *xe_engine(int fd, int idx)
*
* Returns memory region structure for @region mask.
*/
-struct drm_xe_query_mem_region *xe_mem_region(int fd, uint64_t region)
+struct drm_xe_mem_region *xe_mem_region(int fd, uint64_t region)
{
struct xe_device *xe_dev;
int region_idx = ffs(region) - 1;
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
- igt_assert(xe_dev->mem_regions->num_regions > region_idx);
+ igt_assert(xe_dev->mem_regions->num_mem_regions > region_idx);
- return &xe_dev->mem_regions->regions[region_idx];
+ return &xe_dev->mem_regions->mem_regions[region_idx];
}
/**
@@ -501,7 +501,7 @@ const char *xe_region_name(uint64_t region)
*/
uint16_t xe_region_class(int fd, uint64_t region)
{
- struct drm_xe_query_mem_region *memreg;
+ struct drm_xe_mem_region *memreg;
memreg = xe_mem_region(fd, region);
@@ -593,21 +593,21 @@ uint64_t xe_vram_available(int fd, int gt)
{
struct xe_device *xe_dev;
int region_idx;
- struct drm_xe_query_mem_region *mem_region;
+ struct drm_xe_mem_region *mem_region;
struct drm_xe_query_mem_regions *mem_regions;
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
region_idx = ffs(native_region_for_gt(xe_dev->gt_list, gt)) - 1;
- mem_region = &xe_dev->mem_regions->regions[region_idx];
+ mem_region = &xe_dev->mem_regions->mem_regions[region_idx];
if (XE_IS_CLASS_VRAM(mem_region)) {
uint64_t available_vram;
mem_regions = xe_query_mem_regions_new(fd);
pthread_mutex_lock(&cache.cache_mutex);
- mem_region->used = mem_regions->regions[region_idx].used;
+ mem_region->used = mem_regions->mem_regions[region_idx].used;
available_vram = mem_region->total_size - mem_region->used;
pthread_mutex_unlock(&cache.cache_mutex);
free(mem_regions);
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index fede00036..5862ecba6 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -83,7 +83,7 @@ uint64_t vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
struct drm_xe_query_engine_info *xe_engines(int fd);
struct drm_xe_query_engine_info *xe_engine(int fd, int idx);
-struct drm_xe_query_mem_region *xe_mem_region(int fd, uint64_t region);
+struct drm_xe_mem_region *xe_mem_region(int fd, uint64_t region);
const char *xe_region_name(uint64_t region);
uint16_t xe_region_class(int fd, uint64_t region);
uint32_t xe_min_page_size(int fd, uint64_t region);
diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
index 742e6333e..1bb52b142 100644
--- a/lib/xe/xe_util.c
+++ b/lib/xe/xe_util.c
@@ -10,7 +10,7 @@
#include "xe/xe_query.h"
#include "xe/xe_util.h"
-static bool __region_belongs_to_regions_type(struct drm_xe_query_mem_region *region,
+static bool __region_belongs_to_regions_type(struct drm_xe_mem_region *region,
uint32_t *mem_regions_type,
int num_regions)
{
@@ -23,7 +23,7 @@ static bool __region_belongs_to_regions_type(struct drm_xe_query_mem_region *reg
struct igt_collection *
__xe_get_memory_region_set(int xe, uint32_t *mem_regions_type, int num_regions)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
struct igt_collection *set = NULL;
uint64_t memreg = all_memory_regions(xe), region;
int count = 0, pos = 0;
@@ -78,7 +78,7 @@ char *xe_memregion_dynamic_subtest_name(int xe, struct igt_collection *set)
igt_assert(name);
for_each_collection_data(data, set) {
- struct drm_xe_query_mem_region *memreg;
+ struct drm_xe_mem_region *memreg;
int r;
region = data->value;
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index 03feecd25..865cb9840 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -48,7 +48,7 @@ static int __create_bo(int fd, uint32_t vm, uint64_t size, uint32_t placement,
*/
static void create_invalid_size(int fd)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(fd), region;
uint32_t vm;
uint32_t handle;
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index cec3e0825..fc39649ea 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -42,7 +42,7 @@ IGT_TEST_DESCRIPTION("Read and verify drm client memory consumption using fdinfo
/* Subtests */
static void test_active(int fd, struct drm_xe_query_engine_info *engine)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(fd), region;
struct drm_client_fdinfo info = { };
uint32_t vm;
@@ -169,7 +169,7 @@ static void test_active(int fd, struct drm_xe_query_engine_info *engine)
static void test_shared(int xe)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(xe), region;
struct drm_client_fdinfo info = { };
struct drm_gem_flink flink;
@@ -214,7 +214,7 @@ static void test_shared(int xe)
static void test_total_resident(int xe)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(xe), region;
struct drm_client_fdinfo info = { };
uint32_t vm;
@@ -262,7 +262,7 @@ static void test_total_resident(int xe)
static void basic(int xe)
{
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(xe), region;
struct drm_client_fdinfo info = { };
unsigned int ret;
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index d78ca31a8..a8fc56e4b 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -400,10 +400,10 @@ static void test_vram_d3cold_threshold(device_t device, int sysfs_fd)
query.data = to_user_pointer(mem_regions);
igt_assert_eq(igt_ioctl(device.fd_xe, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
- for (i = 0; i < mem_regions->num_regions; i++) {
- if (mem_regions->regions[i].mem_class == DRM_XE_MEM_REGION_CLASS_VRAM) {
- vram_used_mb += (mem_regions->regions[i].used / (1024 * 1024));
- vram_total_mb += (mem_regions->regions[i].total_size / (1024 * 1024));
+ for (i = 0; i < mem_regions->num_mem_regions; i++) {
+ if (mem_regions->mem_regions[i].mem_class == DRM_XE_MEM_REGION_CLASS_VRAM) {
+ vram_used_mb += (mem_regions->mem_regions[i].used / (1024 * 1024));
+ vram_total_mb += (mem_regions->mem_regions[i].total_size / (1024 * 1024));
}
}
diff --git a/tests/intel/xe_query.c b/tests/intel/xe_query.c
index 48042337a..207785a38 100644
--- a/tests/intel/xe_query.c
+++ b/tests/intel/xe_query.c
@@ -218,34 +218,34 @@ test_query_mem_regions(int fd)
query.data = to_user_pointer(mem_regions);
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
- for (i = 0; i < mem_regions->num_regions; i++) {
+ for (i = 0; i < mem_regions->num_mem_regions; i++) {
igt_info("mem region %d: %s\t%#llx / %#llx\n", i,
- mem_regions->regions[i].mem_class ==
+ mem_regions->mem_regions[i].mem_class ==
DRM_XE_MEM_REGION_CLASS_SYSMEM ? "SYSMEM"
- :mem_regions->regions[i].mem_class ==
+ :mem_regions->mem_regions[i].mem_class ==
DRM_XE_MEM_REGION_CLASS_VRAM ? "VRAM" : "?",
- mem_regions->regions[i].used,
- mem_regions->regions[i].total_size
+ mem_regions->mem_regions[i].used,
+ mem_regions->mem_regions[i].total_size
);
igt_info("min_page_size=0x%x\n",
- mem_regions->regions[i].min_page_size);
+ mem_regions->mem_regions[i].min_page_size);
igt_info("visible size=%lluMiB\n",
- mem_regions->regions[i].cpu_visible_size >> 20);
+ mem_regions->mem_regions[i].cpu_visible_size >> 20);
igt_info("visible used=%lluMiB\n",
- mem_regions->regions[i].cpu_visible_used >> 20);
-
- igt_assert_lte_u64(mem_regions->regions[i].cpu_visible_size,
- mem_regions->regions[i].total_size);
- igt_assert_lte_u64(mem_regions->regions[i].cpu_visible_used,
- mem_regions->regions[i].cpu_visible_size);
- igt_assert_lte_u64(mem_regions->regions[i].cpu_visible_used,
- mem_regions->regions[i].used);
- igt_assert_lte_u64(mem_regions->regions[i].used,
- mem_regions->regions[i].total_size);
- igt_assert_lte_u64(mem_regions->regions[i].used -
- mem_regions->regions[i].cpu_visible_used,
- mem_regions->regions[i].total_size);
+ mem_regions->mem_regions[i].cpu_visible_used >> 20);
+
+ igt_assert_lte_u64(mem_regions->mem_regions[i].cpu_visible_size,
+ mem_regions->mem_regions[i].total_size);
+ igt_assert_lte_u64(mem_regions->mem_regions[i].cpu_visible_used,
+ mem_regions->mem_regions[i].cpu_visible_size);
+ igt_assert_lte_u64(mem_regions->mem_regions[i].cpu_visible_used,
+ mem_regions->mem_regions[i].used);
+ igt_assert_lte_u64(mem_regions->mem_regions[i].used,
+ mem_regions->mem_regions[i].total_size);
+ igt_assert_lte_u64(mem_regions->mem_regions[i].used -
+ mem_regions->mem_regions[i].cpu_visible_used,
+ mem_regions->mem_regions[i].total_size);
}
dump_hex_debug(mem_regions, query.size);
free(mem_regions);
diff --git a/tests/kms_plane.c b/tests/kms_plane.c
index e50a94578..51ca082ae 100644
--- a/tests/kms_plane.c
+++ b/tests/kms_plane.c
@@ -467,7 +467,7 @@ test_plane_panning(data_t *data, enum pipe pipe)
}
if (is_xe_device(data->drm_fd)) {
- struct drm_xe_query_mem_region *memregion;
+ struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(data->drm_fd), region;
xe_for_each_mem_region(data->drm_fd, memreg, region) {
--
2.34.1
* [igt-dev] [PATCH v4 11/20] drm-uapi/xe: Align on a common way to return arrays (gt)
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (9 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 10/20] drm-uapi/xe: Align on a common way to return arrays (memory regions) Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 12/20] drm-uapi/xe: Align on a common way to return arrays (engines) Francois Dugast
` (9 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Align on a common way to return
arrays (gt)")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
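This is a pure rename with no layout change. Iterating the GT list under
the new struct name would look like this sketch (gt_list assumed to come
from the usual DRM_XE_DEVICE_QUERY_GT_LIST two-step query):

    for (int i = 0; i < gt_list->num_gt; i++) {
        struct drm_xe_gt *gt = &gt_list->gt_list[i];

        igt_info("gt %d: %s\n", i,
                 gt->type == DRM_XE_QUERY_GT_TYPE_MEDIA ? "media" : "main");
    }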
include/drm-uapi/xe_drm.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index a6dc63c21..735576f91 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -356,14 +356,14 @@ struct drm_xe_query_config {
};
/**
- * struct drm_xe_query_gt - describe an individual GT.
+ * struct drm_xe_gt - describe an individual GT.
*
* To be used with drm_xe_query_gt_list, which will return a list with all the
* existing GT individual descriptions.
* Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
* implementing graphics and/or media operations.
*/
-struct drm_xe_query_gt {
+struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
/** @type: GT type: Main or Media */
@@ -403,7 +403,7 @@ struct drm_xe_query_gt_list {
/** @pad: MBZ */
__u32 pad;
/** @gt_list: The GT list returned for this device */
- struct drm_xe_query_gt gt_list[];
+ struct drm_xe_gt gt_list[];
};
/**
--
2.34.1
* [igt-dev] [PATCH v4 12/20] drm-uapi/xe: Align on a common way to return arrays (engines)
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (10 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 11/20] drm-uapi/xe: Align on a common way to return arrays (gt) Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 13/20] drm-uapi/xe: Split xe_sync types from flags Francois Dugast
` (8 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Align on a common way to return
arrays (engines)")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
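The IGT helpers keep their names: xe_engine() now hands back a struct
drm_xe_engine and the iterator dereferences into .instance, so existing
per-engine loops compile unchanged. Sketch (assumes fd is an open xe
device):

    struct drm_xe_engine_class_instance *hwe;

    /* xe_for_each_engine still yields a class/instance pointer */
    xe_for_each_engine(fd, hwe)
        igt_info("class %u instance %u gt %u\n",
                 hwe->engine_class, hwe->engine_instance, hwe->gt_id);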
include/drm-uapi/xe_drm.h | 78 +++++++++++++++++++------------
lib/xe/xe_query.c | 24 ++++------
lib/xe/xe_query.h | 11 ++---
tests/intel/xe_create.c | 2 +-
tests/intel/xe_drm_fdinfo.c | 2 +-
tests/intel/xe_exec_store.c | 2 +-
tests/intel/xe_noexec_ping_pong.c | 2 +-
tests/intel/xe_waitfence.c | 2 +-
8 files changed, 66 insertions(+), 57 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 735576f91..8bc669c55 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -127,9 +127,9 @@ struct xe_user_extension {
/**
* struct drm_xe_engine_class_instance - instance of an engine class
*
- * It is returned as part of @drm_xe_query_engine_info, but it is also
- * used as the input for engine selection in both @drm_xe_exec_queue_create
- * and @drm_xe_query_engine_cycles.
+ * It is returned as part of @drm_xe_engine, but it is also used as
+ * the input for engine selection in both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles.
*
*/
struct drm_xe_engine_class_instance {
@@ -153,13 +153,9 @@ struct drm_xe_engine_class_instance {
};
/**
- * struct drm_xe_query_engine_info - describe hardware engine
- *
- * If a query is made with a struct @drm_xe_device_query where .query
- * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
- * struct @drm_xe_query_engine_info in .data.
+ * struct drm_xe_engine - describe hardware engine
*/
-struct drm_xe_query_engine_info {
+struct drm_xe_engine {
/** @instance: The @drm_xe_engine_class_instance */
struct drm_xe_engine_class_instance instance;
@@ -167,6 +163,22 @@ struct drm_xe_query_engine_info {
__u64 reserved[5];
};
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
/**
* enum drm_xe_memory_class - Supported memory classes.
*/
@@ -466,28 +478,32 @@ struct drm_xe_query_topology_mask {
*
* .. code-block:: C
*
- * struct drm_xe_engine_class_instance *hwe;
- * struct drm_xe_device_query query = {
- * .extensions = 0,
- * .query = DRM_XE_DEVICE_QUERY_ENGINES,
- * .size = 0,
- * .data = 0,
- * };
- * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
- * hwe = malloc(query.size);
- * query.data = (uintptr_t)hwe;
- * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
- * int num_engines = query.size / sizeof(*hwe);
- * for (int i = 0; i < num_engines; i++) {
- * printf("Engine %d: %s\n", i,
- * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
- * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COPY ? "COPY":
- * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
- * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
- * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
- * "UNKNOWN");
- * }
- * free(hwe);
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
*/
struct drm_xe_device_query {
/** @extensions: Pointer to the first extension struct, if any */
diff --git a/lib/xe/xe_query.c b/lib/xe/xe_query.c
index d00051bd9..fa2b49079 100644
--- a/lib/xe/xe_query.c
+++ b/lib/xe/xe_query.c
@@ -72,10 +72,9 @@ static uint64_t __memory_regions(const struct drm_xe_query_gt_list *gt_list)
return regions;
}
-static struct drm_xe_query_engine_info *
-xe_query_engines(int fd, unsigned int *num_engines)
+static struct drm_xe_query_engines *xe_query_engines(int fd)
{
- struct drm_xe_query_engine_info *engines;
+ struct drm_xe_query_engines *engines;
struct drm_xe_device_query query = {
.extensions = 0,
.query = DRM_XE_DEVICE_QUERY_ENGINES,
@@ -83,7 +82,6 @@ xe_query_engines(int fd, unsigned int *num_engines)
.data = 0,
};
- igt_assert(num_engines);
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
engines = malloc(query.size);
@@ -92,8 +90,6 @@ xe_query_engines(int fd, unsigned int *num_engines)
query.data = to_user_pointer(engines);
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query), 0);
- *num_engines = query.size / sizeof(*engines);
-
return engines;
}
@@ -253,7 +249,7 @@ struct xe_device *xe_device_get(int fd)
xe_dev->dev_id = xe_dev->config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
xe_dev->gt_list = xe_query_gt_list_new(fd);
xe_dev->memory_regions = __memory_regions(xe_dev->gt_list);
- xe_dev->engines = xe_query_engines(fd, &xe_dev->number_engines);
+ xe_dev->engines = xe_query_engines(fd);
xe_dev->mem_regions = xe_query_mem_regions_new(fd);
xe_dev->vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->vram_size));
xe_dev->visible_vram_size = calloc(xe_dev->gt_list->num_gt, sizeof(*xe_dev->visible_vram_size));
@@ -427,7 +423,7 @@ uint64_t vram_if_possible(int fd, int gt)
*
* Returns engines array of xe device @fd.
*/
-xe_dev_FN(xe_engines, engines, struct drm_xe_query_engine_info *);
+xe_dev_FN(xe_engines, engines->engines, struct drm_xe_engine *);
/**
* xe_engine:
@@ -436,15 +432,15 @@ xe_dev_FN(xe_engines, engines, struct drm_xe_query_engine_info *);
*
* Returns engine info of xe device @fd and @idx.
*/
-struct drm_xe_query_engine_info *xe_engine(int fd, int idx)
+struct drm_xe_engine *xe_engine(int fd, int idx)
{
struct xe_device *xe_dev;
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
- igt_assert(idx >= 0 && idx < xe_dev->number_engines);
+ igt_assert(idx >= 0 && idx < xe_dev->engines->num_engines);
- return &xe_dev->engines[idx];
+ return &xe_dev->engines->engines[idx];
}
/**
@@ -534,7 +530,7 @@ xe_dev_FN(xe_config, config, struct drm_xe_query_config *);
*
* Returns number of hw engines of xe device @fd.
*/
-xe_dev_FN(xe_number_engines, number_engines, unsigned int);
+xe_dev_FN(xe_number_engines, engines->num_engines, unsigned int);
/**
* xe_has_vram:
@@ -657,8 +653,8 @@ bool xe_has_engine_class(int fd, uint16_t engine_class)
xe_dev = find_in_cache(fd);
igt_assert(xe_dev);
- for (int i = 0; i < xe_dev->number_engines; i++)
- if (xe_dev->engines[i].instance.engine_class == engine_class)
+ for (int i = 0; i < xe_dev->engines->num_engines; i++)
+ if (xe_dev->engines->engines[i].instance.engine_class == engine_class)
return true;
return false;
diff --git a/lib/xe/xe_query.h b/lib/xe/xe_query.h
index 5862ecba6..883cabb7d 100644
--- a/lib/xe/xe_query.h
+++ b/lib/xe/xe_query.h
@@ -32,11 +32,8 @@ struct xe_device {
/** @memory_regions: bitmask of all memory regions */
uint64_t memory_regions;
- /** @engines: array of hardware engines */
- struct drm_xe_query_engine_info *engines;
-
- /** @number_engines: length of hardware engines array */
- unsigned int number_engines;
+ /** @engines: hardware engines */
+ struct drm_xe_query_engines *engines;
/** @mem_regions: regions memory information and usage */
struct drm_xe_query_mem_regions *mem_regions;
@@ -81,8 +78,8 @@ uint64_t all_memory_regions(int fd);
uint64_t system_memory(int fd);
uint64_t vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
-struct drm_xe_query_engine_info *xe_engines(int fd);
-struct drm_xe_query_engine_info *xe_engine(int fd, int idx);
+struct drm_xe_engine *xe_engines(int fd);
+struct drm_xe_engine *xe_engine(int fd, int idx);
struct drm_xe_mem_region *xe_mem_region(int fd, uint64_t region);
const char *xe_region_name(uint64_t region);
uint16_t xe_region_class(int fd, uint64_t region);
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index 865cb9840..94ef7f013 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -148,7 +148,7 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
igt_nsec_elapsed(&tv);
igt_fork(n, nproc) {
- struct drm_xe_query_engine_info *engine;
+ struct drm_xe_engine *engine;
uint32_t exec_queue, exec_queues[exec_queues_per_process];
int idx, err, i;
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index fc39649ea..ec457b1c1 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -40,7 +40,7 @@ IGT_TEST_DESCRIPTION("Read and verify drm client memory consumption using fdinfo
#define BO_SIZE (65536)
/* Subtests */
-static void test_active(int fd, struct drm_xe_query_engine_info *engine)
+static void test_active(int fd, struct drm_xe_engine *engine)
{
struct drm_xe_mem_region *memregion;
uint64_t memreg = all_memory_regions(fd), region;
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 48e843af5..2927214e3 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -63,7 +63,7 @@ static void store(int fd)
.syncs = to_user_pointer(&sync),
};
struct data *data;
- struct drm_xe_query_engine_info *engine;
+ struct drm_xe_engine *engine;
uint32_t vm;
uint32_t exec_queue;
uint32_t syncobj;
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index 585af413d..9659272b5 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -43,7 +43,7 @@
* there is work queued on one of the VM's compute exec_queues.
*/
-static void test_ping_pong(int fd, struct drm_xe_query_engine_info *engine)
+static void test_ping_pong(int fd, struct drm_xe_engine *engine)
{
size_t vram_size = xe_vram_size(fd, 0);
size_t align = xe_get_default_alignment(fd);
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index bab2bed42..a902ad408 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -81,7 +81,7 @@ enum waittype {
static void
waitfence(int fd, enum waittype wt)
{
- struct drm_xe_query_engine_info *engine = NULL;
+ struct drm_xe_engine *engine = NULL;
struct timespec ts;
int64_t current, signalled;
uint32_t bo_1;
--
2.34.1
* [igt-dev] [PATCH v4 13/20] drm-uapi/xe: Split xe_sync types from flags
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (11 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 12/20] drm-uapi/xe: Align on a common way to return arrays (engines) Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 14/20] drm-uapi/xe: Kill tile_mask Francois Dugast
` (7 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Split xe_sync types from flags")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
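The conversion is mechanical: each sync now names its object kind in
.type and keeps only direction bits in .flags. Sketch of an out-fence
under the new layout (assumes fd is an open xe device):

    /* previously: .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL */
    struct drm_xe_sync sync = {
        .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
        .flags = DRM_XE_SYNC_FLAG_SIGNAL,
        .handle = syncobj_create(fd, 0),
    };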
benchmarks/gem_wsim.c | 9 +++++----
include/drm-uapi/xe_drm.h | 16 +++++++--------
lib/intel_batchbuffer.c | 8 ++++----
lib/intel_compute.c | 6 ++++--
lib/intel_ctx.c | 4 ++--
lib/xe/xe_ioctl.c | 3 ++-
lib/xe/xe_spin.c | 4 ++--
lib/xe/xe_util.c | 4 ++--
tests/intel/xe_dma_buf_sync.c | 4 ++--
tests/intel/xe_drm_fdinfo.c | 4 ++--
tests/intel/xe_evict.c | 6 +++---
tests/intel/xe_exec_balancer.c | 10 +++++-----
tests/intel/xe_exec_basic.c | 4 ++--
tests/intel/xe_exec_compute_mode.c | 5 +++--
tests/intel/xe_exec_fault_mode.c | 2 +-
tests/intel/xe_exec_reset.c | 14 ++++++-------
tests/intel/xe_exec_store.c | 11 +++++-----
tests/intel/xe_exec_threads.c | 14 ++++++-------
tests/intel/xe_huc_copy.c | 3 ++-
tests/intel/xe_perf_pmu.c | 8 ++++----
tests/intel/xe_pm.c | 4 ++--
tests/intel/xe_pm_residency.c | 2 +-
tests/intel/xe_spin_batch.c | 3 ++-
tests/intel/xe_vm.c | 32 +++++++++++++++---------------
tests/intel/xe_waitfence.c | 3 ++-
25 files changed, 96 insertions(+), 87 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index 514fa4ba7..66ad7563d 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -1784,21 +1784,22 @@ xe_alloc_step_batch(struct workload *wrk, struct w_step *w)
i = 0;
/* out fence */
w->xe.syncs[i].handle = syncobj_create(fd, 0);
- w->xe.syncs[i++].flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
+ w->xe.syncs[i].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ w->xe.syncs[i++].flags = DRM_XE_SYNC_FLAG_SIGNAL;
/* in fence(s) */
for_each_dep(dep, w->data_deps) {
int dep_idx = w->idx + dep->target;
igt_assert(wrk->steps[dep_idx].xe.syncs && wrk->steps[dep_idx].xe.syncs[0].handle);
w->xe.syncs[i].handle = wrk->steps[dep_idx].xe.syncs[0].handle;
- w->xe.syncs[i++].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ w->xe.syncs[i++].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
}
for_each_dep(dep, w->fence_deps) {
int dep_idx = w->idx + dep->target;
igt_assert(wrk->steps[dep_idx].xe.syncs && wrk->steps[dep_idx].xe.syncs[0].handle);
w->xe.syncs[i].handle = wrk->steps[dep_idx].xe.syncs[0].handle;
- w->xe.syncs[i++].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ w->xe.syncs[i++].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
}
w->xe.exec.syncs = to_user_pointer(w->xe.syncs);
}
@@ -2375,7 +2376,7 @@ static int xe_prepare_contexts(unsigned int id, struct workload *wrk)
if (w->type == SW_FENCE) {
w->xe.syncs = calloc(1, sizeof(struct drm_xe_sync));
w->xe.syncs[0].handle = syncobj_create(fd, 0);
- w->xe.syncs[0].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ w->xe.syncs[0].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
}
return 0;
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 8bc669c55..1e98363b2 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -880,16 +880,16 @@ struct drm_xe_sync {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
-#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
-#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
-#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
-#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
-#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+ /** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
__u32 flags;
- /** @pad: MBZ */
- __u32 pad;
-
union {
__u32 handle;
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index ef55b6330..6c85c5aa3 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1318,8 +1318,8 @@ static struct drm_xe_vm_bind_op *xe_alloc_bind_ops(struct intel_bb *ibb,
static void __unbind_xe_objects(struct intel_bb *ibb)
{
struct drm_xe_sync syncs[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
int ret;
@@ -2306,8 +2306,8 @@ __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
uint32_t engine = flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK);
uint32_t engine_id;
struct drm_xe_sync syncs[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_vm_bind_op *bind_ops;
void *map;
diff --git a/lib/intel_compute.c b/lib/intel_compute.c
index dd921bf46..de797c6f7 100644
--- a/lib/intel_compute.c
+++ b/lib/intel_compute.c
@@ -106,7 +106,8 @@ static void bo_execenv_bind(struct bo_execenv *execenv,
uint64_t alignment = xe_get_default_alignment(fd);
struct drm_xe_sync sync = { 0 };
- sync.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
sync.handle = syncobj_create(fd, 0);
for (int i = 0; i < entries; i++) {
@@ -162,7 +163,8 @@ static void bo_execenv_unbind(struct bo_execenv *execenv,
uint32_t vm = execenv->vm;
struct drm_xe_sync sync = { 0 };
- sync.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
sync.handle = syncobj_create(fd, 0);
for (int i = 0; i < entries; i++) {
diff --git a/lib/intel_ctx.c b/lib/intel_ctx.c
index f82564572..b43dd6391 100644
--- a/lib/intel_ctx.c
+++ b/lib/intel_ctx.c
@@ -423,8 +423,8 @@ intel_ctx_t *intel_ctx_xe(int fd, uint32_t vm, uint32_t exec_queue,
int __intel_ctx_xe_exec(const intel_ctx_t *ctx, uint64_t ahnd, uint64_t bb_offset)
{
struct drm_xe_sync syncs[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.exec_queue_id = ctx->exec_queue,
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index c6d7af878..56d2fe592 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -405,7 +405,8 @@ void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr)
{
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.handle = syncobj_create(fd, 0),
};
diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
index 91bc6664d..deba06f73 100644
--- a/lib/xe/xe_spin.c
+++ b/lib/xe/xe_spin.c
@@ -191,7 +191,7 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
struct igt_spin *spin;
struct xe_spin *xe_spin;
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -289,7 +289,7 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
uint32_t vm, bo, exec_queue, syncobj;
struct xe_spin *spin;
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
index 1bb52b142..ae6cf3979 100644
--- a/lib/xe/xe_util.c
+++ b/lib/xe/xe_util.c
@@ -179,8 +179,8 @@ void xe_bind_unbind_async(int xe, uint32_t vm, uint32_t bind_engine,
{
struct drm_xe_vm_bind_op *bind_ops;
struct drm_xe_sync tabsyncs[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ, .handle = sync_in },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, .handle = sync_out },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .handle = sync_in },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, .handle = sync_out },
};
struct drm_xe_sync *syncs;
uint32_t num_binds = 0;
diff --git a/tests/intel/xe_dma_buf_sync.c b/tests/intel/xe_dma_buf_sync.c
index dfa957243..eca3a5e95 100644
--- a/tests/intel/xe_dma_buf_sync.c
+++ b/tests/intel/xe_dma_buf_sync.c
@@ -145,8 +145,8 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
uint64_t sdi_addr = addr + sdi_offset;
uint64_t spin_offset = (char *)&data[i]->spin - (char *)data[i];
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index ec457b1c1..fd6c07410 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -48,8 +48,8 @@ static void test_active(int fd, struct drm_xe_engine *engine)
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index 2e2960b9b..5b06b8953 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -38,8 +38,8 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t bind_exec_queues[3] = { 0, 0, 0 };
uint64_t addr = 0x100000000, base_addr = 0x100000000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -219,7 +219,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t addr = 0x100000000, base_addr = 0x100000000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index ea06c23cd..742724641 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -37,8 +37,8 @@ static void test_all_active(int fd, int gt, int class)
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -177,8 +177,8 @@ test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_syncs = 2,
@@ -401,7 +401,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
uint64_t addr = 0x1a0000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
diff --git a/tests/intel/xe_exec_basic.c b/tests/intel/xe_exec_basic.c
index 46b9dc2e0..2defd1e35 100644
--- a/tests/intel/xe_exec_basic.c
+++ b/tests/intel/xe_exec_basic.c
@@ -81,8 +81,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
int n_exec_queues, int n_execs, int n_vm, unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index a9f69deef..881f3829b 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -88,8 +88,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t addr = 0x1a0000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
- .timeline_value = USER_FENCE_VALUE },
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ .timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 4c85fce76..228e7e44a 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -107,7 +107,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t addr = 0x1a0000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index 988e63438..b8f5c6fbc 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -30,8 +30,8 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -141,8 +141,8 @@ test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_syncs = 2,
@@ -338,8 +338,8 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -504,7 +504,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t addr = 0x1a0000;
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
index 2927214e3..dec8546a3 100644
--- a/tests/intel/xe_exec_store.c
+++ b/tests/intel/xe_exec_store.c
@@ -55,7 +55,8 @@ static void store_dword_batch(struct data *data, uint64_t addr, int value)
static void store(int fd)
{
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -122,8 +123,8 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, }
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, }
};
struct drm_xe_exec exec = {
@@ -212,8 +213,8 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
static void store_all(int fd, int gt, int class)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, }
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, }
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index 8a01b150d..9aa989ab5 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -47,8 +47,8 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
int class, int n_exec_queues, int n_execs, unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES];
struct drm_xe_exec exec = {
@@ -126,7 +126,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
&create), 0);
exec_queues[i] = create.exec_queue_id;
syncobjs[i] = syncobj_create(fd, 0);
- sync_all[i].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ sync_all[i].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
sync_all[i].handle = syncobjs[i];
};
exec.num_batch_buffer = flags & PARALLEL ? num_placements : 1;
@@ -255,7 +255,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
{
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
+ { .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.timeline_value = USER_FENCE_VALUE },
};
struct drm_xe_exec exec = {
@@ -459,8 +459,8 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
int n_execs, unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES];
struct drm_xe_exec exec = {
@@ -539,7 +539,7 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
else
bind_exec_queues[i] = 0;
syncobjs[i] = syncobj_create(fd, 0);
- sync_all[i].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ sync_all[i].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
sync_all[i].handle = syncobjs[i];
};
diff --git a/tests/intel/xe_huc_copy.c b/tests/intel/xe_huc_copy.c
index dbc5afc17..035d86ea8 100644
--- a/tests/intel/xe_huc_copy.c
+++ b/tests/intel/xe_huc_copy.c
@@ -118,7 +118,8 @@ __test_huc_copy(int fd, uint32_t vm, struct drm_xe_engine_class_instance *hwe)
};
exec_queue = xe_exec_queue_create(fd, vm, hwe, 0);
- sync.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL;
+ sync.type = DRM_XE_SYNC_TYPE_SYNCOBJ;
+ sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
sync.handle = syncobj_create(fd, 0);
for(int i = 0; i < BO_DICT_ENTRIES; i++) {
diff --git a/tests/intel/xe_perf_pmu.c b/tests/intel/xe_perf_pmu.c
index ba5488c48..42cf62729 100644
--- a/tests/intel/xe_perf_pmu.c
+++ b/tests/intel/xe_perf_pmu.c
@@ -81,8 +81,8 @@ static void test_any_engine_busyness(int fd, struct drm_xe_engine_class_instance
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -185,8 +185,8 @@ static void test_engine_group_busyness(int fd, int gt, int class, const char *na
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
index a8fc56e4b..c899bd67a 100644
--- a/tests/intel/xe_pm.c
+++ b/tests/intel/xe_pm.c
@@ -231,8 +231,8 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_pm_residency.c b/tests/intel/xe_pm_residency.c
index 4f590c83c..5542f8fb4 100644
--- a/tests/intel/xe_pm_residency.c
+++ b/tests/intel/xe_pm_residency.c
@@ -87,7 +87,7 @@ static void exec_load(int fd, struct drm_xe_engine_class_instance *hwe, unsigned
} *data;
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_xe_exec exec = {
diff --git a/tests/intel/xe_spin_batch.c b/tests/intel/xe_spin_batch.c
index 2e2a0ed0e..c75709c4e 100644
--- a/tests/intel/xe_spin_batch.c
+++ b/tests/intel/xe_spin_batch.c
@@ -145,7 +145,8 @@ static void xe_spin_fixed_duration(int fd)
{
struct drm_xe_sync sync = {
.handle = syncobj_create(fd, 0),
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ .flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 0cc1c0de2..7871fa789 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -274,7 +274,7 @@ static void test_partial_unbinds(int fd)
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync = {
- .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
+ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
.handle = syncobj_create(fd, 0),
};
@@ -314,7 +314,7 @@ static void unbind_all(int fd, int n_vmas)
uint32_t vm;
int i;
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
@@ -389,8 +389,8 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
uint32_t vm;
uint64_t addr = 0x1000 * 512;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES + 1];
struct drm_xe_exec exec = {
@@ -433,7 +433,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
for (i = 0; i < n_exec_queues; i++) {
exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
syncobjs[i] = syncobj_create(fd, 0);
- sync_all[i].flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
+ sync_all[i].type = DRM_XE_SYNC_TYPE_SYNCOBJ;
sync_all[i].handle = syncobjs[i];
};
@@ -576,8 +576,8 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
uint32_t vm;
uint64_t addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -759,8 +759,8 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
uint32_t vm;
uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -948,8 +948,8 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -1109,7 +1109,7 @@ static void *hammer_thread(void *tdata)
{
struct thread_data *t = tdata;
struct drm_xe_sync sync[1] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -1235,8 +1235,8 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
@@ -1536,8 +1536,8 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
int unbind_n_pages, unsigned int flags)
{
struct drm_xe_sync sync[2] = {
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
- { .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
+ { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
};
struct drm_xe_exec exec = {
.num_batch_buffer = 1,
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index a902ad408..3be987954 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -28,7 +28,8 @@ static void do_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
uint64_t addr, uint64_t size, uint64_t val)
{
struct drm_xe_sync sync[1] = {};
- sync[0].flags = DRM_XE_SYNC_FLAG_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL;
+ sync[0].type = DRM_XE_SYNC_TYPE_USER_FENCE;
+ sync[0].flags = DRM_XE_SYNC_FLAG_SIGNAL;
sync[0].addr = to_user_pointer(&wait_fence);
sync[0].timeline_value = val;
--
2.34.1
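For callers, the net effect of this split is sketched below (enum and flag names as used in the hunks above; syncobj_create() is the IGT helper the tests already rely on):

	struct drm_xe_sync sync = {
		/* the kind of sync object now lives in .type ... */
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		/* ... while .flags carries only modifiers such as SIGNAL */
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
		.handle = syncobj_create(fd, 0),
	};
	/* before the split both were OR'ed into one field:
	 * .flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL; */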
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 14/20] drm-uapi/xe: Kill tile_mask
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (12 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 13/20] drm-uapi/xe: Split xe_sync types from flags Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 9:05 ` Matthew Brost
2023-11-29 14:54 ` [igt-dev] [PATCH v4 15/20] drm-uapi/xe: Crystal Reference Clock updates Francois Dugast
` (6 subsequent siblings)
20 siblings, 1 reply; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Kill tile_mask")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 8 +-------
tests/intel/xe_vm.c | 1 -
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 1e98363b2..2a4ac6e8e 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -683,12 +683,6 @@ struct drm_xe_vm_bind_op {
/** @addr: Address to operate on, MBZ for UNMAP_ALL */
__u64 addr;
- /**
- * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
- * only applies to creating new VMAs
- */
- __u64 tile_mask;
-
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
@@ -723,7 +717,7 @@ struct drm_xe_vm_bind_op {
__u32 prefetch_mem_region_instance;
/** @reserved: Reserved */
- __u64 reserved[2];
+ __u64 reserved[3];
};
struct drm_xe_vm_bind {
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 7871fa789..4122b5e49 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -799,7 +799,6 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
bind_ops[i].obj_offset = 0;
bind_ops[i].range = bo_size;
bind_ops[i].addr = addr;
- bind_ops[i].tile_mask = 0x1 << eci->gt_id;
bind_ops[i].op = DRM_XE_VM_BIND_OP_MAP;
bind_ops[i].flags = DRM_XE_VM_BIND_FLAG_ASYNC;
bind_ops[i].prefetch_mem_region_instance = 0;
--
2.34.1
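A bind array entry then reduces to the sketch below (mirrors the xe_vm.c hunk above; every name comes from this series):

	struct drm_xe_vm_bind_op op = {
		.obj = bo,
		.obj_offset = 0,
		.range = bo_size,
		.addr = addr,
		/* no .tile_mask anymore: its slot is folded into reserved[] */
		.op = DRM_XE_VM_BIND_OP_MAP,
		.flags = DRM_XE_VM_BIND_FLAG_ASYNC,
		.prefetch_mem_region_instance = 0,
	};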
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [igt-dev] [PATCH v4 14/20] drm-uapi/xe: Kill tile_mask
2023-11-29 14:54 ` [igt-dev] [PATCH v4 14/20] drm-uapi/xe: Kill tile_mask Francois Dugast
@ 2023-11-29 9:05 ` Matthew Brost
0 siblings, 0 replies; 23+ messages in thread
From: Matthew Brost @ 2023-11-29 9:05 UTC (permalink / raw)
To: Francois Dugast; +Cc: igt-dev
On Wed, Nov 29, 2023 at 02:54:51PM +0000, Francois Dugast wrote:
> Align with commit ("drm/xe/uapi: Kill tile_mask")
>
> Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> ---
> include/drm-uapi/xe_drm.h | 8 +-------
> tests/intel/xe_vm.c | 1 -
> 2 files changed, 1 insertion(+), 8 deletions(-)
>
> diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> index 1e98363b2..2a4ac6e8e 100644
> --- a/include/drm-uapi/xe_drm.h
> +++ b/include/drm-uapi/xe_drm.h
> @@ -683,12 +683,6 @@ struct drm_xe_vm_bind_op {
> /** @addr: Address to operate on, MBZ for UNMAP_ALL */
> __u64 addr;
>
> - /**
> - * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
> - * only applies to creating new VMAs
> - */
> - __u64 tile_mask;
> -
> #define DRM_XE_VM_BIND_OP_MAP 0x0
> #define DRM_XE_VM_BIND_OP_UNMAP 0x1
> #define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
> @@ -723,7 +717,7 @@ struct drm_xe_vm_bind_op {
> __u32 prefetch_mem_region_instance;
>
> /** @reserved: Reserved */
> - __u64 reserved[2];
> + __u64 reserved[3];
> };
>
> struct drm_xe_vm_bind {
> diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
> index 7871fa789..4122b5e49 100644
> --- a/tests/intel/xe_vm.c
> +++ b/tests/intel/xe_vm.c
> @@ -799,7 +799,6 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> bind_ops[i].obj_offset = 0;
> bind_ops[i].range = bo_size;
> bind_ops[i].addr = addr;
> - bind_ops[i].tile_mask = 0x1 << eci->gt_id;
> bind_ops[i].op = DRM_XE_VM_BIND_OP_MAP;
> bind_ops[i].flags = DRM_XE_VM_BIND_FLAG_ASYNC;
> bind_ops[i].prefetch_mem_region_instance = 0;
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 15/20] drm-uapi/xe: Crystal Reference Clock updates
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (13 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 14/20] drm-uapi/xe: Kill tile_mask Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 16/20] drm-uapi/xe: Remove bogus engine list from the wait_user_fence IOCTL Francois Dugast
` (5 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Crystal Reference Clock updates")
Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 11 ++++-------
lib/xe/xe_spin.c | 2 +-
tests/intel/xe_query.c | 35 +++++++++++++++++++++++++++--------
3 files changed, 32 insertions(+), 16 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 2a4ac6e8e..0b9047086 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -280,8 +280,8 @@ struct drm_xe_mem_region {
* in .data. struct drm_xe_query_engine_cycles is allocated by the user and
* .data points to this allocated structure.
*
- * The query returns the engine cycles and the frequency that can
- * be used to calculate the engine timestamp. In addition the
+ * The query returns the engine cycles, which along with GT's @reference_clock,
+ * can be used to calculate the engine timestamp. In addition the
* query returns a set of cpu timestamps that indicate when the command
* streamer cycle count was captured.
*/
@@ -309,9 +309,6 @@ struct drm_xe_query_engine_cycles {
*/
__u64 engine_cycles;
- /** @engine_frequency: Frequency of the engine cycles in Hz. */
- __u64 engine_frequency;
-
/**
* @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
* reading the engine_cycles register using the reference clockid set by the
@@ -382,8 +379,8 @@ struct drm_xe_gt {
__u16 type;
/** @gt_id: Unique ID of this GT within the PCI Device */
__u16 gt_id;
- /** @clock_freq: A clock frequency for timestamp */
- __u32 clock_freq;
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
/**
* @near_mem_regions: Bit mask of instances from
* drm_xe_query_mem_regions that are nearest to the current engines
diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
index deba06f73..243e97047 100644
--- a/lib/xe/xe_spin.c
+++ b/lib/xe/xe_spin.c
@@ -23,7 +23,7 @@ static uint32_t read_timestamp_frequency(int fd, int gt_id)
igt_assert(dev && dev->gt_list && dev->gt_list->num_gt);
igt_assert(gt_id >= 0 && gt_id <= dev->gt_list->num_gt);
- return dev->gt_list->gt_list[gt_id].clock_freq;
+ return dev->gt_list->gt_list[gt_id].reference_clock;
}
static uint64_t div64_u64_round_up(const uint64_t x, const uint64_t y)
diff --git a/tests/intel/xe_query.c b/tests/intel/xe_query.c
index 207785a38..7afea9945 100644
--- a/tests/intel/xe_query.c
+++ b/tests/intel/xe_query.c
@@ -280,7 +280,7 @@ test_query_gt_list(int fd)
for (i = 0; i < gt_list->num_gt; i++) {
igt_info("type: %d\n", gt_list->gt_list[i].type);
igt_info("gt_id: %d\n", gt_list->gt_list[i].gt_id);
- igt_info("clock_freq: %u\n", gt_list->gt_list[i].clock_freq);
+ igt_info("reference_clock: %u\n", gt_list->gt_list[i].reference_clock);
igt_info("near_mem_regions: 0x%016llx\n",
gt_list->gt_list[i].near_mem_regions);
igt_info("far_mem_regions: 0x%016llx\n",
@@ -496,6 +496,23 @@ query_engine_cycles(int fd, struct drm_xe_query_engine_cycles *resp)
igt_assert(query.size);
}
+static uint32_t
+__engine_reference_clock(int fd, int gt_id)
+{
+ uint32_t reference_clock = 0;
+ struct xe_device *xe_dev = xe_device_get(fd);
+
+ for (int gt = 0; gt < xe_dev->gt_list->num_gt; gt++) {
+ if (gt == gt_id) {
+ reference_clock = xe_dev->gt_list->gt_list[gt].reference_clock;
+ break;
+ }
+ }
+ igt_assert(reference_clock);
+
+ return reference_clock;
+}
+
static void
__engine_cycles(int fd, struct drm_xe_engine_class_instance *hwe)
{
@@ -506,7 +523,7 @@ __engine_cycles(int fd, struct drm_xe_engine_class_instance *hwe)
int i, usable = 0;
igt_spin_t *spin;
uint64_t ahnd;
- uint32_t vm;
+ uint32_t vm, engine_frequency1, engine_frequency2;
struct {
int32_t id;
const char *name;
@@ -539,28 +556,30 @@ __engine_cycles(int fd, struct drm_xe_engine_class_instance *hwe)
ts2.clockid = clock[index].id;
query_engine_cycles(fd, &ts1);
+ engine_frequency1 = __engine_reference_clock(fd, hwe->gt_id);
query_engine_cycles(fd, &ts2);
+ engine_frequency2 = __engine_reference_clock(fd, hwe->gt_id);
igt_debug("[1] cpu_ts before %llu, reg read time %llu\n",
ts1.cpu_timestamp,
ts1.cpu_delta);
- igt_debug("[1] engine_ts %llu, freq %llu Hz, width %u\n",
- ts1.engine_cycles, ts1.engine_frequency, ts1.width);
+ igt_debug("[1] engine_ts %llu, freq %u Hz, width %u\n",
+ ts1.engine_cycles, engine_frequency1, ts1.width);
igt_debug("[2] cpu_ts before %llu, reg read time %llu\n",
ts2.cpu_timestamp,
ts2.cpu_delta);
- igt_debug("[2] engine_ts %llu, freq %llu Hz, width %u\n",
- ts2.engine_cycles, ts2.engine_frequency, ts2.width);
+ igt_debug("[2] engine_ts %llu, freq %u Hz, width %u\n",
+ ts2.engine_cycles, engine_frequency2, ts2.width);
delta_cpu = ts2.cpu_timestamp - ts1.cpu_timestamp;
if (ts2.engine_cycles >= ts1.engine_cycles)
delta_cs = (ts2.engine_cycles - ts1.engine_cycles) *
- NSEC_PER_SEC / ts1.engine_frequency;
+ NSEC_PER_SEC / engine_frequency1;
else
delta_cs = (((1 << ts2.width) - ts2.engine_cycles) + ts1.engine_cycles) *
- NSEC_PER_SEC / ts1.engine_frequency;
+ NSEC_PER_SEC / engine_frequency1;
igt_debug("delta_cpu[%lu], delta_cs[%lu]\n",
delta_cpu, delta_cs);
--
2.34.1
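A worked sketch of the resulting timestamp math (logic lifted from the xe_query.c hunk above; NSEC_PER_SEC is 1000000000ull and the wrap branch assumes the counter is @width bits wide):

	static uint64_t engine_delta_ns(const struct drm_xe_query_engine_cycles *t1,
					const struct drm_xe_query_engine_cycles *t2,
					uint32_t reference_clock)
	{
		uint64_t cycles;

		if (t2->engine_cycles >= t1->engine_cycles)
			cycles = t2->engine_cycles - t1->engine_cycles;
		else /* the engine counter wrapped between the two reads */
			cycles = ((1ull << t2->width) - t2->engine_cycles) +
				 t1->engine_cycles;

		/* cycles divided by the clock in Hz gives seconds; scale to ns */
		return cycles * NSEC_PER_SEC / reference_clock;
	}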
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 16/20] drm-uapi/xe: Remove bogus engine list from the wait_user_fence IOCTL
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (14 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 15/20] drm-uapi/xe: Crystal Reference Clock updates Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 17/20] drm-uapi/xe: Add Tile ID information to the GT info query Francois Dugast
` (4 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Remove bogus engine list from the
wait_user_fence IOCTL")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 17 ++---------------
lib/xe/xe_ioctl.c | 8 ++------
tests/intel/xe_waitfence.c | 8 --------
3 files changed, 4 insertions(+), 29 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 0b9047086..df510f87e 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -961,8 +961,7 @@ struct drm_xe_wait_user_fence {
/** @op: wait operation (type of comparison) */
__u16 op;
-#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
-#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
/** @flags: wait flags */
__u16 flags;
@@ -995,20 +994,8 @@ struct drm_xe_wait_user_fence {
*/
__s64 timeout;
- /**
- * @num_engines: number of engine instances to wait on, must be zero
- * when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
- */
- __u64 num_engines;
-
- /**
- * @instances: user pointer to array of drm_xe_engine_class_instance to
- * wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
- */
- __u64 instances;
-
/** @reserved: Reserved */
- __u64 reserved[2];
+ __u64 reserved[4];
};
/**
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 56d2fe592..37a64954c 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -423,12 +423,10 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
struct drm_xe_wait_user_fence wait = {
.addr = to_user_pointer(addr),
.op = DRM_XE_UFENCE_WAIT_OP_EQ,
- .flags = !eci ? DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP : 0,
+ .flags = 0,
.value = value,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = timeout,
- .num_engines = eci ? 1 :0,
- .instances = eci ? to_user_pointer(eci) : 0,
};
igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait), 0);
@@ -455,12 +453,10 @@ int64_t xe_wait_ufence_abstime(int fd, uint64_t *addr, uint64_t value,
struct drm_xe_wait_user_fence wait = {
.addr = to_user_pointer(addr),
.op = DRM_XE_UFENCE_WAIT_OP_EQ,
- .flags = !eci ? DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP | DRM_XE_UFENCE_WAIT_FLAG_ABSTIME : 0,
+ .flags = !eci ? DRM_XE_UFENCE_WAIT_FLAG_ABSTIME : 0,
.value = value,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = timeout,
- .num_engines = eci ? 1 : 0,
- .instances = eci ? to_user_pointer(eci) : 0,
};
struct timespec ts;
diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
index 3be987954..53b390640 100644
--- a/tests/intel/xe_waitfence.c
+++ b/tests/intel/xe_waitfence.c
@@ -47,8 +47,6 @@ static int64_t wait_with_eci_abstime(int fd, uint64_t *addr, uint64_t value,
.value = value,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = timeout,
- .num_engines = eci ? 1 : 0,
- .instances = eci ? to_user_pointer(eci) : 0,
};
struct timespec ts;
@@ -166,8 +164,6 @@ invalid_flag(int fd)
.value = 1,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = -1,
- .num_engines = 0,
- .instances = 0,
};
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
@@ -191,8 +187,6 @@ invalid_ops(int fd)
.value = 1,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = 1,
- .num_engines = 0,
- .instances = 0,
};
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
@@ -216,8 +210,6 @@ invalid_engine(int fd)
.value = 1,
.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
.timeout = -1,
- .num_engines = 1,
- .instances = 0,
};
uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
--
2.34.1
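With the engine list gone, a wait boils down to the sketch below (defines and helpers as in the hunks above; the fence location is a hypothetical u64 the GPU writes):

	struct drm_xe_wait_user_fence wait = {
		.addr = to_user_pointer(&fence_value),	/* hypothetical u64 */
		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
		.flags = 0,	/* or DRM_XE_UFENCE_WAIT_FLAG_ABSTIME */
		.value = USER_FENCE_VALUE,
		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
		.timeout = timeout,
		/* num_engines/instances no longer exist, nothing else to fill */
	};
	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait), 0);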
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 17/20] drm-uapi/xe: Add Tile ID information to the GT info query
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (15 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 16/20] drm-uapi/xe: Remove bogus engine list from the wait_user_fence IOCTL Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 18/20] drm-uapi/xe: Fix various struct padding for 64b alignment Francois Dugast
` (3 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Add Tile ID information to the GT
info query")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index df510f87e..555560e43 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -377,6 +377,8 @@ struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
/** @type: GT type: Main or Media */
__u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
/** @gt_id: Unique ID of this GT within the PCI Device */
__u16 gt_id;
/** @reference_clock: A clock frequency for timestamp */
--
2.34.1
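Consuming the new field from a test would look like this sketch (device/gt_list plumbing as used elsewhere in the series):

	struct xe_device *dev = xe_device_get(fd);

	for (int i = 0; i < dev->gt_list->num_gt; i++)
		igt_info("gt%u lives on tile %u\n",
			 dev->gt_list->gt_list[i].gt_id,
			 dev->gt_list->gt_list[i].tile_id);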
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 18/20] drm-uapi/xe: Fix various struct padding for 64b alignment
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (16 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 17/20] drm-uapi/xe: Add Tile ID information to the GT info query Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 19/20] drm-uapi/xe: Move xe_exec after xe_exec_queue Francois Dugast
` (2 subsequent siblings)
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Fix various struct padding for
64b alignment")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 555560e43..f5078fd99 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -211,8 +211,6 @@ struct drm_xe_mem_region {
* a unique pair.
*/
__u16 instance;
- /** @pad: MBZ */
- __u32 pad;
/**
* @min_page_size: Min page-size in bytes for this region.
*
@@ -381,6 +379,8 @@ struct drm_xe_gt {
__u16 tile_id;
/** @gt_id: Unique ID of this GT within the PCI Device */
__u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
/** @reference_clock: A clock frequency for timestamp */
__u32 reference_clock;
/**
@@ -715,6 +715,9 @@ struct drm_xe_vm_bind_op {
*/
__u32 prefetch_mem_region_instance;
+ /** @pad2: MBZ */
+ __u32 pad2;
+
/** @reserved: Reserved */
__u64 reserved[3];
};
@@ -733,12 +736,12 @@ struct drm_xe_vm_bind {
*/
__u32 exec_queue_id;
- /** @num_binds: number of binds in this IOCTL */
- __u32 num_binds;
-
/** @pad: MBZ */
__u32 pad;
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
union {
/** @bind: used if num_binds == 1 */
struct drm_xe_vm_bind_op bind;
@@ -750,12 +753,12 @@ struct drm_xe_vm_bind {
__u64 vector_of_binds;
};
+ /** @pad2: MBZ */
+ __u32 pad2;
+
/** @num_syncs: amount of syncs to wait on */
__u32 num_syncs;
- /** @pad2: MBZ */
- __u32 pad2;
-
/** @syncs: pointer to struct drm_xe_sync array */
__u64 syncs;
--
2.34.1
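Userspace can guard such layout changes with compile-time checks, e.g. this sketch (not part of the patch; needs <assert.h> and <stddef.h>):

	/* both should hold once the padding above is in place */
	static_assert(sizeof(struct drm_xe_vm_bind_op) % 8 == 0,
		      "bind_op size must stay a multiple of 64 bits");
	static_assert(offsetof(struct drm_xe_vm_bind, bind) % 8 == 0,
		      "the bind union must start 64b aligned");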
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 19/20] drm-uapi/xe: Move xe_exec after xe_exec_queue
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (17 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 18/20] drm-uapi/xe: Fix various struct padding for 64b alignment Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 14:54 ` [igt-dev] [PATCH v4 20/20] tests/intel/xe: Adjust to KMD uAPI changes for long-running VMs Francois Dugast
2023-11-29 15:16 ` [igt-dev] ✗ Fi.CI.BUILD: failure for uAPI Alignment - Cleanup and future proof (rev4) Patchwork
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev
Align with commit ("drm/xe/uapi: Move xe_exec after xe_exec_queue")
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
---
include/drm-uapi/xe_drm.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index f5078fd99..400910524 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -103,11 +103,11 @@ struct xe_user_extension {
#define DRM_XE_VM_CREATE 0x03
#define DRM_XE_VM_DESTROY 0x04
#define DRM_XE_VM_BIND 0x05
-#define DRM_XE_EXEC 0x06
-#define DRM_XE_EXEC_QUEUE_CREATE 0x07
-#define DRM_XE_EXEC_QUEUE_DESTROY 0x08
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x09
-#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0a
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x08
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x09
+#define DRM_XE_EXEC 0x0a
#define DRM_XE_WAIT_USER_FENCE 0x0b
/* Must be kept compact -- no holes */
@@ -117,11 +117,11 @@ struct xe_user_extension {
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
-#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
/**
--
2.34.1
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH v4 20/20] tests/intel/xe: Adjust to KMD uAPI changes for long-running VMs
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (18 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 19/20] drm-uapi/xe: Move xe_exec after xe_exec_queue Francois Dugast
@ 2023-11-29 14:54 ` Francois Dugast
2023-11-29 15:16 ` [igt-dev] ✗ Fi.CI.BUILD: failure for uAPI Alignment - Cleanup and future proof (rev4) Patchwork
20 siblings, 0 replies; 23+ messages in thread
From: Francois Dugast @ 2023-11-29 14:54 UTC (permalink / raw)
To: igt-dev; +Cc: Thomas Hellström, Oak Zeng, Rodrigo Vivi
From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Currently we're using "compute mode" for long-running VMs using
preempt-fences for memory management, and "fault mode" for long-running
VMs using page faults.
Change this to use the terminology "long-running", abbreviated as LR,
for such VMs. These VMs can then be in either preempt-fence mode or
fault mode. The user can force fault mode at creation time, but otherwise
the driver can choose whether or not to use fault mode for long-running
VMs depending on the device capabilities.
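In IGT terms the two creation paths then look as follows, as a sketch (flag names from the hunks below):

	/* long-running VM, preempt-fence based memory management: */
	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
			  DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);

	/* long-running VM with user-forced recoverable page faults: */
	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
			  DRM_XE_VM_CREATE_FLAG_LR_MODE |
			  DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);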
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
benchmarks/gem_wsim.c | 2 +-
include/drm-uapi/xe_drm.h | 23 ++++++++++++++++++++++-
tests/intel/xe_evict.c | 4 ++--
tests/intel/xe_exec_balancer.c | 2 +-
tests/intel/xe_exec_compute_mode.c | 2 +-
tests/intel/xe_exec_fault_mode.c | 1 +
tests/intel/xe_exec_reset.c | 2 +-
tests/intel/xe_exec_threads.c | 4 ++--
tests/intel/xe_noexec_ping_pong.c | 2 +-
9 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index 66ad7563d..e937e1027 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -2038,7 +2038,7 @@ static void xe_vm_create_(struct xe_vm *vm)
if (vm->compute_mode)
flags |= DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE;
+ DRM_XE_VM_CREATE_FLAG_LR_MODE;
vm->id = xe_vm_create(fd, flags, 0);
}
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index 400910524..d1297378a 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -627,8 +627,29 @@ struct drm_xe_vm_create {
__u64 extensions;
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
-#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (1 << 1)
+ /*
+ * An LR, or Long Running VM accepts exec submissions
+ * to its exec_queues that don't have an upper time limit on
+ * the job execution time. But exec submissions to these
+ * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
+ * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
+ * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * LR VMs can be created in recoverable page-fault mode using
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ * If that flag is omitted, the UMD can not rely on the slightly
+ * different per-VM overcommit semantics that are enabled by
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+ * still enable recoverable pagefaults if supported by the device.
+ */
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (1 << 2)
+ /*
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated
+ * on demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
+ */
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 3)
/** @flags: Flags */
__u32 flags;
diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
index 5b06b8953..89dc46fae 100644
--- a/tests/intel/xe_evict.c
+++ b/tests/intel/xe_evict.c
@@ -246,12 +246,12 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
fd = drm_open_driver(DRIVER_XE);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
if (flags & BIND_EXEC_QUEUE)
bind_exec_queues[0] = xe_bind_exec_queue_create(fd, vm, 0, true);
if (flags & MULTI_VM) {
vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
if (flags & BIND_EXEC_QUEUE)
bind_exec_queues[1] = xe_bind_exec_queue_create(fd, vm2,
0, true);
diff --git a/tests/intel/xe_exec_balancer.c b/tests/intel/xe_exec_balancer.c
index 742724641..79ff65e89 100644
--- a/tests/intel/xe_exec_balancer.c
+++ b/tests/intel/xe_exec_balancer.c
@@ -436,7 +436,7 @@ test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
return;
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
index 881f3829b..7d3004d65 100644
--- a/tests/intel/xe_exec_compute_mode.c
+++ b/tests/intel/xe_exec_compute_mode.c
@@ -115,7 +115,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_exec_queues <= MAX_N_EXECQUEUES);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
index 228e7e44a..ee7cbb604 100644
--- a/tests/intel/xe_exec_fault_mode.c
+++ b/tests/intel/xe_exec_fault_mode.c
@@ -132,6 +132,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
+ DRM_XE_VM_CREATE_FLAG_LR_MODE |
DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
diff --git a/tests/intel/xe_exec_reset.c b/tests/intel/xe_exec_reset.c
index b8f5c6fbc..edfd27fe0 100644
--- a/tests/intel/xe_exec_reset.c
+++ b/tests/intel/xe_exec_reset.c
@@ -532,7 +532,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
fd = drm_open_driver(DRIVER_XE);
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
bo_size = sizeof(*data) * n_execs;
bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
xe_get_default_alignment(fd));
diff --git a/tests/intel/xe_exec_threads.c b/tests/intel/xe_exec_threads.c
index 9aa989ab5..fcb926698 100644
--- a/tests/intel/xe_exec_threads.c
+++ b/tests/intel/xe_exec_threads.c
@@ -287,7 +287,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
if (!vm) {
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
owns_vm = true;
}
@@ -1008,7 +1008,7 @@ static void threads(int fd, int flags)
0);
vm_compute_mode = xe_vm_create(fd,
DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT |
- DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE,
+ DRM_XE_VM_CREATE_FLAG_LR_MODE,
0);
}
diff --git a/tests/intel/xe_noexec_ping_pong.c b/tests/intel/xe_noexec_ping_pong.c
index 9659272b5..c91340784 100644
--- a/tests/intel/xe_noexec_ping_pong.c
+++ b/tests/intel/xe_noexec_ping_pong.c
@@ -64,7 +64,7 @@ static void test_ping_pong(int fd, struct drm_xe_engine *engine)
* stats.
*/
for (i = 0; i < NUM_VMS; ++i) {
- vm[i] = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE, 0);
+ vm[i] = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
for (j = 0; j < NUM_BOS; ++j) {
igt_debug("Creating bo size %lu for vm %u\n",
(unsigned long) bo_size,
--
2.34.1
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] ✗ Fi.CI.BUILD: failure for uAPI Alignment - Cleanup and future proof (rev4)
2023-11-29 14:54 [igt-dev] [PATCH v4 00/20] uAPI Alignment - Cleanup and future proof Francois Dugast
` (19 preceding siblings ...)
2023-11-29 14:54 ` [igt-dev] [PATCH v4 20/20] tests/intel/xe: Adjust to KMD uAPI changes for long-running VMs Francois Dugast
@ 2023-11-29 15:16 ` Patchwork
20 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2023-11-29 15:16 UTC (permalink / raw)
To: Francois Dugast; +Cc: igt-dev
== Series Details ==
Series: uAPI Alignment - Cleanup and future proof (rev4)
URL : https://patchwork.freedesktop.org/series/126537/
State : failure
== Summary ==
Applying: drm-uapi/xe: Extend drm_xe_vm_bind_op
Applying: xe_ioctl: Converge bo_create to the most used version
Using index info to reconstruct a base tree...
M lib/xe/xe_ioctl.c
M lib/xe/xe_ioctl.h
M tests/intel/xe_vm.c
Falling back to patching base and 3-way merge...
Auto-merging tests/intel/xe_vm.c
Auto-merging lib/xe/xe_ioctl.h
CONFLICT (content): Merge conflict in lib/xe/xe_ioctl.h
Auto-merging lib/xe/xe_ioctl.c
CONFLICT (content): Merge conflict in lib/xe/xe_ioctl.c
Patch failed at 0002 xe_ioctl: Converge bo_create to the most used version
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
^ permalink raw reply [flat|nested] 23+ messages in thread