* [PATCH i-g-t 0/5] Madvise Tests in IGT
@ 2025-08-28 16:58 nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
` (8 more replies)
0 siblings, 9 replies; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Revision 1:
Added madvise tests in IGT which validate different features related
to the attributes passed. Madvise tests related to Atomic operation and
Preferred Loc have been added and validated. Madvise tests are called as
part of different struct sections and are available as madvise-<test-name> in
the list of subtests.
ver2:
- added back subtest which was deleted due to rebasing
ver3:
- added variable deleted during rebase.
ver4:
- Removed redundant loop for multi-vma test. Instead added a multi-vma check
which manipulates only the address and batch address; the
remaining execution is done as per the default flow.
- Passed region DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC in prefetch tests.
ver5:
- Incorporated review comments
- Removed section from test which was not required
- Added subtests description
- Tests executed on latest drm tip
Nishit Sharma (5):
DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure
lib/xe: Add xe_vm_madvise ioctl support
lib/xe: Add Helper to get memory attributes
tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test
tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT
include/drm-uapi/xe_drm.h | 289 +++++++++++-
lib/xe/xe_ioctl.c | 154 ++++++
lib/xe/xe_ioctl.h | 9 +-
tests/intel/xe_exec_system_allocator.c | 624 ++++++++++++++++++++++---
4 files changed, 990 insertions(+), 86 deletions(-)
--
2.43.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
@ 2025-08-28 16:58 ` nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support nishit.sharma
` (7 subsequent siblings)
8 siblings, 0 replies; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Defined IOCTL number for the madvise operation. Added struct drm_xe_madvise,
which is passed as input to the MADVISE IOCTL.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
include/drm-uapi/xe_drm.h | 289 ++++++++++++++++++++++++++++++++++++--
1 file changed, 281 insertions(+), 8 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index a52f95593..e9a27a844 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _XE_DRM_H_
-#define _XE_DRM_H_
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
#include "drm.h"
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_REGION_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_REGION_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -134,7 +140,7 @@ extern "C" {
* redefine the interface more easily than an ever growing struct of
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
- * the boundary with pointers encapsulated inside u64.
+ * the __user boundary with pointers encapsulated inside u64.
*
* Example chaining:
*
@@ -925,9 +931,9 @@ struct drm_xe_gem_mmap_offset {
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1003,6 +1009,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1118,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
@@ -1394,7 +1405,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1974,8 +1985,270 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic_val = DRM_XE_ATOMIC_DEVICE,
+ * .pad = 0,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of faulting tile as preferred loc
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+ /** @preferred_mem_loc.pad : MBZ */
+ __u16 pad;
+
+ /** @preferred_mem_loc.reserved : Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+ * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour
+ * Support both GPU and CPU atomic operations for system allocator
+ * Support GPU atomic operations for normal(bo) allocator
+ * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations
+ * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations
+ * - DRM_XE_ATOMIC_CPU: Support CPU atomic
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by KMD in response to the
+ * DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes memory attributes of
+ * memory ranges within a user-specified address range in a VM.
+ *
+ * The structure includes information such as atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve attributes for each memory range.
+ *
+ * @extensions: Pointer to the first extension struct, if any
+ * @start: Start address of the memory range
+ * @end: End address of the virtual memory range
+ *
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query memory attributes of memory regions
+ * within a user specified address range in a VM. It provides detailed
+ * information about each memory range, including atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, it means the memory ranges changed
+ * between the first call and now; retry the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL, followed by the
+ * second ioctl call.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ * // First ioctl call to get num of mem regions and sizeof each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Allocate buffer for the memory region attributes
+ * void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Iterate over the returned memory region attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_mem_ranges: number of mem_ranges in range */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
-#endif /* _XE_DRM_H_ */
+#endif /* _UAPI_XE_DRM_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
@ 2025-08-28 16:58 ` nishit.sharma
2025-08-29 13:56 ` Gurram, Pravalika
2025-08-28 16:58 ` [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes nishit.sharma
` (6 subsequent siblings)
8 siblings, 1 reply; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Added xe_vm_madvise() which issues madvise ioctl DRM_IOCTL_XE_MADVISE for
VM region advising the driver about expected usage or memory policy for
specified address range. MADVISE ioctl requires pointer to drm_xe_madvise
structure as one of the inputs. Depending upon the type of madvise operation
(Atomic, Preferred LOC or PAT), the required members of the drm_xe_madvise
structure are initialized and passed in the MADVISE ioctl.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
lib/xe/xe_ioctl.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++
lib/xe/xe_ioctl.h | 5 ++++-
2 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 1e95af409..43bad8452 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -585,3 +585,59 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
igt_assert_eq(__xe_wait_ufence(fd, addr, value, exec_queue, &timeout), 0);
return timeout;
}
+
+int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
+ uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
+{
+ struct drm_xe_madvise madvise = {};
+
+ madvise.extensions = ext;
+ madvise.vm_id = vm;
+ madvise.start = addr;
+ madvise.range = range;
+
+ if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+ madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
+ madvise.atomic.val = op_val;
+ } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
+ madvise.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
+ madvise.preferred_mem_loc.devmem_fd = op_val;
+ madvise.preferred_mem_loc.migration_policy = policy;
+ igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
+ madvise.preferred_mem_loc.devmem_fd);
+ } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+ madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
+ madvise.pat_index.val = op_val;
+ } else {
+ igt_warn("Unknown attribute\n");
+ return -EINVAL;
+ }
+
+ if (igt_ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise))
+ return -errno;
+
+ return 0;
+}
+
+/**
+ * xe_vm_madvise:
+ * @fd: xe device fd
+ * @vm: vm_id of the virtual range
+ * @addr: start of the virtual address range
+ * @range: size of the virtual address range
+ * @ext: Pointer to the first extension struct, if any
+ * @type: type of attribute
+ * @op_val: fd/atomic value/pat index, depending upon type of operation
+ * @policy: Page migration policy
+ *
+ * Function initializes different members of struct drm_xe_madvise and calls
+ * the MADVISE IOCTL.
+ *
+ * Returns 0 on success and asserts otherwise.
+ */
+int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
+ uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
+{
+ igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy), 0);
+ return 0;
+}
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 6302d1a7d..a5996cf65 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -99,5 +99,8 @@ int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
uint32_t exec_queue, int64_t *timeout);
int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
uint32_t exec_queue, int64_t timeout);
-
+int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
+ uint32_t type, uint32_t op_val, uint16_t policy);
+int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
+ uint32_t type, uint32_t op_val, uint16_t policy);
#endif /* XE_IOCTL_H */
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support nishit.sharma
@ 2025-08-28 16:58 ` nishit.sharma
2025-08-29 14:02 ` Gurram, Pravalika
2025-08-28 16:58 ` [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test nishit.sharma
` (5 subsequent siblings)
8 siblings, 1 reply; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
xe_vm_print_mem_attr_values_in_range() function added which calls
QUERY_MEM_RANGES_ATTRS ioctl to get different memory attributes from KMD
and then prints memory attributes returned by KMD for different access
policies like atomic access, preferred loc and pat index.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
lib/xe/xe_ioctl.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++
lib/xe/xe_ioctl.h | 4 +++
2 files changed, 96 insertions(+)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 43bad8452..4ab2ef39c 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -57,6 +57,98 @@ uint64_t xe_bb_size(int fd, uint64_t reqsize)
xe_get_default_alignment(fd));
}
+int xe_vm_number_vmas_in_range(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr)
+{
+ if (igt_ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, vmas_attr))
+ return -errno;
+ return 0;
+}
+
+int xe_vm_vma_attrs(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr,
+ struct drm_xe_mem_range_attr *mem_attr)
+{
+ if (!mem_attr)
+ return -EINVAL;
+
+ vmas_attr->vector_of_mem_attr = (uintptr_t)mem_attr;
+
+ if (igt_ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, vmas_attr))
+ return -errno;
+
+ return 0;
+}
+
+/**
+ * xe_vm_print_mem_attr_values_in_range:
+ * @fd: xe device fd
+ * @vm: vm_id of the virtual range
+ * @start: start of the virtual address range
+ * @range: size of the virtual address range
+ *
+ * Calls QUERY_MEM_RANGES_ATTRS ioctl to get memory attributes for different
+ * memory ranges from KMD. Prints memory attributes as returned by KMD for
+ * atomic, preferred loc and pat index types.
+ *
+ * Returns 0 for success or error for failure
+ */
+
+int xe_vm_print_mem_attr_values_in_range(int fd, uint32_t vm, uint64_t start, uint64_t range)
+{
+
+ void *ptr_start, *ptr;
+ int err;
+ struct drm_xe_vm_query_mem_range_attr query = {
+ .vm_id = vm,
+ .start = start,
+ .range = range,
+ .num_mem_ranges = 0,
+ .sizeof_mem_range_attr = 0,
+ .vector_of_mem_attr = (uintptr_t)NULL,
+ };
+
+ igt_debug("mem_attr_values_in_range called start = %"PRIu64"\n range = %"PRIu64"\n",
+ start, range);
+
+ err = xe_vm_number_vmas_in_range(fd, &query);
+ if (err || !query.num_mem_ranges || !query.sizeof_mem_range_attr) {
+ igt_warn("ioctl failed for xe_vm_number_vmas_in_range\n");
+ igt_debug("vmas_in_range err = %d query.num_mem_ranges = %u query.sizeof_mem_range_attr=%lld\n",
+ err, query.num_mem_ranges, query.sizeof_mem_range_attr);
+ return err;
+ }
+
+ /* Allocate buffer for the memory region attributes */
+ ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ ptr_start = ptr;
+
+ if (!ptr)
+ return -ENOMEM;
+
+ err = xe_vm_vma_attrs(fd, &query, ptr);
+ if (err) {
+ igt_warn("ioctl failed for vma_attrs err = %d\n", err);
+ return err;
+ }
+
+ /* Iterate over the returned memory region attributes */
+ for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ struct drm_xe_mem_range_attr *mem_attrs = (struct drm_xe_mem_range_attr *)ptr;
+
+ igt_info("vma_id = %d\nvma_start = 0x%016llx\nvma_end = 0x%016llx\n"
+ "vma:atomic = %d\nvma:pat_index = %d\nvma:preferred_loc_region = %d\n"
+ "vma:preferred_loc_devmem_fd = %d\n\n\n", i, mem_attrs->start,
+ mem_attrs->end,
+ mem_attrs->atomic.val, mem_attrs->pat_index.val,
+ mem_attrs->preferred_mem_loc.migration_policy,
+ mem_attrs->preferred_mem_loc.devmem_fd);
+
+ ptr += query.sizeof_mem_range_attr;
+ }
+
+ free(ptr_start);
+ return 0;
+}
+
uint32_t xe_vm_create(int fd, uint32_t flags, uint64_t ext)
{
struct drm_xe_vm_create create = {
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index a5996cf65..ae16af233 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -103,4 +103,8 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t
uint32_t type, uint32_t op_val, uint16_t policy);
int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
uint32_t type, uint32_t op_val, uint16_t policy);
+int xe_vm_number_vmas_in_range(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr);
+int xe_vm_vma_attrs(int fd, struct drm_xe_vm_query_mem_range_attr *vmas_attr,
+ struct drm_xe_mem_range_attr *mem_attr);
+int xe_vm_print_mem_attr_values_in_range(int fd, uint32_t vm, uint64_t start, uint64_t range);
#endif /* XE_IOCTL_H */
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (2 preceding siblings ...)
2025-08-28 16:58 ` [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes nishit.sharma
@ 2025-08-28 16:58 ` nishit.sharma
2025-08-29 14:21 ` Gurram, Pravalika
2025-08-29 19:55 ` Matthew Brost
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
` (4 subsequent siblings)
8 siblings, 2 replies; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Added preferred-loc-smem test which is called in combination with other
tests as well. In this test the buffer object preferred location is
system memory. MADVISE ioctl is called with preferred_loc attribute and
default_system system memory as preferred location.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
tests/intel/xe_exec_system_allocator.c | 225 ++++++++++++++++++++-----
1 file changed, 187 insertions(+), 38 deletions(-)
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
index 007d9bdc0..70ca5fc2e 100644
--- a/tests/intel/xe_exec_system_allocator.c
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -138,7 +138,6 @@ static void signal_pdata(struct process_data *pdata)
#define CPU_FAULT_THREADS (0x1 << 2)
#define CPU_FAULT_PROCESS (0x1 << 3)
#define CPU_FAULT_SAME_PAGE (0x1 << 4)
-
static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
unsigned int flags)
{
@@ -406,6 +405,39 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
}
+#define MAX_N_EXEC_QUEUES 16
+
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+#define FORK_READ (0x1 << 14)
+#define FORK_READ_AFTER (0x1 << 15)
+#define MREMAP (0x1 << 16)
+#define DONTUNMAP (0x1 << 17)
+#define READ_ONLY_REMAP (0x1 << 18)
+#define SYNC_EXEC (0x1 << 19)
+#define EVERY_OTHER_CHECK (0x1 << 20)
+#define MULTI_FAULT (0x1 << 21)
+#define PREFETCH (0x1 << 22)
+#define THREADS (0x1 << 23)
+#define PROCESSES (0x1 << 24)
+#define PREFETCH_BENCHMARK (0x1 << 25)
+#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
+#define PREFERRED_LOC_SMEM (0x1 << 27)
+
+#define N_MULTI_FAULT 4
+
/**
* SUBTEST: unaligned-alloc
* Description: allocate unaligned sizes of memory
@@ -460,7 +492,7 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t *bos = NULL;
struct timespec tv = {};
uint64_t submit, read, elapsed;
- int i;
+ int i, err;
vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
@@ -500,6 +532,15 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
igt_assert(alloc.ptr);
}
+
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(alloc.ptr), alloc_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size =%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(alloc.ptr), alloc_size);
+ }
allocs[i] = alloc;
touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
@@ -662,7 +703,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
size_t bo_size = SZ_2M, unmap_offset = 0;
uint32_t vm, exec_queue;
u64 *exec_ufence = NULL;
- int i;
+ int i, err;
void *old, *new = NULL;
struct aligned_alloc_type alloc;
@@ -688,6 +729,15 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
data[0].vm_sync = 0;
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
+ strerror(errno), vm, to_user_pointer(data), bo_size);
+ }
+
exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
PROT_WRITE, MAP_SHARED |
MAP_ANONYMOUS, -1, 0);
@@ -747,38 +797,6 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
xe_vm_destroy(fd, vm);
}
-#define MAX_N_EXEC_QUEUES 16
-
-#define MMAP (0x1 << 0)
-#define NEW (0x1 << 1)
-#define BO_UNMAP (0x1 << 2)
-#define FREE (0x1 << 3)
-#define BUSY (0x1 << 4)
-#define BO_MAP (0x1 << 5)
-#define RACE (0x1 << 6)
-#define SKIP_MEMSET (0x1 << 7)
-#define FAULT (0x1 << 8)
-#define FILE_BACKED (0x1 << 9)
-#define LOCK (0x1 << 10)
-#define MMAP_SHARED (0x1 << 11)
-#define HUGE_PAGE (0x1 << 12)
-#define SHARED_ALLOC (0x1 << 13)
-#define FORK_READ (0x1 << 14)
-#define FORK_READ_AFTER (0x1 << 15)
-#define MREMAP (0x1 << 16)
-#define DONTUNMAP (0x1 << 17)
-#define READ_ONLY_REMAP (0x1 << 18)
-#define SYNC_EXEC (0x1 << 19)
-#define EVERY_OTHER_CHECK (0x1 << 20)
-#define MULTI_FAULT (0x1 << 21)
-#define PREFETCH (0x1 << 22)
-#define THREADS (0x1 << 23)
-#define PROCESSES (0x1 << 24)
-#define PREFETCH_BENCHMARK (0x1 << 25)
-#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
-
-#define N_MULTI_FAULT 4
-
/**
* SUBTEST: once-%s
* Description: Run %arg[1] system allocator test only once
@@ -951,6 +969,80 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
* @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
* @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
* @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
+ * @free-nomemset-preferred-loc-smem: malloc and free buffer for each exec and perform preferred loc madvise operation
+ * @free-preferred-loc-smem: free buffer for each exec and perform preferred loc madvise operation
+ * @free-race-nomemset-preferred-loc-smem: free buffer for each exec with race between cpu and gpu access and perform madvise operation skipping memset
+ * @free-race-preferred-loc-smem: free buffer for each exec with race between cpu and gpu access and perform madvise operation
+ * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer for all execs, bind and unbind a BO to same address, skip memset and perform madvise operation
+ * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers and perform madvise operation
+ * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs, try to unbind while buffer valid and perform madvise operation
+ * @malloc-fork-read-after-preferred-loc-smem: malloc single buffer for all execs, fork a process to read test output, perform madvise operation
+ * @malloc-fork-read-preferred-loc-smem: malloc single buffer for all execs, fork a process to read test output, perform madvise operation
+ * @malloc-mlock-nomemset-preferred-loc-smem: malloc and mlock single buffer for all execs, skip memset of buffers, perform madvise operation
+ * @malloc-mlock-preferred-loc-smem: malloc and mlock single buffer for all execs, perform madvise operation
+ * @malloc-multi-fault-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
+ * @malloc-nomemset-preferred-loc-smem: malloc single buffer for all execs, skip memset of buffers and perform madvise operation
+ * @malloc-preferred-loc-smem: malloc single buffer for all execs, issue a command which will trigger multiple faults, perform madvise operation
+ * @malloc-prefetch-preferred-loc-smem: malloc single buffer for all execs, prefetch buffer before each exec, perform madvise operation
+ * @malloc-prefetch-race-preferred-loc-smem: malloc single buffer for all execs, prefetch buffer before each exec, perform madvise operation
+ * @malloc-race-nomemset-preferred-loc-smem: malloc single buffer for all execs with race between cpu and gpu access, perform madvise operation
+ * @malloc-race-preferred-loc-smem: malloc single buffer for all execs with race between cpu and gpu access, perform madvise operation
+ * @free-race-nomemset-preferred-loc-smem: malloc and free buffer for each exec with race between cpu and gpu access, perform madvise operation
+ * @free-race-preferred-loc-smem: malloc and free buffer for each exec with race between cpu and gpu access, perform madvise operation
+ * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer for all execs, bind and unbind a BO to same address before execs, perform madvise operation
+ * @malloc-bo-unmap-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
+ * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
+ * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs and perform madvise
+ * @mmap-file-mlock-nomemset-preferred-loc-smem: mmap and mlock single buffer, with file backing, perform madvise
+ * @mmap-file-mlock-preferred-loc-smem: mmap and mlock single buffer, with file backing, perform madvise
+ * @mmap-file-nomemset-preferred-loc-smem: mmap single buffer, with file backing and perform madvise
+ * @mmap-file-preferred-loc-smem: mmap single buffer, with file backing and perform madvise
+ * @mmap-free-huge-nomemset-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
+ * @mmap-free-huge-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
+ * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
+ * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
+ * @mmap-free-race-nomemset-preferred-loc-smem:
+ * @mmap-free-race-preferred-loc-smem:
+ * @mmap-huge-nomemset-preferred-loc-smem:
+ * @mmap-huge-preferred-loc-smem:
+ * @mmap-mlock-nomemset-preferred-loc-smem:
+ * @mmap-mlock-preferred-loc-smem:
+ * @mmap-new-huge-nomemset-preferred-loc-smem:
+ * @mmap-new-huge-preferred-loc-smem:
+ * @mmap-new-nomemset-preferred-loc-smem:
+ * @mmap-new-preferred-loc-smem:
+ * @mmap-new-race-nomemset-preferred-loc-smem:
+ * @mmap-new-race-preferred-loc-smem:
+ * @mmap-nomemset-preferred-loc-smem:
+ * @mmap-preferred-loc-smem:
+ * @mmap-prefetch-preferred-loc-smem:
+ * @mmap-prefetch-shared-preferred-loc-smem:
+ * @mmap-race-nomemset-preferred-loc-smem:
+ * @mmap-race-preferred-loc-smem:
+ * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
+ * @mmap-remap-dontunmap-preferred-loc-smem:
+ * @mmap-remap-eocheck-preferred-loc-smem:
+ * @mmap-remap-preferred-loc-smem:
+ * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
+ * @mmap-remap-ro-dontunmap-preferred-loc-smem:
+ * @mmap-remap-ro-eocheck-preferred-loc-smem:
+ * @mmap-remap-ro-preferred-loc-smem:
+ * @mmap-shared-nomemset-preferred-loc-smem:
+ * @mmap-shared-preferred-loc-smem:
+ * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
+ * @mmap-shared-remap-dontunmap-preferred-loc-smem:
+ * @mmap-shared-remap-eocheck-preferred-loc-smem:
+ * @mmap-shared-remap-preferred-loc-smem:
+ * @new-bo-map-nomemset-preferred-loc-smem:
+ * @new-bo-map-preferred-loc-smem:
+ * @new-busy-nomemset-preferred-loc-smem:
+ * @new-busy-preferred-loc-smem:
+ * @new-nomemset-preferred-loc-smem:
+ * @new-preferred-loc-smem:
+ * @new-prefetch-preferred-loc-smem:
+ * @new-race-nomemset-preferred-loc-smem:
+ * @new-race-preferred-loc-smem:
+ * @prefetch-benchmark:
*
* SUBTEST: prefetch-benchmark
* Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
@@ -1020,7 +1112,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint32_t bo = 0, bind_sync = 0;
void **pending_free;
u64 *exec_ufence = NULL, *bind_ufence = NULL;
- int i, j, b, file_fd = -1, prev_idx, pf_count;
+ int i, j, b, file_fd = -1, prev_idx, pf_count, err;
bool free_vm = false;
size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
size_t orig_size = bo_size;
@@ -1133,6 +1225,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
addr = to_user_pointer(data);
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
+ strerror(errno), vm, to_user_pointer(data), bo_size);
+ }
+
if (flags & BO_UNMAP) {
bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
bo = xe_bo_create(fd, vm, bo_size,
@@ -1202,7 +1303,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
uint64_t batch_addr = addr + batch_offset;
uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
uint64_t sdi_addr = addr + sdi_offset;
- int e = i % n_exec_queues, err;
+ int e = i % n_exec_queues;
bool fault_inject = (FAULT & flags) && i == n_execs / 2;
bool fault_injected = (FAULT & flags) && i > n_execs;
@@ -1232,6 +1333,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
next_data = aligned_alloc_type.ptr;
igt_assert(next_data);
+
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(next_data), bo_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
+ strerror(errno), vm, to_user_pointer(next_data), bo_size);
+ }
+
__aligned_partial_free(&aligned_alloc_type);
b = 0;
@@ -1253,6 +1364,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
sync[0].addr = to_user_pointer(bind_ufence);
start = igt_nsec_elapsed(&tv);
+
xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, sync,
1, region);
end = igt_nsec_elapsed(&tv);
@@ -1355,6 +1467,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
} else {
igt_assert_eq(data[idx].data,
READ_VALUE(&data[idx]));
+
if (flags & PREFETCH_SYS_BENCHMARK) {
struct timespec tv = {};
u64 start, end;
@@ -1429,6 +1542,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
data = aligned_alloc_type.ptr;
igt_assert(data);
+
__aligned_partial_free(&aligned_alloc_type);
bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
@@ -1450,6 +1564,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
}
bo = 0;
data = aligned_alloc(aligned_size, bo_size);
+
igt_assert(data);
}
addr = to_user_pointer(data);
@@ -1460,6 +1575,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
prev_idx = idx;
}
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
+ strerror(errno), vm, to_user_pointer(data), bo_size);
+ }
+
if (flags & PREFETCH_BENCHMARK) {
igt_info("Prefetch VRAM execution took %.3fms, %.1f5 GB/s\n",
1e-6 * prefetch_ns,
@@ -1587,6 +1711,7 @@ threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
uint32_t vm = 0;
bool go = false;
void *alloc = NULL;
+ int err;
if ((FILE_BACKED | FORK_READ) & flags)
return;
@@ -1614,6 +1739,15 @@ threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
alloc = aligned_alloc(SZ_2M, alloc_size);
igt_assert(alloc);
+ if (flags & PREFERRED_LOC_SMEM) {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(alloc), alloc_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ if (err)
+ igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
+ strerror(errno), vm, to_user_pointer(alloc), alloc_size);
+ }
+
memset(alloc, 5, alloc_size);
flags &= ~SHARED_ALLOC;
}
@@ -1831,6 +1965,7 @@ igt_main
{ NULL },
};
int fd;
+ int num_sections;
igt_fixture {
struct xe_device *xe;
@@ -1843,7 +1978,21 @@ igt_main
open_sync_file();
}
- for (const struct section *s = sections; s->name; s++) {
+
+ num_sections = 0;
+ for (const struct section *s = sections; s[num_sections].name; num_sections++)
+ ;
+
+ for (int i = 0; i < num_sections * 2; i++) {
+ struct section *s = &sections[i % num_sections];
+
+ if (i/num_sections == 0) {
+ static char modified_name[256];
+ snprintf(modified_name, sizeof(modified_name), "%s-preferred-loc-smem", s->name);
+ s->name = modified_name;
+ s->flags |= PREFERRED_LOC_SMEM;
+ }
+
igt_subtest_f("once-%s", s->name)
xe_for_each_engine(fd, hwe)
test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (3 preceding siblings ...)
2025-08-28 16:58 ` [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test nishit.sharma
@ 2025-08-28 16:58 ` nishit.sharma
2025-08-29 14:37 ` Gurram, Pravalika
` (2 more replies)
2025-08-29 2:40 ` ✓ Xe.CI.BAT: success for Madvise Tests in IGT (rev5) Patchwork
` (3 subsequent siblings)
8 siblings, 3 replies; 19+ messages in thread
From: nishit.sharma @ 2025-08-28 16:58 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
ATOMIC_BATCH flag is introduced when true MI_ATOMIC | MI_ATOMIC_INC
operation will be called. This will avoid writing another function which
performs atomic increment operations. ATOMIC_BATCH flag is passed as
argument in write_dword() if true then value will be written on passed
address and incremented by ATOMIC_INC operation. For all memory
operations this flag will be used to verify if ATOMIC operation is
working or not.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
lib/xe/xe_ioctl.c | 18 +-
tests/intel/xe_exec_system_allocator.c | 545 ++++++++++++++++++++-----
2 files changed, 445 insertions(+), 118 deletions(-)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 4ab2ef39c..71a427b4d 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -688,19 +688,26 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
madvise.start = addr;
madvise.range = range;
- if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+ switch (type) {
+ case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
madvise.atomic.val = op_val;
- } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
+ break;
+
+ case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
madvise.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
madvise.preferred_mem_loc.devmem_fd = op_val;
madvise.preferred_mem_loc.migration_policy = policy;
igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
madvise.preferred_mem_loc.devmem_fd);
- } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+ break;
+
+ case DRM_XE_MEM_RANGE_ATTR_PAT:
madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
madvise.pat_index.val = op_val;
- } else {
+ break;
+
+ default:
igt_warn("Unknown attribute\n");
return -EINVAL;
}
@@ -730,6 +737,5 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
{
- igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy), 0);
- return 0;
+ return __xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy);
}
diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
index 70ca5fc2e..d0a8431a2 100644
--- a/tests/intel/xe_exec_system_allocator.c
+++ b/tests/intel/xe_exec_system_allocator.c
@@ -21,6 +21,7 @@
#include "lib/intel_reg.h"
#include "xe_drm.h"
+#include "intel_pat.h"
#include "xe/xe_gt.h"
#include "xe/xe_ioctl.h"
#include "xe/xe_query.h"
@@ -29,6 +30,14 @@
#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
#define QUARTER_SEC (NSEC_PER_SEC / 4)
#define FIVE_SEC (5LL * NSEC_PER_SEC)
+struct test_exec_data {
+ uint32_t batch[32];
+ uint64_t pad;
+ uint64_t vm_sync;
+ uint64_t exec_sync;
+ uint32_t data;
+ uint32_t expected_data;
+};
struct batch_data {
uint32_t batch[16];
@@ -37,6 +46,7 @@ struct batch_data {
uint32_t expected_data;
};
+#define VAL_ATOMIC_EXPECTED 56
#define WRITE_VALUE(data__, i__) ({ \
if (!(data__)->expected_data) \
(data__)->expected_data = rand() << 12 | (i__); \
@@ -53,10 +63,19 @@ static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
batch[(*idx)++] = wdata;
}
-static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
- int *idx)
+static void write_dword(struct test_exec_data *data, uint64_t sdi_addr, uint32_t wdata,
+ int *idx, bool atomic)
{
- __write_dword(batch, sdi_addr, wdata, idx);
+ uint32_t *batch = data->batch;
+
+ if (atomic) {
+ data->data = 55;
+ batch[(*idx)++] = MI_ATOMIC | MI_ATOMIC_INC;
+ batch[(*idx)++] = sdi_addr;
+ batch[(*idx)++] = sdi_addr >> 32;
+ } else {
+ __write_dword(batch, sdi_addr, wdata, idx);
+ }
batch[(*idx)++] = MI_BATCH_BUFFER_END;
}
@@ -271,7 +290,7 @@ check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
uint64_t alloc_size, uint64_t stride,
- struct timespec *tv, uint64_t *submit)
+ struct timespec *tv, uint64_t *submit, bool atomic)
{
struct drm_xe_sync sync[1] = {
{ .type = DRM_XE_SYNC_TYPE_USER_FENCE,
@@ -302,7 +321,8 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
uint64_t sdi_addr = addr + sdi_offset;
int b = 0;
- write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
+ write_dword((struct test_exec_data *)data, sdi_addr, WRITE_VALUE(data, i),
+ &b, atomic ? true : false);
igt_assert(b <= ARRAY_SIZE(data->batch));
}
@@ -407,34 +427,45 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
#define MAX_N_EXEC_QUEUES 16
-#define MMAP (0x1 << 0)
-#define NEW (0x1 << 1)
-#define BO_UNMAP (0x1 << 2)
-#define FREE (0x1 << 3)
-#define BUSY (0x1 << 4)
-#define BO_MAP (0x1 << 5)
-#define RACE (0x1 << 6)
-#define SKIP_MEMSET (0x1 << 7)
-#define FAULT (0x1 << 8)
-#define FILE_BACKED (0x1 << 9)
-#define LOCK (0x1 << 10)
-#define MMAP_SHARED (0x1 << 11)
-#define HUGE_PAGE (0x1 << 12)
-#define SHARED_ALLOC (0x1 << 13)
-#define FORK_READ (0x1 << 14)
-#define FORK_READ_AFTER (0x1 << 15)
-#define MREMAP (0x1 << 16)
-#define DONTUNMAP (0x1 << 17)
-#define READ_ONLY_REMAP (0x1 << 18)
-#define SYNC_EXEC (0x1 << 19)
-#define EVERY_OTHER_CHECK (0x1 << 20)
-#define MULTI_FAULT (0x1 << 21)
-#define PREFETCH (0x1 << 22)
-#define THREADS (0x1 << 23)
-#define PROCESSES (0x1 << 24)
-#define PREFETCH_BENCHMARK (0x1 << 25)
-#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
-#define PREFERRED_LOC_SMEM (0x1 << 27)
+#define MMAP (0x1 << 0)
+#define NEW (0x1 << 1)
+#define BO_UNMAP (0x1 << 2)
+#define FREE (0x1 << 3)
+#define BUSY (0x1 << 4)
+#define BO_MAP (0x1 << 5)
+#define RACE (0x1 << 6)
+#define SKIP_MEMSET (0x1 << 7)
+#define FAULT (0x1 << 8)
+#define FILE_BACKED (0x1 << 9)
+#define LOCK (0x1 << 10)
+#define MMAP_SHARED (0x1 << 11)
+#define HUGE_PAGE (0x1 << 12)
+#define SHARED_ALLOC (0x1 << 13)
+#define FORK_READ (0x1 << 14)
+#define FORK_READ_AFTER (0x1 << 15)
+#define MREMAP (0x1 << 16)
+#define DONTUNMAP (0x1 << 17)
+#define READ_ONLY_REMAP (0x1 << 18)
+#define SYNC_EXEC (0x1 << 19)
+#define EVERY_OTHER_CHECK (0x1 << 20)
+#define MULTI_FAULT (0x1 << 21)
+#define PREFETCH (0x1 << 22)
+#define THREADS (0x1 << 23)
+#define PROCESSES (0x1 << 24)
+#define PREFETCH_BENCHMARK (0x1 << 25)
+#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
+#define PREFERRED_LOC_SMEM (0x1 << 27)
+#define ATOMIC_BATCH (0x1 << 28)
+#define MIGRATE_ALL_PAGES (0x1 << 29)
+#define PREFERRED_LOC_ATOMIC_DEVICE (0x1 << 30)
+#define PREFERRED_LOC_ATOMIC_GL (0x1ull << 31)
+#define PREFERRED_LOC_ATOMIC_CPU (0x1ull << 32)
+#define MADVISE_MULTI_VMA (0x1ull << 33)
+#define MADVISE_SPLIT_VMA (0x1ull << 34)
+#define MADVISE_ATOMIC_VMA (0x1ull << 35)
+#define PREFETCH_SPLIT_VMA (0x1ull << 36)
+#define PREFETCH_CHANGE_ATTR (0x1ull << 37)
+#define PREFETCH_SAME_ATTR (0x1ull << 38)
#define N_MULTI_FAULT 4
@@ -478,6 +509,47 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
* SUBTEST: processes-evict-malloc-mix-bo
* Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
* Test category: stress test
+ *
+ * SUBTEST: madvise-multi-vma
+ * Description: performs multiple madvise operations on multiple virtual memory area using atomic device attributes
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-split-vma
+ * Description: perform madvise operations on multiple type VMAs (BO and CPU VMAs)
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-atomic-vma
+ * Description: perform madvise atomic operations on BO in VRAM/SMEM if atomic ATTR global/device
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-split-vma-with-mapping
+ * Description: performs prefetch and page migration
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-preffered-loc-atomic-vram
+ * Description: performs both atomic and preferred loc madvise operations with atomic device attributes set
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-preffered-loc-atomic-gl
+ * Description: performs both atomic and preferred loc madvise operations with atomic global attributes set
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-preffered-loc-atomic-cpu
+ * Description: performs both atomic and preferred loc madvise operations with atomic cpu attributes set
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-preffered-loc-sram-migrate-pages
+ * Description: performs preferred loc madvise operations and migrates all pages to smem
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-no-range-invalidate-same-attr
+ * Description: performs atomic global madvise operation, prefetch and again madvise operation with same atomic attribute
+ * Test category: functionality test
+ *
+ * SUBTEST: madvise-range-invalidate-change-attr
+ * Description: performs atomic global madvise operation, prefetch and again madvise operation with different atomic attribute
+ * Test category: functionality test
+ *
*/
static void
@@ -544,7 +616,7 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
allocs[i] = alloc;
touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
- &tv, &submit);
+ &tv, &submit, flags & ATOMIC_BATCH);
}
if (barrier)
@@ -692,7 +764,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
.num_syncs = 1,
.syncs = to_user_pointer(sync),
};
- struct {
+ struct batch_data {
uint32_t batch[16];
uint64_t pad;
uint64_t vm_sync;
@@ -750,7 +822,8 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
uint64_t sdi_addr = addr + sdi_offset;
int b = 0;
- write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
+ write_dword((struct test_exec_data *)&data[i], sdi_addr, WRITE_VALUE(&data[i], i),
+ &b, flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[i].batch));
if (!i)
@@ -773,7 +846,10 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
exec_queue, FIVE_SEC);
if (i || (flags & CPU_FAULT))
- igt_assert_eq(data[i].data, READ_VALUE(&data[i]));
+ igt_assert_eq(data[i].data,
+ flags & ATOMIC_BATCH
+ ? VAL_ATOMIC_EXPECTED
+ : READ_VALUE(&data[i]));
exec_ufence[0] = 0;
if (!i) {
@@ -1001,48 +1077,47 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
* @mmap-free-huge-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
* @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
* @mmap-free-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
- * @mmap-free-race-nomemset-preferred-loc-smem:
- * @mmap-free-race-preferred-loc-smem:
- * @mmap-huge-nomemset-preferred-loc-smem:
- * @mmap-huge-preferred-loc-smem:
- * @mmap-mlock-nomemset-preferred-loc-smem:
- * @mmap-mlock-preferred-loc-smem:
- * @mmap-new-huge-nomemset-preferred-loc-smem:
- * @mmap-new-huge-preferred-loc-smem:
- * @mmap-new-nomemset-preferred-loc-smem:
- * @mmap-new-preferred-loc-smem:
- * @mmap-new-race-nomemset-preferred-loc-smem:
- * @mmap-new-race-preferred-loc-smem:
- * @mmap-nomemset-preferred-loc-smem:
- * @mmap-preferred-loc-smem:
- * @mmap-prefetch-preferred-loc-smem:
- * @mmap-prefetch-shared-preferred-loc-smem:
- * @mmap-race-nomemset-preferred-loc-smem:
- * @mmap-race-preferred-loc-smem:
- * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
- * @mmap-remap-dontunmap-preferred-loc-smem:
- * @mmap-remap-eocheck-preferred-loc-smem:
- * @mmap-remap-preferred-loc-smem:
- * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
- * @mmap-remap-ro-dontunmap-preferred-loc-smem:
- * @mmap-remap-ro-eocheck-preferred-loc-smem:
- * @mmap-remap-ro-preferred-loc-smem:
- * @mmap-shared-nomemset-preferred-loc-smem:
- * @mmap-shared-preferred-loc-smem:
- * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
- * @mmap-shared-remap-dontunmap-preferred-loc-smem:
- * @mmap-shared-remap-eocheck-preferred-loc-smem:
- * @mmap-shared-remap-preferred-loc-smem:
- * @new-bo-map-nomemset-preferred-loc-smem:
- * @new-bo-map-preferred-loc-smem:
- * @new-busy-nomemset-preferred-loc-smem:
- * @new-busy-preferred-loc-smem:
- * @new-nomemset-preferred-loc-smem:
- * @new-preferred-loc-smem:
- * @new-prefetch-preferred-loc-smem:
- * @new-race-nomemset-preferred-loc-smem:
- * @new-race-preferred-loc-smem:
- * @prefetch-benchmark:
+ * @mmap-free-race-nomemset-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
+ * @mmap-free-race-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
+ * @mmap-huge-nomemset-preferred-loc-smem: mmap huge page single buffer for all execs, skips memset and perform madvise operation
+ * @mmap-huge-preferred-loc-smem: mmap huge page single buffer for all execs, perform madvise operation
+ * @mmap-mlock-nomemset-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory, skipping memset
+ * @mmap-mlock-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory
+ * @mmap-new-huge-nomemset-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory and skipping memset
+ * @mmap-new-huge-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory
+ * @mmap-new-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory and skipping memset
+ * @mmap-new-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
+ * @mmap-new-race-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory and skipping memset
+ * @mmap-new-race-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
+ * @mmap-nomemset-preferred-loc-smem: mmap of a buffer with preferred location set to system memory, skipping memset
+ * @mmap-preferred-loc-smem: mmap of a buffer with preferred location set to system memory
+ * @mmap-prefetch-preferred-loc-smem: prefetching mmap buffer with preferred location set to system memory
+ * @mmap-prefetch-shared-preferred-loc-smem: mmap of a shared buffer with prefetch and preferred location set to system memory
+ * @mmap-race-nomemset-preferred-loc-smem: Tests mmap of a buffer with preferred location set to system memory, skipping memset
+ * @mmap-race-preferred-loc-smem: mmap buffer with race between GPU and CPU access with preferred location set to system memory
+ * @mmap-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
+ * @mmap-remap-dontunmap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
+ * @mmap-remap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
+ * @mmap-remap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
+ * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
+ * @mmap-remap-ro-dontunmap-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
+ * @mmap-remap-ro-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
+ * @mmap-remap-ro-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
+ * @mmap-shared-nomemset-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory, skipping memset
+ * @mmap-shared-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory
+ * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory, does not unmap after use
+ * @mmap-shared-remap-dontunmap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory
+ * @mmap-shared-remap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory with end of check validation
+ * @mmap-shared-remap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory without end of check validation
+ * @new-bo-map-nomemset-preferred-loc-smem: Tests allocation and mapping of a new buffer object with preferred location set to system memory, skipping memset
+ * @new-bo-map-preferred-loc-smem: Tests allocation and mapping of a new buffer object with preferred location set to system memory
+ * @new-busy-nomemset-preferred-loc-smem: Tests allocation and usage of a new busy buffer object with preferred location set to system memory, skipping memset
+ * @new-busy-preferred-loc-smem: Tests allocation and usage of a new busy buffer object with preferred location set to system memory
+ * @new-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
+ * @new-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory
+ * @new-prefetch-preferred-loc-smem: Tests allocation and prefetching of a new buffer object with preferred location set to system memory
+ * @new-race-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
+ * @new-race-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory
*
* SUBTEST: prefetch-benchmark
* Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
@@ -1072,16 +1147,6 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
* Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
* Test category: stress test
*/
-
-struct test_exec_data {
- uint32_t batch[32];
- uint64_t pad;
- uint64_t vm_sync;
- uint64_t exec_sync;
- uint32_t data;
- uint32_t expected_data;
-};
-
static void igt_require_hugepages(void)
{
igt_skip_on_f(!igt_get_meminfo("HugePages_Total"),
@@ -1090,11 +1155,37 @@ static void igt_require_hugepages(void)
"No huge pages available!\n");
}
+static int
+xe_vm_madvixe_pat_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
+ int pat_index)
+{
+ return xe_vm_madvise(fd, vm, addr, range, 0,
+ DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0);
+}
+
+static int
+xe_vm_madvise_atomic_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
+ int mem_attr)
+{
+ return xe_vm_madvise(fd, vm, addr, range, 0,
+ DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ mem_attr, 0);
+}
+
+static int
+xe_vm_madvise_migrate_pages(int fd, uint32_t vm, uint64_t addr, uint64_t range)
+{
+ return xe_vm_madvise(fd, vm, addr, range, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
+ DRM_XE_MIGRATE_ALL_PAGES);
+}
+
static void
test_exec(int fd, struct drm_xe_engine_class_instance *eci,
int n_exec_queues, int n_execs, size_t bo_size,
size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
- unsigned int flags)
+ unsigned long long flags)
{
uint64_t addr;
struct drm_xe_sync sync[1] = {
@@ -1107,9 +1198,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
.syncs = to_user_pointer(sync),
};
uint32_t exec_queues[MAX_N_EXEC_QUEUES];
- struct test_exec_data *data, *next_data = NULL;
+ struct test_exec_data *data, *next_data = NULL, *org_data;
uint32_t bo_flags;
uint32_t bo = 0, bind_sync = 0;
+ uint32_t val;
void **pending_free;
u64 *exec_ufence = NULL, *bind_ufence = NULL;
int i, j, b, file_fd = -1, prev_idx, pf_count, err;
@@ -1234,6 +1326,133 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
strerror(errno), vm, to_user_pointer(data), bo_size);
}
+ if (flags & PREFERRED_LOC_SMEM) {
+ if (flags & MIGRATE_ALL_PAGES) {
+ err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
+ if (err)
+ igt_warn("failure in page migration err = %s, vm =%u, data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ } else {
+ err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
+ DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
+ DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
+ }
+ }
+ if (flags & PREFERRED_LOC_ATOMIC_DEVICE) {
+ err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
+ if (err)
+ igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_DEVICE);
+ if (err)
+ igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+ if (flags & PREFERRED_LOC_ATOMIC_GL) {
+ err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
+ if (err)
+ igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_GLOBAL);
+ if (err)
+ igt_warn("failure in atomic global attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+ if (flags & PREFERRED_LOC_ATOMIC_CPU) {
+ err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
+ if (err)
+ igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_CPU);
+ if (err)
+ igt_warn("failure in atomic cpu attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+ if (flags & MADVISE_MULTI_VMA) {
+ if (bo_size)
+ bo_size = ALIGN(bo_size, SZ_4K);
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size/2,
+ bo_size/2, DRM_XE_ATOMIC_DEVICE);
+ if (err)
+ igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size,
+ bo_size, DRM_XE_ATOMIC_DEVICE);
+ if (err)
+ igt_warn("failure in atomic multi_vma err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
+ if (err)
+ igt_warn("failure in pat attr index 4 err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
+ if (err)
+ igt_warn("failure in pat attr index 3 err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+
+ err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data) + bo_size/2, bo_size/2,
+ intel_get_pat_idx_wb(fd));
+ if (err)
+ igt_warn("failure in pat attr index 8 err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+ if (flags & MADVISE_SPLIT_VMA) {
+ if (bo_size)
+ bo_size = ALIGN(bo_size, SZ_4K);
+
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size,
+ vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data) + bo_size/2,
+ bo_size/2, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data) + bo_size/2,
+ bo_size/2, DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data),
+ bo_size/2, DRM_XE_ATOMIC_DEVICE);
+ if (err)
+ igt_warn("failure in split atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+ if (flags & MADVISE_ATOMIC_VMA) {
+ if (bo_size)
+ bo_size = ALIGN(bo_size, SZ_4K);
+
+ bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
+ bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), bo_flags);
+ xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data), bo_size, 0, 0);
+
+ __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data), bo_size,
+ DRM_XE_VM_BIND_OP_MAP,
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
+ 1, 0, 0);
+ xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
+ data[0].vm_sync = 0;
+ gem_close(fd, bo);
+ bo = 0;
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
+ DRM_XE_ATOMIC_GLOBAL);
+ if (err)
+ igt_warn("failure in atomic vma err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ }
+
if (flags & BO_UNMAP) {
bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
bo = xe_bo_create(fd, vm, bo_size,
@@ -1307,6 +1526,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
bool fault_inject = (FAULT & flags) && i == n_execs / 2;
bool fault_injected = (FAULT & flags) && i > n_execs;
+ if (flags & MADVISE_MULTI_VMA) {
+ addr = addr + bo_size;
+ org_data = data;
+ data = from_user_pointer(addr);
+ batch_offset = (char *)&(data[idx].batch) - (char *)data;
+ batch_addr = addr + batch_offset;
+ sdi_offset = (char *)&(data[idx].data) - (char *)data;
+ sdi_addr = addr + sdi_offset;
+ }
+
if (barrier)
pthread_barrier_wait(barrier);
@@ -1316,18 +1545,74 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
__write_dword(data[idx].batch,
sdi_addr + j * orig_size,
WRITE_VALUE(&data[idx], idx), &b);
- write_dword(data[idx].batch, sdi_addr + j * orig_size,
- WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(&data[idx], sdi_addr + j * orig_size,
+ WRITE_VALUE(&data[idx], idx), &b,
+ flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[idx].batch));
} else if (!(flags & EVERY_OTHER_CHECK)) {
b = 0;
- write_dword(data[idx].batch, sdi_addr,
- WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(&data[idx], sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b,
+ flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ if (flags & PREFETCH) {
+ if (flags & PREFETCH_SPLIT_VMA) {
+ bo_size = ALIGN(bo_size, SZ_4K);
+
+ xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, 0);
+
+ igt_info("num_vmas before madvise = %d \n", val);
+
+ val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
+
+ err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size/2);
+ if (err)
+ igt_warn("failure in prefetch split vma err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ igt_info("num_vmas after madvise= %d \n", val);
+ val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
+ } else if (flags & PREFETCH_SAME_ATTR) {
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_GLOBAL);
+ if (err)
+ igt_warn("failure in prefetch same attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
+ xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
+ DRM_XE_ATOMIC_GLOBAL);
+ if (err)
+ igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ } else if (flags & PREFETCH_CHANGE_ATTR) {
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_GLOBAL);
+ if (err)
+ igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
+
+ xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
+
+ err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
+ DRM_XE_ATOMIC_DEVICE);
+ if (err)
+ igt_warn("failure in prefetch change attr err = %s, vm =%u data=%"PRIu64"\n",
+ strerror(errno), vm, to_user_pointer(data));
+ val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
+ }
+ } else {
+ b = 0;
+ write_dword((struct test_exec_data *)&data[idx], sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b,
+ flags & ATOMIC_BATCH ? true : false);
+ igt_assert(b <= ARRAY_SIZE(data[idx].batch));
+ }
} else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
b = 0;
- write_dword(data[idx].batch, sdi_addr,
- WRITE_VALUE(&data[idx], idx), &b);
+ write_dword(&data[idx], sdi_addr,
+ WRITE_VALUE(&data[idx], idx), &b,
+ flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[idx].batch));
aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
@@ -1346,10 +1631,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
__aligned_partial_free(&aligned_alloc_type);
b = 0;
- write_dword(data[next_idx].batch,
+ write_dword(&data[next_idx],
to_user_pointer(next_data) +
(char *)&data[next_idx].data - (char *)data,
- WRITE_VALUE(&data[next_idx], next_idx), &b);
+ WRITE_VALUE(&data[next_idx], next_idx), &b,
+ flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
}
@@ -1404,9 +1690,18 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
exec_queues[e], &timeout);
igt_assert(err == -ETIME || err == -EIO);
} else {
- xe_wait_ufence(fd, exec_ufence ? exec_ufence :
- &data[idx].exec_sync, USER_FENCE_VALUE,
- exec_queues[e], FIVE_SEC);
+ if (flags & PREFERRED_LOC_ATOMIC_CPU) {
+ int64_t timeout = QUARTER_SEC;
+ err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync,
+ USER_FENCE_VALUE,
+ exec_queues[e], &timeout);
+ if (err)
+ goto cleanup;
+ } else
+ xe_wait_ufence(fd, exec_ufence ? exec_ufence :
+ &data[idx].exec_sync, USER_FENCE_VALUE,
+ exec_queues[e], FIVE_SEC);
if (flags & LOCK && !i)
munlock(data, bo_size);
@@ -1456,17 +1751,17 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
if (flags & FORK_READ) {
igt_fork(child, 1)
igt_assert_eq(data[idx].data,
- READ_VALUE(&data[idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
if (!(flags & FORK_READ_AFTER))
igt_assert_eq(data[idx].data,
- READ_VALUE(&data[idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
igt_waitchildren();
if (flags & FORK_READ_AFTER)
igt_assert_eq(data[idx].data,
- READ_VALUE(&data[idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
} else {
igt_assert_eq(data[idx].data,
- READ_VALUE(&data[idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
if (flags & PREFETCH_SYS_BENCHMARK) {
struct timespec tv = {};
@@ -1494,13 +1789,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
((void *)data) + j * orig_size;
igt_assert_eq(__data[idx].data,
- READ_VALUE(&data[idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
}
}
}
if (flags & EVERY_OTHER_CHECK)
igt_assert_eq(data[prev_idx].data,
- READ_VALUE(&data[prev_idx]));
+ flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[prev_idx]));
}
}
@@ -1521,6 +1816,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
gem_close(fd, bo);
}
+ if (flags & MADVISE_MULTI_VMA)
+ data = org_data;
+
if (flags & NEW) {
if (flags & MMAP) {
if (flags & FREE)
@@ -1610,6 +1908,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
pf_count, pf_count_after);
}
+cleanup:
if (bo) {
sync[0].addr = to_user_pointer(bind_ufence);
__xe_vm_bind_assert(fd, vm, 0,
@@ -1864,7 +2163,7 @@ processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
struct section {
const char *name;
- unsigned int flags;
+ unsigned long long flags;
};
igt_main
@@ -1964,6 +2263,19 @@ igt_main
{ "malloc-mix-bo", MIX_BO_ALLOC },
{ NULL },
};
+ const struct section msections[] = {
+ { "preffered-loc-sram-migrate-pages", PREFERRED_LOC_SMEM | MIGRATE_ALL_PAGES | ATOMIC_BATCH },
+ { "preffered-loc-atomic-vram", PREFERRED_LOC_ATOMIC_DEVICE | ATOMIC_BATCH },
+ { "preffered-loc-atomic-gl", PREFERRED_LOC_ATOMIC_GL | ATOMIC_BATCH },
+ { "preffered-loc-atomic-cpu", PREFERRED_LOC_ATOMIC_CPU | ATOMIC_BATCH },
+ { "multi-vma", MADVISE_MULTI_VMA | ATOMIC_BATCH },
+ { "split-vma", MADVISE_SPLIT_VMA | ATOMIC_BATCH },
+ { "atomic-vma", MADVISE_ATOMIC_VMA | ATOMIC_BATCH },
+ { "split-vma-with-mapping", PREFETCH | PREFETCH_SPLIT_VMA | ATOMIC_BATCH },
+ { "range-invalidate-change-attr", PREFETCH | PREFETCH_CHANGE_ATTR | ATOMIC_BATCH },
+ { "no-range-invalidate-same-attr", PREFETCH | PREFETCH_SAME_ATTR | ATOMIC_BATCH },
+ { NULL },
+ };
int fd;
int num_sections;
@@ -1983,10 +2295,11 @@ igt_main
for (const struct section *s = sections; s[num_sections].name; num_sections++)
;
- for (int i = 0; i < num_sections * 2; i++) {
- struct section *s = §ions[i % num_sections];
+ for (int i = 0; i < num_sections * 3; i++) {
+ struct section p = sections[i % num_sections];
+ struct section *s = &p;
- if (i/num_sections == 0) {
+ if (i/num_sections == 1) {
static char modified_name[256];
snprintf(modified_name, sizeof(modified_name), "%s-preferred-loc-smem", s->name);
s->name = modified_name;
@@ -2175,6 +2488,14 @@ igt_main
processes_evict(fd, SZ_8M, SZ_1M, s->flags);
}
+ for (const struct section *s = msections; s->name; s++) {
+ igt_subtest_f("madvise-%s", s->name) {
+ xe_for_each_engine(fd, hwe)
+ test_exec(fd, hwe, 1, 1, SZ_64K, 0, 0, NULL,
+ NULL, s->flags);
+ }
+ }
+
igt_fixture {
xe_device_put(fd);
drm_close_driver(fd);
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* ✓ Xe.CI.BAT: success for Madvise Tests in IGT (rev5)
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (4 preceding siblings ...)
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
@ 2025-08-29 2:40 ` Patchwork
2025-08-29 2:55 ` ✓ i915.CI.BAT: " Patchwork
` (2 subsequent siblings)
8 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2025-08-29 2:40 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 2148 bytes --]
== Series Details ==
Series: Madvise Tests in IGT (rev5)
URL : https://patchwork.freedesktop.org/series/153335/
State : success
== Summary ==
CI Bug Log - changes from XEIGT_8514_BAT -> XEIGTPW_13656_BAT
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Participating hosts (11 -> 11)
------------------------------
No changes in participating hosts
Known issues
------------
Here are the changes found in XEIGTPW_13656_BAT that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_flip@basic-flip-vs-dpms:
- bat-adlp-7: [PASS][1] -> [DMESG-WARN][2] ([Intel XE#4543]) +1 other test dmesg-warn
[1]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/bat-adlp-7/igt@kms_flip@basic-flip-vs-dpms.html
[2]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/bat-adlp-7/igt@kms_flip@basic-flip-vs-dpms.html
* igt@xe_pat@pat-index-xe2@render:
- bat-bmg-2: [PASS][3] -> [FAIL][4] ([Intel XE#5507]) +1 other test fail
[3]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/bat-bmg-2/igt@xe_pat@pat-index-xe2@render.html
[4]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/bat-bmg-2/igt@xe_pat@pat-index-xe2@render.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[Intel XE#4543]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/4543
[Intel XE#5507]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5507
[Intel XE#5783]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/5783
Build changes
-------------
* IGT: IGT_8514 -> IGTPW_13656
* Linux: xe-3631-a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97 -> xe-3634-e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14
IGTPW_13656: 13656
IGT_8514: 8514
xe-3631-a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97: a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97
xe-3634-e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14: e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/index.html
[-- Attachment #2: Type: text/html, Size: 2692 bytes --]
^ permalink raw reply [flat|nested] 19+ messages in thread
* ✓ i915.CI.BAT: success for Madvise Tests in IGT (rev5)
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (5 preceding siblings ...)
2025-08-29 2:40 ` ✓ Xe.CI.BAT: success for Madvise Tests in IGT (rev5) Patchwork
@ 2025-08-29 2:55 ` Patchwork
2025-08-29 12:11 ` ✓ Xe.CI.Full: " Patchwork
2025-08-29 13:38 ` ✓ i915.CI.Full: " Patchwork
8 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2025-08-29 2:55 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 5068 bytes --]
== Series Details ==
Series: Madvise Tests in IGT (rev5)
URL : https://patchwork.freedesktop.org/series/153335/
State : success
== Summary ==
CI Bug Log - changes from IGT_8514 -> IGTPW_13656
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/index.html
Participating hosts (43 -> 41)
------------------------------
Missing (2): bat-rpls-4 fi-snb-2520m
Known issues
------------
Here are the changes found in IGTPW_13656 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@dmabuf@all-tests:
- bat-apl-1: [PASS][1] -> [ABORT][2] ([i915#12904]) +1 other test abort
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/bat-apl-1/igt@dmabuf@all-tests.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/bat-apl-1/igt@dmabuf@all-tests.html
* igt@dmabuf@all-tests@dma_fence_chain:
- fi-bsw-nick: [PASS][3] -> [ABORT][4] ([i915#12904]) +1 other test abort
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/fi-bsw-nick/igt@dmabuf@all-tests@dma_fence_chain.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/fi-bsw-nick/igt@dmabuf@all-tests@dma_fence_chain.html
* igt@i915_module_load@load:
- bat-mtlp-9: [PASS][5] -> [DMESG-WARN][6] ([i915#13494])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/bat-mtlp-9/igt@i915_module_load@load.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/bat-mtlp-9/igt@i915_module_load@load.html
* igt@i915_selftest@live:
- bat-mtlp-8: [PASS][7] -> [DMESG-FAIL][8] ([i915#12061]) +1 other test dmesg-fail
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/bat-mtlp-8/igt@i915_selftest@live.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/bat-mtlp-8/igt@i915_selftest@live.html
- bat-adlp-6: [PASS][9] -> [ABORT][10] ([i915#14365]) +1 other test abort
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/bat-adlp-6/igt@i915_selftest@live.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/bat-adlp-6/igt@i915_selftest@live.html
* igt@i915_selftest@live@workarounds:
- bat-mtlp-9: [PASS][11] -> [DMESG-FAIL][12] ([i915#12061]) +1 other test dmesg-fail
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/bat-mtlp-9/igt@i915_selftest@live@workarounds.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/bat-mtlp-9/igt@i915_selftest@live@workarounds.html
#### Possible fixes ####
* igt@dmabuf@all-tests@dma_fence_chain:
- fi-bsw-n3050: [ABORT][13] ([i915#12904]) -> [PASS][14] +1 other test pass
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/fi-bsw-n3050/igt@dmabuf@all-tests@dma_fence_chain.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/fi-bsw-n3050/igt@dmabuf@all-tests@dma_fence_chain.html
* igt@i915_selftest@live@sanitycheck:
- fi-kbl-7567u: [DMESG-WARN][15] ([i915#13735]) -> [PASS][16] +79 other tests pass
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/fi-kbl-7567u/igt@i915_selftest@live@sanitycheck.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/fi-kbl-7567u/igt@i915_selftest@live@sanitycheck.html
* igt@kms_busy@basic@flip:
- fi-kbl-7567u: [DMESG-WARN][17] ([i915#13735] / [i915#180]) -> [PASS][18]
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/fi-kbl-7567u/igt@kms_busy@basic@flip.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/fi-kbl-7567u/igt@kms_busy@basic@flip.html
* igt@kms_pm_rpm@basic-pci-d3-state:
- fi-kbl-7567u: [DMESG-WARN][19] ([i915#13735] / [i915#13890] / [i915#180]) -> [PASS][20] +52 other tests pass
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_8514/fi-kbl-7567u/igt@kms_pm_rpm@basic-pci-d3-state.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/fi-kbl-7567u/igt@kms_pm_rpm@basic-pci-d3-state.html
[i915#12061]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12061
[i915#12904]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12904
[i915#13494]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13494
[i915#13735]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13735
[i915#13890]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13890
[i915#14365]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14365
[i915#180]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/180
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_8514 -> IGTPW_13656
* Linux: CI_DRM_17082 -> CI_DRM_17086
CI-20190529: 20190529
CI_DRM_17082: a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97 @ git://anongit.freedesktop.org/gfx-ci/linux
CI_DRM_17086: 669eae0e8de0328817f4ce7d3c4a3ef834850751 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_13656: 13656
IGT_8514: 8514
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/index.html
[-- Attachment #2: Type: text/html, Size: 6345 bytes --]
^ permalink raw reply [flat|nested] 19+ messages in thread
* ✓ Xe.CI.Full: success for Madvise Tests in IGT (rev5)
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (6 preceding siblings ...)
2025-08-29 2:55 ` ✓ i915.CI.BAT: " Patchwork
@ 2025-08-29 12:11 ` Patchwork
2025-08-29 13:38 ` ✓ i915.CI.Full: " Patchwork
8 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2025-08-29 12:11 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 18993 bytes --]
== Series Details ==
Series: Madvise Tests in IGT (rev5)
URL : https://patchwork.freedesktop.org/series/153335/
State : success
== Summary ==
CI Bug Log - changes from XEIGT_8514_FULL -> XEIGTPW_13656_FULL
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Participating hosts (4 -> 3)
------------------------------
Missing (1): shard-adlp
Known issues
------------
Here are the changes found in XEIGTPW_13656_FULL that come from known issues:
### IGT changes ###
#### Possible fixes ####
* igt@xe_module_load@load:
- shard-lnl: ([SKIP][1], [PASS][2], [PASS][3], [PASS][4], [PASS][5], [PASS][6], [PASS][7], [PASS][8], [PASS][9], [PASS][10], [PASS][11], [PASS][12], [PASS][13], [PASS][14], [PASS][15], [PASS][16], [PASS][17], [PASS][18], [PASS][19], [PASS][20], [PASS][21], [PASS][22], [PASS][23], [PASS][24], [PASS][25], [PASS][26]) ([Intel XE#378]) -> ([PASS][27], [PASS][28], [PASS][29], [PASS][30], [PASS][31], [PASS][32], [PASS][33], [PASS][34], [PASS][35], [PASS][36], [PASS][37], [PASS][38], [PASS][39], [PASS][40], [PASS][41], [PASS][42], [PASS][43], [PASS][44], [PASS][45], [PASS][46], [PASS][47], [PASS][48], [PASS][49], [PASS][50], [PASS][51])
[1]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-7/igt@xe_module_load@load.html
[2]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-2/igt@xe_module_load@load.html
[3]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-2/igt@xe_module_load@load.html
[4]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-3/igt@xe_module_load@load.html
[5]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-2/igt@xe_module_load@load.html
[6]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-5/igt@xe_module_load@load.html
[7]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-3/igt@xe_module_load@load.html
[8]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-3/igt@xe_module_load@load.html
[9]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-1/igt@xe_module_load@load.html
[10]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-7/igt@xe_module_load@load.html
[11]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-7/igt@xe_module_load@load.html
[12]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-7/igt@xe_module_load@load.html
[13]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-4/igt@xe_module_load@load.html
[14]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-4/igt@xe_module_load@load.html
[15]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-4/igt@xe_module_load@load.html
[16]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-3/igt@xe_module_load@load.html
[17]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-5/igt@xe_module_load@load.html
[18]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-1/igt@xe_module_load@load.html
[19]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-1/igt@xe_module_load@load.html
[20]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-2/igt@xe_module_load@load.html
[21]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-1/igt@xe_module_load@load.html
[22]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-8/igt@xe_module_load@load.html
[23]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-8/igt@xe_module_load@load.html
[24]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-8/igt@xe_module_load@load.html
[25]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-7/igt@xe_module_load@load.html
[26]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-lnl-5/igt@xe_module_load@load.html
[27]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-1/igt@xe_module_load@load.html
[28]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-1/igt@xe_module_load@load.html
[29]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-1/igt@xe_module_load@load.html
[30]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-1/igt@xe_module_load@load.html
[31]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-8/igt@xe_module_load@load.html
[32]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-8/igt@xe_module_load@load.html
[33]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-8/igt@xe_module_load@load.html
[34]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-8/igt@xe_module_load@load.html
[35]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-4/igt@xe_module_load@load.html
[36]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-4/igt@xe_module_load@load.html
[37]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-4/igt@xe_module_load@load.html
[38]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-3/igt@xe_module_load@load.html
[39]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-3/igt@xe_module_load@load.html
[40]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-3/igt@xe_module_load@load.html
[41]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-3/igt@xe_module_load@load.html
[42]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-2/igt@xe_module_load@load.html
[43]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-2/igt@xe_module_load@load.html
[44]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-2/igt@xe_module_load@load.html
[45]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-7/igt@xe_module_load@load.html
[46]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-7/igt@xe_module_load@load.html
[47]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-7/igt@xe_module_load@load.html
[48]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-5/igt@xe_module_load@load.html
[49]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-5/igt@xe_module_load@load.html
[50]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-5/igt@xe_module_load@load.html
[51]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-lnl-5/igt@xe_module_load@load.html
- shard-bmg: ([PASS][52], [PASS][53], [PASS][54], [PASS][55], [PASS][56], [PASS][57], [PASS][58], [PASS][59], [PASS][60], [SKIP][61], [PASS][62], [PASS][63], [PASS][64], [PASS][65], [PASS][66], [PASS][67], [PASS][68], [PASS][69], [PASS][70], [PASS][71], [PASS][72], [PASS][73], [PASS][74], [PASS][75], [PASS][76]) ([Intel XE#2457]) -> ([PASS][77], [PASS][78], [PASS][79], [PASS][80], [PASS][81], [PASS][82], [PASS][83], [PASS][84], [PASS][85], [PASS][86], [PASS][87], [PASS][88], [PASS][89], [PASS][90], [PASS][91], [PASS][92], [PASS][93], [PASS][94], [PASS][95], [PASS][96], [PASS][97], [PASS][98], [PASS][99], [PASS][100], [PASS][101])
[52]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-5/igt@xe_module_load@load.html
[53]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-5/igt@xe_module_load@load.html
[54]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-8/igt@xe_module_load@load.html
[55]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-4/igt@xe_module_load@load.html
[56]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-6/igt@xe_module_load@load.html
[57]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-6/igt@xe_module_load@load.html
[58]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-5/igt@xe_module_load@load.html
[59]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-5/igt@xe_module_load@load.html
[60]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-4/igt@xe_module_load@load.html
[61]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-7/igt@xe_module_load@load.html
[62]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-8/igt@xe_module_load@load.html
[63]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-7/igt@xe_module_load@load.html
[64]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-1/igt@xe_module_load@load.html
[65]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-1/igt@xe_module_load@load.html
[66]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-8/igt@xe_module_load@load.html
[67]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-8/igt@xe_module_load@load.html
[68]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-2/igt@xe_module_load@load.html
[69]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-2/igt@xe_module_load@load.html
[70]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-2/igt@xe_module_load@load.html
[71]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-6/igt@xe_module_load@load.html
[72]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-1/igt@xe_module_load@load.html
[73]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-3/igt@xe_module_load@load.html
[74]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-3/igt@xe_module_load@load.html
[75]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-3/igt@xe_module_load@load.html
[76]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-bmg-3/igt@xe_module_load@load.html
[77]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-4/igt@xe_module_load@load.html
[78]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-4/igt@xe_module_load@load.html
[79]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-4/igt@xe_module_load@load.html
[80]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-8/igt@xe_module_load@load.html
[81]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-8/igt@xe_module_load@load.html
[82]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-8/igt@xe_module_load@load.html
[83]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-1/igt@xe_module_load@load.html
[84]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-1/igt@xe_module_load@load.html
[85]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-1/igt@xe_module_load@load.html
[86]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-7/igt@xe_module_load@load.html
[87]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-7/igt@xe_module_load@load.html
[88]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-7/igt@xe_module_load@load.html
[89]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-2/igt@xe_module_load@load.html
[90]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-2/igt@xe_module_load@load.html
[91]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-2/igt@xe_module_load@load.html
[92]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-2/igt@xe_module_load@load.html
[93]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-6/igt@xe_module_load@load.html
[94]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-6/igt@xe_module_load@load.html
[95]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-6/igt@xe_module_load@load.html
[96]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-3/igt@xe_module_load@load.html
[97]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-3/igt@xe_module_load@load.html
[98]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-3/igt@xe_module_load@load.html
[99]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-5/igt@xe_module_load@load.html
[100]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-5/igt@xe_module_load@load.html
[101]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-bmg-5/igt@xe_module_load@load.html
- shard-dg2-set2: ([PASS][102], [PASS][103], [PASS][104], [PASS][105], [PASS][106], [PASS][107], [PASS][108], [PASS][109], [PASS][110], [PASS][111], [PASS][112], [PASS][113], [PASS][114], [PASS][115], [PASS][116], [PASS][117], [SKIP][118], [PASS][119], [PASS][120], [PASS][121], [PASS][122], [PASS][123], [PASS][124], [PASS][125], [PASS][126], [PASS][127]) ([Intel XE#378]) -> ([PASS][128], [PASS][129], [PASS][130], [PASS][131], [PASS][132], [PASS][133], [PASS][134], [PASS][135], [PASS][136], [PASS][137], [PASS][138], [PASS][139], [PASS][140], [PASS][141], [PASS][142], [PASS][143], [PASS][144], [PASS][145], [PASS][146], [PASS][147], [PASS][148], [PASS][149], [PASS][150], [PASS][151], [PASS][152])
[102]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-463/igt@xe_module_load@load.html
[103]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-463/igt@xe_module_load@load.html
[104]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-435/igt@xe_module_load@load.html
[105]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[106]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[107]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[108]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[109]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-435/igt@xe_module_load@load.html
[110]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[111]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[112]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[113]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-435/igt@xe_module_load@load.html
[114]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-432/igt@xe_module_load@load.html
[115]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-464/igt@xe_module_load@load.html
[116]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-464/igt@xe_module_load@load.html
[117]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-436/igt@xe_module_load@load.html
[118]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-433/igt@xe_module_load@load.html
[119]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-466/igt@xe_module_load@load.html
[120]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-466/igt@xe_module_load@load.html
[121]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-433/igt@xe_module_load@load.html
[122]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-433/igt@xe_module_load@load.html
[123]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-433/igt@xe_module_load@load.html
[124]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-436/igt@xe_module_load@load.html
[125]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-432/igt@xe_module_load@load.html
[126]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-434/igt@xe_module_load@load.html
[127]: https://intel-gfx-ci.01.org/tree/intel-xe/IGT_8514/shard-dg2-436/igt@xe_module_load@load.html
[128]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[129]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[130]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[131]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[132]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[133]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-434/igt@xe_module_load@load.html
[134]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-464/igt@xe_module_load@load.html
[135]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-464/igt@xe_module_load@load.html
[136]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-464/igt@xe_module_load@load.html
[137]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-433/igt@xe_module_load@load.html
[138]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-433/igt@xe_module_load@load.html
[139]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-436/igt@xe_module_load@load.html
[140]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-436/igt@xe_module_load@load.html
[141]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-436/igt@xe_module_load@load.html
[142]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-463/igt@xe_module_load@load.html
[143]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-463/igt@xe_module_load@load.html
[144]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-463/igt@xe_module_load@load.html
[145]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-466/igt@xe_module_load@load.html
[146]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-466/igt@xe_module_load@load.html
[147]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-466/igt@xe_module_load@load.html
[148]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-435/igt@xe_module_load@load.html
[149]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-435/igt@xe_module_load@load.html
[150]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-435/igt@xe_module_load@load.html
[151]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-432/igt@xe_module_load@load.html
[152]: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/shard-dg2-432/igt@xe_module_load@load.html
[Intel XE#2457]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/2457
[Intel XE#378]: https://gitlab.freedesktop.org/drm/xe/kernel/issues/378
Build changes
-------------
* IGT: IGT_8514 -> IGTPW_13656
* Linux: xe-3631-a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97 -> xe-3634-e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14
IGTPW_13656: 13656
IGT_8514: 8514
xe-3631-a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97: a59cf1d7fe0e19ce3a6e8cf2a20cfb24896baa97
xe-3634-e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14: e9c2ae006de881ad2d3cfee15fd4a25bc9cc3f14
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/intel-xe/IGTPW_13656/index.html
[-- Attachment #2: Type: text/html, Size: 19455 bytes --]
^ permalink raw reply [flat|nested] 19+ messages in thread
* ✓ i915.CI.Full: success for Madvise Tests in IGT (rev5)
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
` (7 preceding siblings ...)
2025-08-29 12:11 ` ✓ Xe.CI.Full: " Patchwork
@ 2025-08-29 13:38 ` Patchwork
8 siblings, 0 replies; 19+ messages in thread
From: Patchwork @ 2025-08-29 13:38 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev
[-- Attachment #1: Type: text/plain, Size: 149993 bytes --]
== Series Details ==
Series: Madvise Tests in IGT (rev5)
URL : https://patchwork.freedesktop.org/series/153335/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_17086_full -> IGTPW_13656_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/index.html
Participating hosts (11 -> 12)
------------------------------
Additional (1): shard-dg2-set2
Known issues
------------
Here are the changes found in IGTPW_13656_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@api_intel_bb@crc32:
- shard-tglu: NOTRUN -> [SKIP][1] ([i915#6230])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@api_intel_bb@crc32.html
* igt@device_reset@cold-reset-bound:
- shard-rkl: NOTRUN -> [SKIP][2] ([i915#11078])
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@device_reset@cold-reset-bound.html
* igt@fbdev@unaligned-read:
- shard-rkl: [PASS][3] -> [SKIP][4] ([i915#14544] / [i915#2582])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@fbdev@unaligned-read.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@fbdev@unaligned-read.html
* igt@gem_ccs@ctrl-surf-copy:
- shard-tglu-1: NOTRUN -> [SKIP][5] ([i915#3555] / [i915#9323])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@gem_ccs@ctrl-surf-copy.html
* igt@gem_ccs@ctrl-surf-copy-new-ctx:
- shard-rkl: NOTRUN -> [SKIP][6] ([i915#9323])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@gem_ccs@ctrl-surf-copy-new-ctx.html
* igt@gem_ccs@suspend-resume@linear-compressed-compfmt0-smem-lmem0:
- shard-dg2: [PASS][7] -> [INCOMPLETE][8] ([i915#13356])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-6/igt@gem_ccs@suspend-resume@linear-compressed-compfmt0-smem-lmem0.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@gem_ccs@suspend-resume@linear-compressed-compfmt0-smem-lmem0.html
* igt@gem_create@create-ext-set-pat:
- shard-dg2: NOTRUN -> [SKIP][9] ([i915#8562])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-6/igt@gem_create@create-ext-set-pat.html
* igt@gem_ctx_persistence@heartbeat-stop:
- shard-dg2: NOTRUN -> [SKIP][10] ([i915#8555])
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@gem_ctx_persistence@heartbeat-stop.html
- shard-dg1: NOTRUN -> [SKIP][11] ([i915#8555])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@gem_ctx_persistence@heartbeat-stop.html
- shard-mtlp: NOTRUN -> [SKIP][12] ([i915#8555])
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@gem_ctx_persistence@heartbeat-stop.html
* igt@gem_ctx_persistence@saturated-hostile-nopreempt@rcs0:
- shard-dg2-9: NOTRUN -> [SKIP][13] ([i915#5882]) +7 other tests skip
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_ctx_persistence@saturated-hostile-nopreempt@rcs0.html
* igt@gem_ctx_sseu@engines:
- shard-tglu-1: NOTRUN -> [SKIP][14] ([i915#280])
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@gem_ctx_sseu@engines.html
- shard-dg2-9: NOTRUN -> [SKIP][15] ([i915#280])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_ctx_sseu@engines.html
* igt@gem_exec_balancer@bonded-dual:
- shard-mtlp: NOTRUN -> [SKIP][16] ([i915#4771])
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@gem_exec_balancer@bonded-dual.html
- shard-dg2-9: NOTRUN -> [SKIP][17] ([i915#4771])
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_balancer@bonded-dual.html
* igt@gem_exec_balancer@bonded-semaphore:
- shard-dg2-9: NOTRUN -> [SKIP][18] ([i915#4812])
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_balancer@bonded-semaphore.html
* igt@gem_exec_balancer@parallel-balancer:
- shard-tglu: NOTRUN -> [SKIP][19] ([i915#4525])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-2/igt@gem_exec_balancer@parallel-balancer.html
* igt@gem_exec_balancer@parallel-keep-submit-fence:
- shard-tglu-1: NOTRUN -> [SKIP][20] ([i915#4525])
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@gem_exec_balancer@parallel-keep-submit-fence.html
* igt@gem_exec_flush@basic-wb-ro-default:
- shard-dg2-9: NOTRUN -> [SKIP][21] ([i915#3539] / [i915#4852]) +1 other test skip
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_flush@basic-wb-ro-default.html
* igt@gem_exec_gttfill@basic:
- shard-rkl: [PASS][22] -> [DMESG-WARN][23] ([i915#12917] / [i915#12964])
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@gem_exec_gttfill@basic.html
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_exec_gttfill@basic.html
* igt@gem_exec_params@secure-non-root:
- shard-dg2-9: NOTRUN -> [SKIP][24] +4 other tests skip
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_params@secure-non-root.html
* igt@gem_exec_reloc@basic-cpu-read:
- shard-dg2-9: NOTRUN -> [SKIP][25] ([i915#3281]) +6 other tests skip
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_reloc@basic-cpu-read.html
* igt@gem_exec_reloc@basic-cpu-read-active:
- shard-rkl: NOTRUN -> [SKIP][26] ([i915#3281])
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@gem_exec_reloc@basic-cpu-read-active.html
* igt@gem_exec_reloc@basic-wc-cpu-noreloc:
- shard-mtlp: NOTRUN -> [SKIP][27] ([i915#3281]) +1 other test skip
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@gem_exec_reloc@basic-wc-cpu-noreloc.html
* igt@gem_exec_reloc@basic-write-wc-noreloc:
- shard-dg2: NOTRUN -> [SKIP][28] ([i915#3281]) +2 other tests skip
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-8/igt@gem_exec_reloc@basic-write-wc-noreloc.html
* igt@gem_exec_schedule@semaphore-power:
- shard-dg2-9: NOTRUN -> [SKIP][29] ([i915#4537] / [i915#4812]) +1 other test skip
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_exec_schedule@semaphore-power.html
* igt@gem_exec_schedule@wide:
- shard-rkl: [PASS][30] -> [DMESG-WARN][31] ([i915#12964]) +31 other tests dmesg-warn
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@gem_exec_schedule@wide.html
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@gem_exec_schedule@wide.html
* igt@gem_fence_thrash@bo-write-verify-y:
- shard-mtlp: NOTRUN -> [SKIP][32] ([i915#4860])
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@gem_fence_thrash@bo-write-verify-y.html
* igt@gem_fenced_exec_thrash@no-spare-fences-busy-interruptible:
- shard-dg2-9: NOTRUN -> [SKIP][33] ([i915#4860]) +1 other test skip
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_fenced_exec_thrash@no-spare-fences-busy-interruptible.html
* igt@gem_huc_copy@huc-copy:
- shard-tglu: NOTRUN -> [SKIP][34] ([i915#2190])
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@gem_huc_copy@huc-copy.html
* igt@gem_lmem_evict@dontneed-evict-race:
- shard-glk: NOTRUN -> [SKIP][35] ([i915#4613]) +1 other test skip
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk9/igt@gem_lmem_evict@dontneed-evict-race.html
* igt@gem_lmem_swapping@heavy-multi:
- shard-mtlp: NOTRUN -> [SKIP][36] ([i915#4613])
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@gem_lmem_swapping@heavy-multi.html
* igt@gem_lmem_swapping@heavy-verify-random:
- shard-rkl: NOTRUN -> [SKIP][37] ([i915#4613])
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@gem_lmem_swapping@heavy-verify-random.html
* igt@gem_lmem_swapping@parallel-random-verify-ccs:
- shard-tglu: NOTRUN -> [SKIP][38] ([i915#4613]) +1 other test skip
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-2/igt@gem_lmem_swapping@parallel-random-verify-ccs.html
* igt@gem_lmem_swapping@random:
- shard-tglu-1: NOTRUN -> [SKIP][39] ([i915#4613])
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@gem_lmem_swapping@random.html
* igt@gem_madvise@dontneed-before-exec:
- shard-dg2-9: NOTRUN -> [SKIP][40] ([i915#3282]) +3 other tests skip
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_madvise@dontneed-before-exec.html
* igt@gem_media_fill@media-fill:
- shard-dg2: NOTRUN -> [SKIP][41] ([i915#8289])
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@gem_media_fill@media-fill.html
* igt@gem_mmap@bad-size:
- shard-dg2-9: NOTRUN -> [SKIP][42] ([i915#4083]) +2 other tests skip
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_mmap@bad-size.html
* igt@gem_mmap@big-bo:
- shard-dg2: NOTRUN -> [SKIP][43] ([i915#4083]) +2 other tests skip
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@gem_mmap@big-bo.html
* igt@gem_mmap_gtt@big-bo-tiledy:
- shard-mtlp: NOTRUN -> [SKIP][44] ([i915#4077]) +2 other tests skip
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@gem_mmap_gtt@big-bo-tiledy.html
* igt@gem_mmap_gtt@hang:
- shard-dg2-9: NOTRUN -> [SKIP][45] ([i915#4077]) +7 other tests skip
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_mmap_gtt@hang.html
* igt@gem_mmap_wc@write:
- shard-dg1: NOTRUN -> [SKIP][46] ([i915#4083])
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-15/igt@gem_mmap_wc@write.html
* igt@gem_mmap_wc@write-cpu-read-wc-unflushed:
- shard-mtlp: NOTRUN -> [SKIP][47] ([i915#4083])
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@gem_mmap_wc@write-cpu-read-wc-unflushed.html
* igt@gem_partial_pwrite_pread@reads:
- shard-dg2: NOTRUN -> [SKIP][48] ([i915#3282]) +3 other tests skip
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@gem_partial_pwrite_pread@reads.html
* igt@gem_partial_pwrite_pread@reads-uncached:
- shard-mtlp: NOTRUN -> [SKIP][49] ([i915#3282])
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@gem_partial_pwrite_pread@reads-uncached.html
* igt@gem_pread@exhaustion:
- shard-rkl: NOTRUN -> [SKIP][50] ([i915#3282])
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@gem_pread@exhaustion.html
* igt@gem_pwrite@basic-exhaustion:
- shard-glk10: NOTRUN -> [WARN][51] ([i915#14702] / [i915#2658])
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk10/igt@gem_pwrite@basic-exhaustion.html
* igt@gem_pxp@create-regular-buffer:
- shard-dg2-9: NOTRUN -> [SKIP][52] ([i915#4270]) +2 other tests skip
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_pxp@create-regular-buffer.html
- shard-rkl: NOTRUN -> [TIMEOUT][53] ([i915#12917] / [i915#12964])
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_pxp@create-regular-buffer.html
* igt@gem_pxp@create-regular-context-1:
- shard-dg2: NOTRUN -> [SKIP][54] ([i915#4270]) +1 other test skip
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@gem_pxp@create-regular-context-1.html
* igt@gem_pxp@dmabuf-shared-protected-dst-is-context-refcounted:
- shard-rkl: [PASS][55] -> [TIMEOUT][56] ([i915#12964])
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@gem_pxp@dmabuf-shared-protected-dst-is-context-refcounted.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@gem_pxp@dmabuf-shared-protected-dst-is-context-refcounted.html
* igt@gem_pxp@hw-rejects-pxp-context:
- shard-mtlp: NOTRUN -> [SKIP][57] ([i915#13398])
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@gem_pxp@hw-rejects-pxp-context.html
* igt@gem_pxp@reject-modify-context-protection-off-1:
- shard-rkl: [PASS][58] -> [TIMEOUT][59] ([i915#12917] / [i915#12964])
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@gem_pxp@reject-modify-context-protection-off-1.html
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_pxp@reject-modify-context-protection-off-1.html
* igt@gem_readwrite@beyond-eob:
- shard-dg1: NOTRUN -> [SKIP][60] ([i915#3282])
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-17/igt@gem_readwrite@beyond-eob.html
* igt@gem_render_copy@y-tiled-to-vebox-x-tiled:
- shard-dg2-9: NOTRUN -> [SKIP][61] ([i915#5190] / [i915#8428]) +4 other tests skip
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_render_copy@y-tiled-to-vebox-x-tiled.html
* igt@gem_render_copy@y-tiled-to-vebox-y-tiled:
- shard-mtlp: NOTRUN -> [SKIP][62] ([i915#8428])
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-2/igt@gem_render_copy@y-tiled-to-vebox-y-tiled.html
* igt@gem_render_copy@yf-tiled-ccs-to-x-tiled:
- shard-dg2: NOTRUN -> [SKIP][63] ([i915#5190] / [i915#8428]) +1 other test skip
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@gem_render_copy@yf-tiled-ccs-to-x-tiled.html
* igt@gem_set_tiling_vs_blt@tiled-to-untiled:
- shard-mtlp: NOTRUN -> [SKIP][64] ([i915#4079])
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@gem_set_tiling_vs_blt@tiled-to-untiled.html
* igt@gem_unfence_active_buffers:
- shard-dg2-9: NOTRUN -> [SKIP][65] ([i915#4879])
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_unfence_active_buffers.html
* igt@gem_userptr_blits@forbidden-operations:
- shard-dg2: NOTRUN -> [SKIP][66] ([i915#3282] / [i915#3297])
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@gem_userptr_blits@forbidden-operations.html
* igt@gem_userptr_blits@invalid-mmap-offset-unsync:
- shard-tglu: NOTRUN -> [SKIP][67] ([i915#3297])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@gem_userptr_blits@invalid-mmap-offset-unsync.html
* igt@gem_userptr_blits@map-fixed-invalidate-busy:
- shard-dg2: NOTRUN -> [SKIP][68] ([i915#3297] / [i915#4880]) +1 other test skip
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@gem_userptr_blits@map-fixed-invalidate-busy.html
* igt@gem_userptr_blits@sd-probe:
- shard-dg2-9: NOTRUN -> [SKIP][69] ([i915#3297] / [i915#4958])
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gem_userptr_blits@sd-probe.html
* igt@gem_userptr_blits@set-cache-level:
- shard-mtlp: NOTRUN -> [SKIP][70] ([i915#3297]) +2 other tests skip
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@gem_userptr_blits@set-cache-level.html
* igt@gem_userptr_blits@unsync-overlap:
- shard-rkl: NOTRUN -> [SKIP][71] ([i915#3297])
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@gem_userptr_blits@unsync-overlap.html
* igt@gem_workarounds@suspend-resume:
- shard-rkl: [PASS][72] -> [INCOMPLETE][73] ([i915#13356]) +2 other tests incomplete
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@gem_workarounds@suspend-resume.html
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-3/igt@gem_workarounds@suspend-resume.html
* igt@gem_workarounds@suspend-resume-fd:
- shard-tglu: [PASS][74] -> [ABORT][75] ([i915#14850])
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-tglu-2/igt@gem_workarounds@suspend-resume-fd.html
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-9/igt@gem_workarounds@suspend-resume-fd.html
* igt@gen7_exec_parse@basic-rejected:
- shard-dg2: NOTRUN -> [SKIP][76] +10 other tests skip
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@gen7_exec_parse@basic-rejected.html
* igt@gen9_exec_parse@allowed-all:
- shard-glk: NOTRUN -> [ABORT][77] ([i915#5566])
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk9/igt@gen9_exec_parse@allowed-all.html
* igt@gen9_exec_parse@bb-start-cmd:
- shard-tglu-1: NOTRUN -> [SKIP][78] ([i915#2527] / [i915#2856]) +2 other tests skip
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@gen9_exec_parse@bb-start-cmd.html
* igt@gen9_exec_parse@bb-start-out:
- shard-rkl: NOTRUN -> [SKIP][79] ([i915#2527])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@gen9_exec_parse@bb-start-out.html
* igt@gen9_exec_parse@cmd-crossing-page:
- shard-tglu: NOTRUN -> [SKIP][80] ([i915#2527] / [i915#2856]) +2 other tests skip
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@gen9_exec_parse@cmd-crossing-page.html
- shard-dg2-9: NOTRUN -> [SKIP][81] ([i915#2856]) +4 other tests skip
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@gen9_exec_parse@cmd-crossing-page.html
* igt@gen9_exec_parse@shadow-peek:
- shard-dg2: NOTRUN -> [SKIP][82] ([i915#2856])
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-8/igt@gen9_exec_parse@shadow-peek.html
* igt@gen9_exec_parse@unaligned-jump:
- shard-mtlp: NOTRUN -> [SKIP][83] ([i915#2856])
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-7/igt@gen9_exec_parse@unaligned-jump.html
* igt@i915_drm_fdinfo@virtual-busy-idle-all:
- shard-dg2-9: NOTRUN -> [SKIP][84] ([i915#14118])
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@i915_drm_fdinfo@virtual-busy-idle-all.html
* igt@i915_module_load@resize-bar:
- shard-tglu: NOTRUN -> [SKIP][85] ([i915#6412])
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-7/igt@i915_module_load@resize-bar.html
* igt@i915_pm_freq_api@freq-reset-multiple:
- shard-tglu-1: NOTRUN -> [SKIP][86] ([i915#8399])
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@i915_pm_freq_api@freq-reset-multiple.html
* igt@i915_pm_freq_api@freq-suspend@gt0:
- shard-dg2: [PASS][87] -> [INCOMPLETE][88] ([i915#13356] / [i915#13820]) +1 other test incomplete
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-11/igt@i915_pm_freq_api@freq-suspend@gt0.html
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-6/igt@i915_pm_freq_api@freq-suspend@gt0.html
* igt@i915_pm_rc6_residency@rc6-fence:
- shard-tglu: NOTRUN -> [WARN][89] ([i915#13790] / [i915#2681]) +1 other test warn
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-3/igt@i915_pm_rc6_residency@rc6-fence.html
* igt@i915_pm_rpm@gem-evict-pwrite:
- shard-dg2: NOTRUN -> [SKIP][90] ([i915#4077]) +4 other tests skip
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@i915_pm_rpm@gem-evict-pwrite.html
* igt@i915_pm_rpm@system-suspend:
- shard-glk: [PASS][91] -> [INCOMPLETE][92] ([i915#13356])
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-glk9/igt@i915_pm_rpm@system-suspend.html
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk5/igt@i915_pm_rpm@system-suspend.html
* igt@i915_pm_rps@min-max-config-idle:
- shard-mtlp: NOTRUN -> [SKIP][93] ([i915#11681] / [i915#6621])
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-2/igt@i915_pm_rps@min-max-config-idle.html
* igt@i915_pm_sseu@full-enable:
- shard-tglu-1: NOTRUN -> [SKIP][94] ([i915#4387])
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@i915_pm_sseu@full-enable.html
* igt@i915_query@query-topology-unsupported:
- shard-rkl: NOTRUN -> [SKIP][95] +2 other tests skip
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@i915_query@query-topology-unsupported.html
* igt@i915_selftest@live@workarounds:
- shard-dg2: [PASS][96] -> [DMESG-FAIL][97] ([i915#12061]) +1 other test dmesg-fail
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-4/igt@i915_selftest@live@workarounds.html
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@i915_selftest@live@workarounds.html
- shard-mtlp: NOTRUN -> [DMESG-FAIL][98] ([i915#12061]) +1 other test dmesg-fail
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-7/igt@i915_selftest@live@workarounds.html
* igt@i915_suspend@sysfs-reader:
- shard-glk: NOTRUN -> [INCOMPLETE][99] ([i915#4817])
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk8/igt@i915_suspend@sysfs-reader.html
* igt@kms_addfb_basic@addfb25-x-tiled-legacy:
- shard-dg2-9: NOTRUN -> [SKIP][100] ([i915#4212])
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_addfb_basic@addfb25-x-tiled-legacy.html
* igt@kms_addfb_basic@basic-y-tiled-legacy:
- shard-dg2: NOTRUN -> [SKIP][101] ([i915#4215] / [i915#5190])
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_addfb_basic@basic-y-tiled-legacy.html
* igt@kms_addfb_basic@clobberred-modifier:
- shard-mtlp: NOTRUN -> [SKIP][102] ([i915#4212])
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_addfb_basic@clobberred-modifier.html
* igt@kms_async_flips@async-flip-suspend-resume@pipe-a-hdmi-a-1:
- shard-glk: [PASS][103] -> [INCOMPLETE][104] ([i915#12761])
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-glk9/igt@kms_async_flips@async-flip-suspend-resume@pipe-a-hdmi-a-1.html
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk1/igt@kms_async_flips@async-flip-suspend-resume@pipe-a-hdmi-a-1.html
* igt@kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels:
- shard-tglu: NOTRUN -> [SKIP][105] ([i915#1769] / [i915#3555])
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-5/igt@kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels.html
* igt@kms_atomic_transition@plane-use-after-nonblocking-unbind:
- shard-rkl: [PASS][106] -> [SKIP][107] ([i915#14544]) +30 other tests skip
[106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_atomic_transition@plane-use-after-nonblocking-unbind.html
[107]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_atomic_transition@plane-use-after-nonblocking-unbind.html
* igt@kms_big_fb@4-tiled-64bpp-rotate-90:
- shard-dg1: NOTRUN -> [SKIP][108] ([i915#4538] / [i915#5286])
[108]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_big_fb@4-tiled-64bpp-rotate-90.html
* igt@kms_big_fb@4-tiled-8bpp-rotate-180:
- shard-tglu: NOTRUN -> [SKIP][109] ([i915#5286]) +4 other tests skip
[109]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-5/igt@kms_big_fb@4-tiled-8bpp-rotate-180.html
* igt@kms_big_fb@4-tiled-addfb:
- shard-rkl: NOTRUN -> [SKIP][110] ([i915#5286]) +2 other tests skip
[110]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_big_fb@4-tiled-addfb.html
- shard-dg1: NOTRUN -> [SKIP][111] ([i915#5286])
[111]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_big_fb@4-tiled-addfb.html
* igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip:
- shard-tglu-1: NOTRUN -> [SKIP][112] ([i915#5286]) +4 other tests skip
[112]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip.html
* igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip:
- shard-mtlp: [PASS][113] -> [FAIL][114] ([i915#5138])
[113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-mtlp-2/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
[114]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-2/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
* igt@kms_big_fb@y-tiled-32bpp-rotate-90:
- shard-dg2-9: NOTRUN -> [SKIP][115] ([i915#4538] / [i915#5190]) +7 other tests skip
[115]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_big_fb@y-tiled-32bpp-rotate-90.html
* igt@kms_big_fb@y-tiled-8bpp-rotate-90:
- shard-dg2: NOTRUN -> [SKIP][116] ([i915#4538] / [i915#5190]) +4 other tests skip
[116]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_big_fb@y-tiled-8bpp-rotate-90.html
* igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0:
- shard-mtlp: NOTRUN -> [SKIP][117] +10 other tests skip
[117]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0.html
* igt@kms_big_fb@yf-tiled-32bpp-rotate-180:
- shard-rkl: NOTRUN -> [SKIP][118] ([i915#14544]) +1 other test skip
[118]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_big_fb@yf-tiled-32bpp-rotate-180.html
* igt@kms_busy@basic:
- shard-rkl: [PASS][119] -> [SKIP][120] ([i915#11190] / [i915#14544])
[119]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_busy@basic.html
[120]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_busy@basic.html
* igt@kms_ccs@bad-aux-stride-y-tiled-gen12-mc-ccs:
- shard-tglu-1: NOTRUN -> [SKIP][121] ([i915#6095]) +34 other tests skip
[121]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_ccs@bad-aux-stride-y-tiled-gen12-mc-ccs.html
* igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs@pipe-c-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][122] ([i915#6095]) +4 other tests skip
[122]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_ccs@bad-rotation-90-4-tiled-dg2-rc-ccs@pipe-c-edp-1.html
* igt@kms_ccs@bad-rotation-90-4-tiled-mtl-rc-ccs-cc@pipe-b-hdmi-a-4:
- shard-dg1: NOTRUN -> [SKIP][123] ([i915#6095]) +124 other tests skip
[123]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@kms_ccs@bad-rotation-90-4-tiled-mtl-rc-ccs-cc@pipe-b-hdmi-a-4.html
* igt@kms_ccs@bad-rotation-90-4-tiled-mtl-rc-ccs@pipe-a-hdmi-a-1:
- shard-rkl: NOTRUN -> [SKIP][124] ([i915#6095]) +54 other tests skip
[124]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_ccs@bad-rotation-90-4-tiled-mtl-rc-ccs@pipe-a-hdmi-a-1.html
* igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-mc-ccs@pipe-c-hdmi-a-2:
- shard-dg2-9: NOTRUN -> [SKIP][125] ([i915#10307] / [i915#6095]) +39 other tests skip
[125]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_ccs@ccs-on-another-bo-y-tiled-gen12-mc-ccs@pipe-c-hdmi-a-2.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-bmg-ccs:
- shard-tglu-1: NOTRUN -> [SKIP][126] ([i915#12313]) +2 other tests skip
[126]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_ccs@crc-primary-rotation-180-4-tiled-bmg-ccs.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-bmg-ccs:
- shard-dg2: NOTRUN -> [SKIP][127] ([i915#12805])
[127]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@kms_ccs@crc-primary-suspend-4-tiled-bmg-ccs.html
- shard-dg1: NOTRUN -> [SKIP][128] ([i915#12805])
[128]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@kms_ccs@crc-primary-suspend-4-tiled-bmg-ccs.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-dg2-rc-ccs-cc@pipe-c-hdmi-a-2:
- shard-rkl: NOTRUN -> [SKIP][129] ([i915#14098] / [i915#6095]) +52 other tests skip
[129]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_ccs@crc-primary-suspend-4-tiled-dg2-rc-ccs-cc@pipe-c-hdmi-a-2.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs:
- shard-dg2-9: NOTRUN -> [SKIP][130] ([i915#12805])
[130]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-suspend-y-tiled-ccs@pipe-a-hdmi-a-1:
- shard-glk: NOTRUN -> [INCOMPLETE][131] ([i915#12796]) +1 other test incomplete
[131]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk6/igt@kms_ccs@crc-primary-suspend-y-tiled-ccs@pipe-a-hdmi-a-1.html
* igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-mc-ccs@pipe-d-hdmi-a-1:
- shard-tglu: NOTRUN -> [SKIP][132] ([i915#6095]) +39 other tests skip
[132]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-2/igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-mc-ccs@pipe-d-hdmi-a-1.html
* igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc:
- shard-dg1: [PASS][133] -> [DMESG-WARN][134] ([i915#4423]) +2 other tests dmesg-warn
[133]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-15/igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc.html
[134]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc.html
* igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc@pipe-c-hdmi-a-3:
- shard-dg2: NOTRUN -> [SKIP][135] ([i915#6095]) +7 other tests skip
[135]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_ccs@crc-primary-suspend-y-tiled-gen12-rc-ccs-cc@pipe-c-hdmi-a-3.html
* igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs@pipe-d-hdmi-a-2:
- shard-dg2-9: NOTRUN -> [SKIP][136] ([i915#6095]) +4 other tests skip
[136]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs@pipe-d-hdmi-a-2.html
* igt@kms_ccs@missing-ccs-buffer-4-tiled-mtl-mc-ccs@pipe-b-hdmi-a-1:
- shard-dg2: NOTRUN -> [SKIP][137] ([i915#10307] / [i915#6095]) +92 other tests skip
[137]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_ccs@missing-ccs-buffer-4-tiled-mtl-mc-ccs@pipe-b-hdmi-a-1.html
* igt@kms_ccs@random-ccs-data-4-tiled-bmg-ccs:
- shard-dg2-9: NOTRUN -> [SKIP][138] ([i915#12313]) +1 other test skip
[138]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_ccs@random-ccs-data-4-tiled-bmg-ccs.html
* igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs:
- shard-dg1: NOTRUN -> [SKIP][139] ([i915#12313])
[139]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-snb: NOTRUN -> [SKIP][140] +25 other tests skip
[140]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-snb6/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-tglu: NOTRUN -> [SKIP][141] ([i915#12313])
[141]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-mtlp: NOTRUN -> [SKIP][142] ([i915#12313]) +2 other tests skip
[142]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-dg2: NOTRUN -> [SKIP][143] ([i915#12313])
[143]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
- shard-rkl: NOTRUN -> [SKIP][144] ([i915#12313])
[144]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_ccs@random-ccs-data-4-tiled-lnl-ccs.html
* igt@kms_ccs@random-ccs-data-yf-tiled-ccs@pipe-d-hdmi-a-1:
- shard-dg2: NOTRUN -> [SKIP][145] ([i915#10307] / [i915#10434] / [i915#6095]) +2 other tests skip
[145]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_ccs@random-ccs-data-yf-tiled-ccs@pipe-d-hdmi-a-1.html
* igt@kms_cdclk@mode-transition-all-outputs:
- shard-dg2-9: NOTRUN -> [SKIP][146] ([i915#13784])
[146]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_cdclk@mode-transition-all-outputs.html
* igt@kms_cdclk@plane-scaling@pipe-d-hdmi-a-1:
- shard-dg2: NOTRUN -> [SKIP][147] ([i915#13783]) +3 other tests skip
[147]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_cdclk@plane-scaling@pipe-d-hdmi-a-1.html
* igt@kms_chamelium_audio@dp-audio:
- shard-tglu: NOTRUN -> [SKIP][148] ([i915#11151] / [i915#7828]) +5 other tests skip
[148]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-7/igt@kms_chamelium_audio@dp-audio.html
* igt@kms_chamelium_frames@hdmi-crc-nonplanar-formats:
- shard-dg2: NOTRUN -> [SKIP][149] ([i915#11151] / [i915#7828]) +1 other test skip
[149]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_chamelium_frames@hdmi-crc-nonplanar-formats.html
* igt@kms_chamelium_hpd@dp-hpd-storm-disable:
- shard-tglu-1: NOTRUN -> [SKIP][150] ([i915#11151] / [i915#7828]) +6 other tests skip
[150]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_chamelium_hpd@dp-hpd-storm-disable.html
* igt@kms_chamelium_hpd@hdmi-hpd-for-each-pipe:
- shard-mtlp: NOTRUN -> [SKIP][151] ([i915#11151] / [i915#7828])
[151]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_chamelium_hpd@hdmi-hpd-for-each-pipe.html
* igt@kms_chamelium_hpd@vga-hpd-for-each-pipe:
- shard-rkl: NOTRUN -> [SKIP][152] ([i915#11151] / [i915#7828]) +2 other tests skip
[152]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_chamelium_hpd@vga-hpd-for-each-pipe.html
* igt@kms_chamelium_hpd@vga-hpd-without-ddc:
- shard-dg2-9: NOTRUN -> [SKIP][153] ([i915#11151] / [i915#7828]) +5 other tests skip
[153]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_chamelium_hpd@vga-hpd-without-ddc.html
* igt@kms_color@ctm-0-75:
- shard-rkl: [PASS][154] -> [SKIP][155] ([i915#12655] / [i915#14544]) +1 other test skip
[154]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_color@ctm-0-75.html
[155]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_color@ctm-0-75.html
* igt@kms_content_protection@atomic:
- shard-dg1: NOTRUN -> [SKIP][156] ([i915#7116] / [i915#9424])
[156]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_content_protection@atomic.html
- shard-mtlp: NOTRUN -> [SKIP][157] ([i915#6944] / [i915#9424])
[157]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_content_protection@atomic.html
* igt@kms_content_protection@atomic-dpms:
- shard-rkl: NOTRUN -> [SKIP][158] ([i915#7118] / [i915#9424]) +1 other test skip
[158]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_content_protection@atomic-dpms.html
- shard-tglu: NOTRUN -> [SKIP][159] ([i915#6944] / [i915#7116] / [i915#7118] / [i915#9424]) +1 other test skip
[159]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-8/igt@kms_content_protection@atomic-dpms.html
* igt@kms_content_protection@dp-mst-lic-type-0:
- shard-tglu-1: NOTRUN -> [SKIP][160] ([i915#3116] / [i915#3299])
[160]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_content_protection@dp-mst-lic-type-0.html
* igt@kms_content_protection@lic-type-0@pipe-a-dp-3:
- shard-dg2: NOTRUN -> [FAIL][161] ([i915#7173])
[161]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_content_protection@lic-type-0@pipe-a-dp-3.html
* igt@kms_content_protection@lic-type-1:
- shard-dg2: NOTRUN -> [SKIP][162] ([i915#9424])
[162]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_content_protection@lic-type-1.html
* igt@kms_cursor_crc@cursor-offscreen-32x10:
- shard-mtlp: NOTRUN -> [SKIP][163] ([i915#3555] / [i915#8814]) +1 other test skip
[163]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-2/igt@kms_cursor_crc@cursor-offscreen-32x10.html
* igt@kms_cursor_crc@cursor-onscreen-32x32:
- shard-tglu: NOTRUN -> [SKIP][164] ([i915#3555]) +3 other tests skip
[164]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@kms_cursor_crc@cursor-onscreen-32x32.html
* igt@kms_cursor_crc@cursor-onscreen-512x170:
- shard-dg2-9: NOTRUN -> [SKIP][165] ([i915#13049]) +1 other test skip
[165]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_cursor_crc@cursor-onscreen-512x170.html
* igt@kms_cursor_crc@cursor-random-32x10:
- shard-dg2: NOTRUN -> [SKIP][166] ([i915#3555])
[166]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@kms_cursor_crc@cursor-random-32x10.html
- shard-rkl: NOTRUN -> [SKIP][167] ([i915#3555])
[167]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_cursor_crc@cursor-random-32x10.html
- shard-dg1: NOTRUN -> [SKIP][168] ([i915#3555])
[168]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@kms_cursor_crc@cursor-random-32x10.html
* igt@kms_cursor_crc@cursor-random-512x170:
- shard-dg2: NOTRUN -> [SKIP][169] ([i915#13049]) +1 other test skip
[169]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@kms_cursor_crc@cursor-random-512x170.html
* igt@kms_cursor_crc@cursor-random-512x512:
- shard-tglu-1: NOTRUN -> [SKIP][170] ([i915#13049])
[170]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_cursor_crc@cursor-random-512x512.html
* igt@kms_cursor_crc@cursor-random-max-size:
- shard-glk: NOTRUN -> [SKIP][171] +220 other tests skip
[171]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk6/igt@kms_cursor_crc@cursor-random-max-size.html
* igt@kms_cursor_crc@cursor-rapid-movement-max-size:
- shard-dg2-9: NOTRUN -> [SKIP][172] ([i915#3555]) +6 other tests skip
[172]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_cursor_crc@cursor-rapid-movement-max-size.html
* igt@kms_cursor_crc@cursor-sliding-128x42:
- shard-rkl: [PASS][173] -> [FAIL][174] ([i915#13566])
[173]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_cursor_crc@cursor-sliding-128x42.html
[174]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_cursor_crc@cursor-sliding-128x42.html
* igt@kms_cursor_crc@cursor-sliding-128x42@pipe-a-hdmi-a-1:
- shard-rkl: NOTRUN -> [FAIL][175] ([i915#13566]) +1 other test fail
[175]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_cursor_crc@cursor-sliding-128x42@pipe-a-hdmi-a-1.html
- shard-tglu: [PASS][176] -> [FAIL][177] ([i915#13566]) +1 other test fail
[176]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-tglu-5/igt@kms_cursor_crc@cursor-sliding-128x42@pipe-a-hdmi-a-1.html
[177]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@kms_cursor_crc@cursor-sliding-128x42@pipe-a-hdmi-a-1.html
* igt@kms_cursor_crc@cursor-sliding-64x21:
- shard-mtlp: NOTRUN -> [SKIP][178] ([i915#8814])
[178]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-3/igt@kms_cursor_crc@cursor-sliding-64x21.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
- shard-tglu-1: NOTRUN -> [SKIP][179] ([i915#4103])
[179]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
* igt@kms_cursor_legacy@basic-flip-after-cursor-varying-size:
- shard-glk10: NOTRUN -> [SKIP][180] ([i915#11190]) +1 other test skip
[180]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk10/igt@kms_cursor_legacy@basic-flip-after-cursor-varying-size.html
* igt@kms_cursor_legacy@cursora-vs-flipb-legacy:
- shard-dg2-9: NOTRUN -> [SKIP][181] ([i915#13046] / [i915#5354]) +3 other tests skip
[181]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_cursor_legacy@cursora-vs-flipb-legacy.html
* igt@kms_cursor_legacy@flip-vs-cursor-varying-size:
- shard-rkl: [PASS][182] -> [FAIL][183] ([i915#2346])
[182]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_cursor_legacy@flip-vs-cursor-varying-size.html
[183]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_cursor_legacy@flip-vs-cursor-varying-size.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size:
- shard-mtlp: NOTRUN -> [SKIP][184] ([i915#4213])
[184]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size.html
* igt@kms_dirtyfb@drrs-dirtyfb-ioctl:
- shard-tglu-1: NOTRUN -> [SKIP][185] ([i915#9723])
[185]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_dirtyfb@drrs-dirtyfb-ioctl.html
* igt@kms_dp_link_training@uhbr-mst:
- shard-dg2: NOTRUN -> [SKIP][186] ([i915#13748])
[186]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@kms_dp_link_training@uhbr-mst.html
* igt@kms_dsc@dsc-fractional-bpp-with-bpc:
- shard-tglu: NOTRUN -> [SKIP][187] ([i915#3840])
[187]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@kms_dsc@dsc-fractional-bpp-with-bpc.html
* igt@kms_dsc@dsc-with-formats:
- shard-dg2-9: NOTRUN -> [SKIP][188] ([i915#3555] / [i915#3840])
[188]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_dsc@dsc-with-formats.html
- shard-tglu-1: NOTRUN -> [SKIP][189] ([i915#3555] / [i915#3840])
[189]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_dsc@dsc-with-formats.html
* igt@kms_fbcon_fbt@psr:
- shard-dg2-9: NOTRUN -> [SKIP][190] ([i915#3469])
[190]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_fbcon_fbt@psr.html
* igt@kms_feature_discovery@chamelium:
- shard-tglu-1: NOTRUN -> [SKIP][191] ([i915#2065] / [i915#4854])
[191]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_feature_discovery@chamelium.html
* igt@kms_feature_discovery@display-1x:
- shard-rkl: [PASS][192] -> [SKIP][193] ([i915#14544] / [i915#9738])
[192]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_feature_discovery@display-1x.html
[193]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_feature_discovery@display-1x.html
* igt@kms_feature_discovery@psr1:
- shard-dg2-9: NOTRUN -> [SKIP][194] ([i915#658])
[194]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_feature_discovery@psr1.html
* igt@kms_feature_discovery@psr2:
- shard-dg2: NOTRUN -> [SKIP][195] ([i915#658])
[195]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@kms_feature_discovery@psr2.html
* igt@kms_flip@2x-absolute-wf_vblank:
- shard-dg2: NOTRUN -> [SKIP][196] ([i915#9934]) +4 other tests skip
[196]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@kms_flip@2x-absolute-wf_vblank.html
- shard-rkl: NOTRUN -> [SKIP][197] ([i915#9934]) +3 other tests skip
[197]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_flip@2x-absolute-wf_vblank.html
- shard-dg1: NOTRUN -> [SKIP][198] ([i915#9934])
[198]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_flip@2x-absolute-wf_vblank.html
* igt@kms_flip@2x-blocking-absolute-wf_vblank:
- shard-tglu: NOTRUN -> [SKIP][199] ([i915#3637] / [i915#9934]) +4 other tests skip
[199]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-10/igt@kms_flip@2x-blocking-absolute-wf_vblank.html
* igt@kms_flip@2x-flip-vs-blocking-wf-vblank:
- shard-tglu-1: NOTRUN -> [SKIP][200] ([i915#3637] / [i915#9934])
[200]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_flip@2x-flip-vs-blocking-wf-vblank.html
* igt@kms_flip@2x-flip-vs-dpms:
- shard-mtlp: NOTRUN -> [SKIP][201] ([i915#3637] / [i915#9934]) +2 other tests skip
[201]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_flip@2x-flip-vs-dpms.html
* igt@kms_flip@2x-flip-vs-fences-interruptible:
- shard-mtlp: NOTRUN -> [SKIP][202] ([i915#8381])
[202]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@kms_flip@2x-flip-vs-fences-interruptible.html
* igt@kms_flip@2x-flip-vs-modeset-vs-hang:
- shard-dg2-9: NOTRUN -> [SKIP][203] ([i915#9934]) +2 other tests skip
[203]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_flip@2x-flip-vs-modeset-vs-hang.html
* igt@kms_flip@basic-flip-vs-dpms:
- shard-rkl: [PASS][204] -> [SKIP][205] ([i915#14544] / [i915#3637]) +3 other tests skip
[204]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_flip@basic-flip-vs-dpms.html
[205]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_flip@basic-flip-vs-dpms.html
* igt@kms_flip@dpms-off-confusion-interruptible@a-hdmi-a1:
- shard-rkl: NOTRUN -> [DMESG-WARN][206] ([i915#12964]) +6 other tests dmesg-warn
[206]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_flip@dpms-off-confusion-interruptible@a-hdmi-a1.html
* igt@kms_flip@plain-flip-ts-check:
- shard-rkl: [PASS][207] -> [FAIL][208] ([i915#14600]) +1 other test fail
[207]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_flip@plain-flip-ts-check.html
[208]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_flip@plain-flip-ts-check.html
* igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling:
- shard-tglu-1: NOTRUN -> [SKIP][209] ([i915#2672] / [i915#3555]) +1 other test skip
[209]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling:
- shard-dg2-9: NOTRUN -> [SKIP][210] ([i915#2672] / [i915#3555]) +1 other test skip
[210]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling@pipe-a-valid-mode:
- shard-dg2-9: NOTRUN -> [SKIP][211] ([i915#2672]) +3 other tests skip
[211]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-downscaling@pipe-a-default-mode:
- shard-mtlp: NOTRUN -> [SKIP][212] ([i915#2672] / [i915#3555] / [i915#8813]) +1 other test skip
[212]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-4/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-downscaling@pipe-a-default-mode.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling:
- shard-dg2-9: NOTRUN -> [SKIP][213] ([i915#2672] / [i915#3555] / [i915#5190]) +1 other test skip
[213]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling.html
- shard-tglu-1: NOTRUN -> [SKIP][214] ([i915#2587] / [i915#2672] / [i915#3555])
[214]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling@pipe-a-valid-mode:
- shard-rkl: NOTRUN -> [SKIP][215] ([i915#2672]) +1 other test skip
[215]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling@pipe-a-valid-mode.html
- shard-tglu-1: NOTRUN -> [SKIP][216] ([i915#2587] / [i915#2672]) +2 other tests skip
[216]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling:
- shard-tglu: NOTRUN -> [SKIP][217] ([i915#2672] / [i915#3555])
[217]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-8/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode:
- shard-tglu: NOTRUN -> [SKIP][218] ([i915#2587] / [i915#2672])
[218]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-8/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode.html
* igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling:
- shard-rkl: [PASS][219] -> [SKIP][220] ([i915#14544] / [i915#3555]) +2 other tests skip
[219]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling.html
[220]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling:
- shard-mtlp: NOTRUN -> [SKIP][221] ([i915#3555] / [i915#8810] / [i915#8813]) +1 other test skip
[221]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-5/igt@kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling:
- shard-dg2: NOTRUN -> [SKIP][222] ([i915#2672] / [i915#3555] / [i915#5190])
[222]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode:
- shard-dg2: NOTRUN -> [SKIP][223] ([i915#2672])
[223]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode.html
* igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw:
- shard-dg2: [PASS][224] -> [FAIL][225] ([i915#6880])
[224]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-3/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
[225]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
* igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-gtt:
- shard-rkl: NOTRUN -> [SKIP][226] ([i915#14544] / [i915#1849] / [i915#5354])
[226]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-gtt.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt:
- shard-rkl: NOTRUN -> [SKIP][227] ([i915#1825]) +7 other tests skip
[227]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-render:
- shard-glk: [PASS][228] -> [SKIP][229] +4 other tests skip
[228]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-glk1/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-render.html
[229]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk8/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu:
- shard-rkl: [PASS][230] -> [SKIP][231] ([i915#14544] / [i915#1849] / [i915#5354]) +1 other test skip
[230]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu.html
[231]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu.html
* igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt:
- shard-mtlp: NOTRUN -> [SKIP][232] ([i915#8708]) +2 other tests skip
[232]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt.html
* igt@kms_frontbuffer_tracking@fbc-suspend:
- shard-rkl: NOTRUN -> [INCOMPLETE][233] ([i915#10056])
[233]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_frontbuffer_tracking@fbc-suspend.html
- shard-glk: NOTRUN -> [INCOMPLETE][234] ([i915#10056])
[234]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk5/igt@kms_frontbuffer_tracking@fbc-suspend.html
* igt@kms_frontbuffer_tracking@fbc-tiling-4:
- shard-tglu-1: NOTRUN -> [SKIP][235] ([i915#5439])
[235]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_frontbuffer_tracking@fbc-tiling-4.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render:
- shard-dg2-9: NOTRUN -> [SKIP][236] ([i915#3458]) +9 other tests skip
[236]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-plflip-blt:
- shard-dg1: NOTRUN -> [SKIP][237] +5 other tests skip
[237]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-pgflip-blt:
- shard-tglu-1: NOTRUN -> [SKIP][238] +49 other tests skip
[238]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-pgflip-blt.html
* igt@kms_frontbuffer_tracking@fbcpsr-rgb565-draw-blt:
- shard-dg2: NOTRUN -> [SKIP][239] ([i915#3458]) +9 other tests skip
[239]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_frontbuffer_tracking@fbcpsr-rgb565-draw-blt.html
* igt@kms_frontbuffer_tracking@fbcpsr-tiling-y:
- shard-dg1: NOTRUN -> [SKIP][240] ([i915#3458]) +2 other tests skip
[240]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_frontbuffer_tracking@fbcpsr-tiling-y.html
- shard-mtlp: NOTRUN -> [SKIP][241] ([i915#10055])
[241]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_frontbuffer_tracking@fbcpsr-tiling-y.html
* igt@kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-plflip-blt:
- shard-dg2: NOTRUN -> [SKIP][242] ([i915#10433] / [i915#3458])
[242]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-gtt:
- shard-dg1: NOTRUN -> [SKIP][243] ([i915#8708])
[243]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-gtt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-wc:
- shard-dg2: NOTRUN -> [SKIP][244] ([i915#8708]) +3 other tests skip
[244]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-wc:
- shard-dg2-9: NOTRUN -> [SKIP][245] ([i915#8708]) +13 other tests skip
[245]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-onoff:
- shard-dg2-9: NOTRUN -> [SKIP][246] ([i915#5354]) +20 other tests skip
[246]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-onoff.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-plflip-blt:
- shard-dg2: NOTRUN -> [SKIP][247] ([i915#5354]) +11 other tests skip
[247]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-cpu:
- shard-mtlp: NOTRUN -> [SKIP][248] ([i915#1825]) +6 other tests skip
[248]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-cpu.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc:
- shard-tglu: NOTRUN -> [SKIP][249] +44 other tests skip
[249]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary:
- shard-rkl: NOTRUN -> [SKIP][250] ([i915#3023]) +5 other tests skip
[250]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-3/igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary.html
* igt@kms_hdr@bpc-switch:
- shard-dg2: [PASS][251] -> [SKIP][252] ([i915#3555] / [i915#8228])
[251]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-11/igt@kms_hdr@bpc-switch.html
[252]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@kms_hdr@bpc-switch.html
* igt@kms_hdr@bpc-switch-dpms:
- shard-dg2-9: NOTRUN -> [SKIP][253] ([i915#3555] / [i915#8228])
[253]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_hdr@bpc-switch-dpms.html
* igt@kms_hdr@invalid-hdr:
- shard-tglu-1: NOTRUN -> [SKIP][254] ([i915#3555] / [i915#8228])
[254]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_hdr@invalid-hdr.html
* igt@kms_hdr@static-toggle:
- shard-tglu: NOTRUN -> [SKIP][255] ([i915#3555] / [i915#8228])
[255]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@kms_hdr@static-toggle.html
* igt@kms_invalid_mode@bad-hsync-end:
- shard-rkl: [PASS][256] -> [SKIP][257] ([i915#14544] / [i915#3555] / [i915#8826])
[256]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@kms_invalid_mode@bad-hsync-end.html
[257]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_invalid_mode@bad-hsync-end.html
* igt@kms_invalid_mode@zero-hdisplay:
- shard-glk10: NOTRUN -> [SKIP][258] +330 other tests skip
[258]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk10/igt@kms_invalid_mode@zero-hdisplay.html
* igt@kms_joiner@basic-big-joiner:
- shard-dg2-9: NOTRUN -> [SKIP][259] ([i915#10656]) +1 other test skip
[259]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_joiner@basic-big-joiner.html
* igt@kms_joiner@basic-force-big-joiner:
- shard-tglu: NOTRUN -> [SKIP][260] ([i915#12388])
[260]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-2/igt@kms_joiner@basic-force-big-joiner.html
* igt@kms_joiner@basic-ultra-joiner:
- shard-dg2: NOTRUN -> [SKIP][261] ([i915#12339])
[261]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_joiner@basic-ultra-joiner.html
* igt@kms_joiner@invalid-modeset-big-joiner:
- shard-tglu-1: NOTRUN -> [SKIP][262] ([i915#10656])
[262]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_joiner@invalid-modeset-big-joiner.html
* igt@kms_joiner@invalid-modeset-force-ultra-joiner:
- shard-rkl: NOTRUN -> [SKIP][263] ([i915#12394])
[263]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_joiner@invalid-modeset-force-ultra-joiner.html
- shard-tglu: NOTRUN -> [SKIP][264] ([i915#12394])
[264]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-10/igt@kms_joiner@invalid-modeset-force-ultra-joiner.html
* igt@kms_joiner@invalid-modeset-ultra-joiner:
- shard-dg2-9: NOTRUN -> [SKIP][265] ([i915#12339])
[265]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_joiner@invalid-modeset-ultra-joiner.html
* igt@kms_pipe_crc_basic@suspend-read-crc:
- shard-rkl: [PASS][266] -> [INCOMPLETE][267] ([i915#13476])
[266]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_pipe_crc_basic@suspend-read-crc.html
[267]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_pipe_crc_basic@suspend-read-crc.html
* igt@kms_pipe_crc_basic@suspend-read-crc@pipe-a-hdmi-a-2:
- shard-rkl: NOTRUN -> [INCOMPLETE][268] ([i915#13476])
[268]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_pipe_crc_basic@suspend-read-crc@pipe-a-hdmi-a-2.html
* igt@kms_plane@pixel-format:
- shard-rkl: [PASS][269] -> [SKIP][270] ([i915#14544] / [i915#8825])
[269]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@kms_plane@pixel-format.html
[270]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane@pixel-format.html
* igt@kms_plane_alpha_blend@coverage-vs-premult-vs-constant:
- shard-rkl: [PASS][271] -> [SKIP][272] ([i915#14544] / [i915#7294]) +1 other test skip
[271]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_plane_alpha_blend@coverage-vs-premult-vs-constant.html
[272]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_alpha_blend@coverage-vs-premult-vs-constant.html
* igt@kms_plane_lowres@tiling-yf:
- shard-tglu-1: NOTRUN -> [SKIP][273] ([i915#3555]) +6 other tests skip
[273]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_plane_lowres@tiling-yf.html
* igt@kms_plane_multiple@2x-tiling-x:
- shard-rkl: NOTRUN -> [SKIP][274] ([i915#13958])
[274]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_plane_multiple@2x-tiling-x.html
* igt@kms_plane_multiple@2x-tiling-y:
- shard-tglu: NOTRUN -> [SKIP][275] ([i915#13958]) +1 other test skip
[275]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-5/igt@kms_plane_multiple@2x-tiling-y.html
* igt@kms_plane_scaling@2x-scaler-multi-pipe:
- shard-dg2-9: NOTRUN -> [SKIP][276] ([i915#13046] / [i915#5354] / [i915#9423])
[276]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
* igt@kms_plane_scaling@invalid-num-scalers:
- shard-rkl: [PASS][277] -> [SKIP][278] ([i915#14544] / [i915#3555] / [i915#6953] / [i915#8152])
[277]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_plane_scaling@invalid-num-scalers.html
[278]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@invalid-num-scalers.html
* igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-a:
- shard-rkl: [PASS][279] -> [SKIP][280] ([i915#12247] / [i915#14544]) +1 other test skip
[279]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-a.html
[280]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-a.html
* igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-b:
- shard-rkl: [PASS][281] -> [SKIP][282] ([i915#12247] / [i915#14544] / [i915#8152]) +1 other test skip
[281]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-b.html
[282]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25@pipe-b.html
* igt@kms_plane_scaling@planes-downscale-factor-0-75-upscale-factor-0-25:
- shard-rkl: [PASS][283] -> [SKIP][284] ([i915#14544] / [i915#6953] / [i915#8152]) +1 other test skip
[283]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_plane_scaling@planes-downscale-factor-0-75-upscale-factor-0-25.html
[284]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@planes-downscale-factor-0-75-upscale-factor-0-25.html
* igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5:
- shard-mtlp: NOTRUN -> [SKIP][285] ([i915#12247] / [i915#6953])
[285]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5.html
* igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5@pipe-c:
- shard-mtlp: NOTRUN -> [SKIP][286] ([i915#12247]) +3 other tests skip
[286]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5@pipe-c.html
* igt@kms_pm_dc@dc3co-vpb-simulation:
- shard-dg2-9: NOTRUN -> [SKIP][287] ([i915#9685])
[287]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_pm_dc@dc3co-vpb-simulation.html
* igt@kms_pm_dc@dc6-psr:
- shard-dg2: NOTRUN -> [SKIP][288] ([i915#9685]) +1 other test skip
[288]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_pm_dc@dc6-psr.html
- shard-dg1: NOTRUN -> [SKIP][289] ([i915#9685])
[289]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-18/igt@kms_pm_dc@dc6-psr.html
* igt@kms_pm_rpm@dpms-mode-unset-lpsp:
- shard-dg2-9: NOTRUN -> [SKIP][290] ([i915#9519]) +1 other test skip
[290]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_pm_rpm@dpms-mode-unset-lpsp.html
* igt@kms_pm_rpm@modeset-lpsp:
- shard-dg2: NOTRUN -> [SKIP][291] ([i915#9519])
[291]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@kms_pm_rpm@modeset-lpsp.html
- shard-rkl: [PASS][292] -> [SKIP][293] ([i915#9519]) +1 other test skip
[292]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_pm_rpm@modeset-lpsp.html
[293]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_pm_rpm@modeset-lpsp.html
* igt@kms_pm_rpm@modeset-lpsp-stress-no-wait:
- shard-dg2: [PASS][294] -> [SKIP][295] ([i915#9519])
[294]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-4/igt@kms_pm_rpm@modeset-lpsp-stress-no-wait.html
[295]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-1/igt@kms_pm_rpm@modeset-lpsp-stress-no-wait.html
* igt@kms_pm_rpm@modeset-non-lpsp:
- shard-rkl: [PASS][296] -> [SKIP][297] ([i915#14544] / [i915#9519]) +1 other test skip
[296]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_pm_rpm@modeset-non-lpsp.html
[297]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_pm_rpm@modeset-non-lpsp.html
* igt@kms_pm_rpm@modeset-non-lpsp-stress-no-wait:
- shard-tglu: NOTRUN -> [SKIP][298] ([i915#9519])
[298]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-6/igt@kms_pm_rpm@modeset-non-lpsp-stress-no-wait.html
* igt@kms_prime@d3hot:
- shard-tglu-1: NOTRUN -> [SKIP][299] ([i915#6524])
[299]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_prime@d3hot.html
* igt@kms_psr2_sf@fbc-pr-cursor-plane-update-sf:
- shard-tglu: NOTRUN -> [SKIP][300] ([i915#11520]) +6 other tests skip
[300]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-7/igt@kms_psr2_sf@fbc-pr-cursor-plane-update-sf.html
* igt@kms_psr2_sf@fbc-pr-overlay-plane-update-continuous-sf:
- shard-rkl: NOTRUN -> [SKIP][301] ([i915#11520]) +1 other test skip
[301]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_psr2_sf@fbc-pr-overlay-plane-update-continuous-sf.html
* igt@kms_psr2_sf@fbc-pr-overlay-plane-update-sf-dmg-area:
- shard-glk: NOTRUN -> [SKIP][302] ([i915#11520]) +7 other tests skip
[302]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk9/igt@kms_psr2_sf@fbc-pr-overlay-plane-update-sf-dmg-area.html
* igt@kms_psr2_sf@fbc-psr2-cursor-plane-update-sf@pipe-b-edp-1:
- shard-mtlp: NOTRUN -> [SKIP][303] ([i915#9808]) +2 other tests skip
[303]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_psr2_sf@fbc-psr2-cursor-plane-update-sf@pipe-b-edp-1.html
* igt@kms_psr2_sf@fbc-psr2-overlay-plane-move-continuous-sf:
- shard-glk10: NOTRUN -> [SKIP][304] ([i915#11520]) +9 other tests skip
[304]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk10/igt@kms_psr2_sf@fbc-psr2-overlay-plane-move-continuous-sf.html
* igt@kms_psr2_sf@fbc-psr2-overlay-plane-update-continuous-sf:
- shard-dg2: NOTRUN -> [SKIP][305] ([i915#11520]) +2 other tests skip
[305]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_psr2_sf@fbc-psr2-overlay-plane-update-continuous-sf.html
* igt@kms_psr2_sf@pr-overlay-plane-move-continuous-sf:
- shard-dg2-9: NOTRUN -> [SKIP][306] ([i915#11520]) +4 other tests skip
[306]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_psr2_sf@pr-overlay-plane-move-continuous-sf.html
* igt@kms_psr2_sf@pr-primary-plane-update-sf-dmg-area:
- shard-mtlp: NOTRUN -> [SKIP][307] ([i915#12316])
[307]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-5/igt@kms_psr2_sf@pr-primary-plane-update-sf-dmg-area.html
* igt@kms_psr2_sf@psr2-cursor-plane-update-sf:
- shard-snb: NOTRUN -> [SKIP][308] ([i915#11520])
[308]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-snb1/igt@kms_psr2_sf@psr2-cursor-plane-update-sf.html
- shard-dg1: NOTRUN -> [SKIP][309] ([i915#11520])
[309]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-17/igt@kms_psr2_sf@psr2-cursor-plane-update-sf.html
* igt@kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb:
- shard-tglu-1: NOTRUN -> [SKIP][310] ([i915#11520]) +5 other tests skip
[310]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb.html
* igt@kms_psr2_su@page_flip-nv12:
- shard-dg2-9: NOTRUN -> [SKIP][311] ([i915#9683])
[311]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_psr2_su@page_flip-nv12.html
* igt@kms_psr2_su@page_flip-p010:
- shard-tglu: NOTRUN -> [SKIP][312] ([i915#9683])
[312]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-8/igt@kms_psr2_su@page_flip-p010.html
* igt@kms_psr@fbc-pr-cursor-render:
- shard-dg2: NOTRUN -> [SKIP][313] ([i915#1072] / [i915#9732]) +8 other tests skip
[313]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@kms_psr@fbc-pr-cursor-render.html
* igt@kms_psr@fbc-pr-primary-blt:
- shard-mtlp: NOTRUN -> [SKIP][314] ([i915#9688]) +3 other tests skip
[314]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-6/igt@kms_psr@fbc-pr-primary-blt.html
* igt@kms_psr@fbc-psr-primary-mmap-gtt:
- shard-dg2-9: NOTRUN -> [SKIP][315] ([i915#1072] / [i915#9732]) +13 other tests skip
[315]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_psr@fbc-psr-primary-mmap-gtt.html
* igt@kms_psr@fbc-psr-sprite-plane-move:
- shard-rkl: NOTRUN -> [SKIP][316] ([i915#1072] / [i915#9732]) +2 other tests skip
[316]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_psr@fbc-psr-sprite-plane-move.html
- shard-dg1: NOTRUN -> [SKIP][317] ([i915#1072] / [i915#9732]) +1 other test skip
[317]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_psr@fbc-psr-sprite-plane-move.html
* igt@kms_psr@pr-dpms:
- shard-tglu: NOTRUN -> [SKIP][318] ([i915#9732]) +10 other tests skip
[318]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-2/igt@kms_psr@pr-dpms.html
* igt@kms_psr@psr-cursor-plane-onoff:
- shard-tglu-1: NOTRUN -> [SKIP][319] ([i915#9732]) +9 other tests skip
[319]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_psr@psr-cursor-plane-onoff.html
* igt@kms_psr_stress_test@invalidate-primary-flip-overlay:
- shard-tglu-1: NOTRUN -> [SKIP][320] ([i915#9685])
[320]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_psr_stress_test@invalidate-primary-flip-overlay.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0:
- shard-dg2: NOTRUN -> [SKIP][321] ([i915#5190]) +1 other test skip
[321]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
- shard-rkl: NOTRUN -> [SKIP][322] ([i915#5289])
[322]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
- shard-dg1: NOTRUN -> [SKIP][323] ([i915#5289])
[323]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-12/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
- shard-mtlp: NOTRUN -> [SKIP][324] ([i915#5289])
[324]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-5/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-0.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270:
- shard-tglu: NOTRUN -> [SKIP][325] ([i915#5289]) +2 other tests skip
[325]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-9/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270.html
- shard-dg2-9: NOTRUN -> [SKIP][326] ([i915#12755] / [i915#5190])
[326]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270.html
* igt@kms_rotation_crc@sprite-rotation-270:
- shard-mtlp: NOTRUN -> [SKIP][327] ([i915#12755])
[327]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@kms_rotation_crc@sprite-rotation-270.html
* igt@kms_selftest@drm_framebuffer:
- shard-tglu-1: NOTRUN -> [ABORT][328] ([i915#13179]) +1 other test abort
[328]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_selftest@drm_framebuffer.html
- shard-dg1: NOTRUN -> [ABORT][329] ([i915#13179]) +1 other test abort
[329]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_selftest@drm_framebuffer.html
- shard-glk: NOTRUN -> [ABORT][330] ([i915#13179]) +1 other test abort
[330]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk6/igt@kms_selftest@drm_framebuffer.html
* igt@kms_selftest@drm_framebuffer@drm_test_framebuffer_free:
- shard-dg2: NOTRUN -> [ABORT][331] ([i915#13179]) +1 other test abort
[331]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_selftest@drm_framebuffer@drm_test_framebuffer_free.html
* igt@kms_vrr@flip-basic-fastset:
- shard-dg2-9: NOTRUN -> [SKIP][332] ([i915#9906])
[332]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@kms_vrr@flip-basic-fastset.html
* igt@kms_vrr@negative-basic:
- shard-tglu: NOTRUN -> [SKIP][333] ([i915#3555] / [i915#9906])
[333]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-4/igt@kms_vrr@negative-basic.html
* igt@kms_vrr@seamless-rr-switch-vrr:
- shard-dg2: NOTRUN -> [SKIP][334] ([i915#9906])
[334]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-5/igt@kms_vrr@seamless-rr-switch-vrr.html
* igt@kms_writeback@writeback-check-output:
- shard-rkl: NOTRUN -> [SKIP][335] ([i915#2437])
[335]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_writeback@writeback-check-output.html
- shard-tglu: NOTRUN -> [SKIP][336] ([i915#2437])
[336]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-3/igt@kms_writeback@writeback-check-output.html
* igt@kms_writeback@writeback-check-output-xrgb2101010:
- shard-tglu-1: NOTRUN -> [SKIP][337] ([i915#2437] / [i915#9412])
[337]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-1/igt@kms_writeback@writeback-check-output-xrgb2101010.html
* igt@kms_writeback@writeback-fb-id-xrgb2101010:
- shard-mtlp: NOTRUN -> [SKIP][338] ([i915#2437] / [i915#9412])
[338]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-5/igt@kms_writeback@writeback-fb-id-xrgb2101010.html
* igt@perf_pmu@module-unload:
- shard-glk10: NOTRUN -> [FAIL][339] ([i915#14433])
[339]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk10/igt@perf_pmu@module-unload.html
* igt@perf_pmu@render-node-busy-idle@vcs1:
- shard-dg2: [PASS][340] -> [FAIL][341] ([i915#4349]) +5 other tests fail
[340]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-3/igt@perf_pmu@render-node-busy-idle@vcs1.html
[341]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@perf_pmu@render-node-busy-idle@vcs1.html
* igt@prime_vgem@basic-write:
- shard-dg2: NOTRUN -> [SKIP][342] ([i915#3291] / [i915#3708])
[342]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-7/igt@prime_vgem@basic-write.html
* igt@prime_vgem@coherency-gtt:
- shard-rkl: NOTRUN -> [SKIP][343] ([i915#3708])
[343]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@prime_vgem@coherency-gtt.html
- shard-dg1: NOTRUN -> [SKIP][344] ([i915#3708] / [i915#4077])
[344]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@prime_vgem@coherency-gtt.html
- shard-mtlp: NOTRUN -> [SKIP][345] ([i915#3708] / [i915#4077])
[345]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@prime_vgem@coherency-gtt.html
* igt@prime_vgem@fence-read-hang:
- shard-mtlp: NOTRUN -> [SKIP][346] ([i915#3708])
[346]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-8/igt@prime_vgem@fence-read-hang.html
* igt@prime_vgem@fence-write-hang:
- shard-dg2-9: NOTRUN -> [SKIP][347] ([i915#3708]) +1 other test skip
[347]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@prime_vgem@fence-write-hang.html
* igt@sriov_basic@enable-vfs-bind-unbind-each:
- shard-dg2-9: NOTRUN -> [SKIP][348] ([i915#9917])
[348]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-9/igt@sriov_basic@enable-vfs-bind-unbind-each.html
#### Possible fixes ####
* igt@fbdev@unaligned-write:
- shard-rkl: [SKIP][349] ([i915#14544] / [i915#2582]) -> [PASS][350] +1 other test pass
[349]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@fbdev@unaligned-write.html
[350]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@fbdev@unaligned-write.html
* igt@gem_eio@in-flight-suspend:
- shard-rkl: [DMESG-WARN][351] ([i915#12964]) -> [PASS][352] +49 other tests pass
[351]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@gem_eio@in-flight-suspend.html
[352]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@gem_eio@in-flight-suspend.html
- shard-dg1: [DMESG-WARN][353] ([i915#4391] / [i915#4423]) -> [PASS][354]
[353]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-12/igt@gem_eio@in-flight-suspend.html
[354]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-16/igt@gem_eio@in-flight-suspend.html
* igt@gem_eio@reset-stress:
- shard-dg1: [FAIL][355] ([i915#5784]) -> [PASS][356]
[355]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-15/igt@gem_eio@reset-stress.html
[356]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@gem_eio@reset-stress.html
* igt@gem_pxp@fail-invalid-protected-context:
- shard-rkl: [TIMEOUT][357] ([i915#12964]) -> [PASS][358]
[357]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@gem_pxp@fail-invalid-protected-context.html
[358]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@gem_pxp@fail-invalid-protected-context.html
* igt@i915_selftest@live@gt_pm:
- shard-rkl: [DMESG-FAIL][359] ([i915#12942]) -> [PASS][360] +1 other test pass
[359]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@i915_selftest@live@gt_pm.html
[360]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@i915_selftest@live@gt_pm.html
* igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip:
- shard-mtlp: [FAIL][361] ([i915#5138]) -> [PASS][362]
[361]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-mtlp-6/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip.html
[362]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-1/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-dg2-rc-ccs-cc:
- shard-dg2: [ABORT][363] ([i915#8213]) -> [PASS][364]
[363]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-10/igt@kms_ccs@crc-primary-suspend-4-tiled-dg2-rc-ccs-cc.html
[364]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-3/igt@kms_ccs@crc-primary-suspend-4-tiled-dg2-rc-ccs-cc.html
* igt@kms_color@ctm-red-to-blue:
- shard-rkl: [SKIP][365] ([i915#12655] / [i915#14544]) -> [PASS][366] +1 other test pass
[365]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_color@ctm-red-to-blue.html
[366]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_color@ctm-red-to-blue.html
* igt@kms_cursor_crc@cursor-random-256x85@pipe-a-hdmi-a-1:
- shard-rkl: [FAIL][367] ([i915#13566]) -> [PASS][368] +2 other tests pass
[367]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_cursor_crc@cursor-random-256x85@pipe-a-hdmi-a-1.html
[368]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_cursor_crc@cursor-random-256x85@pipe-a-hdmi-a-1.html
* igt@kms_cursor_crc@cursor-sliding-64x21@pipe-a-hdmi-a-1:
- shard-tglu: [FAIL][369] ([i915#13566]) -> [PASS][370] +3 other tests pass
[369]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-tglu-8/igt@kms_cursor_crc@cursor-sliding-64x21@pipe-a-hdmi-a-1.html
[370]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-tglu-5/igt@kms_cursor_crc@cursor-sliding-64x21@pipe-a-hdmi-a-1.html
* igt@kms_cursor_legacy@basic-flip-after-cursor-atomic:
- shard-rkl: [SKIP][371] ([i915#11190] / [i915#14544]) -> [PASS][372] +2 other tests pass
[371]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html
[372]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html
* igt@kms_cursor_legacy@flip-vs-cursor-legacy:
- shard-rkl: [FAIL][373] ([i915#2346]) -> [PASS][374]
[373]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@kms_cursor_legacy@flip-vs-cursor-legacy.html
[374]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_cursor_legacy@flip-vs-cursor-legacy.html
* igt@kms_draw_crc@draw-method-mmap-gtt:
- shard-rkl: [SKIP][375] ([i915#14544]) -> [PASS][376] +52 other tests pass
[375]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_draw_crc@draw-method-mmap-gtt.html
[376]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_draw_crc@draw-method-mmap-gtt.html
* igt@kms_flip@bo-too-big:
- shard-rkl: [SKIP][377] ([i915#14544] / [i915#3637]) -> [PASS][378] +5 other tests pass
[377]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_flip@bo-too-big.html
[378]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_flip@bo-too-big.html
* igt@kms_flip@flip-vs-suspend:
- shard-rkl: [INCOMPLETE][379] ([i915#6113]) -> [PASS][380]
[379]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@kms_flip@flip-vs-suspend.html
[380]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_flip@flip-vs-suspend.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling:
- shard-rkl: [SKIP][381] ([i915#14544] / [i915#3555]) -> [PASS][382] +4 other tests pass
[381]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling.html
[382]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling.html
* igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-wc:
- shard-rkl: [SKIP][383] ([i915#14544] / [i915#1849] / [i915#5354]) -> [PASS][384] +7 other tests pass
[383]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-wc.html
[384]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-wc.html
* igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-pgflip-blt:
- shard-glk: [SKIP][385] -> [PASS][386]
[385]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-glk8/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-pgflip-blt.html
[386]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk9/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-pgflip-blt.html
* igt@kms_getfb@getfb-handle-protection:
- shard-dg1: [DMESG-WARN][387] ([i915#4423]) -> [PASS][388] +8 other tests pass
[387]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-14/igt@kms_getfb@getfb-handle-protection.html
[388]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-15/igt@kms_getfb@getfb-handle-protection.html
* igt@kms_hdr@static-toggle-suspend:
- shard-dg2: [SKIP][389] ([i915#3555] / [i915#8228]) -> [PASS][390]
[389]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-3/igt@kms_hdr@static-toggle-suspend.html
[390]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_hdr@static-toggle-suspend.html
* igt@kms_joiner@invalid-modeset-force-big-joiner:
- shard-dg2: [SKIP][391] ([i915#12388]) -> [PASS][392]
[391]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-3/igt@kms_joiner@invalid-modeset-force-big-joiner.html
[392]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_joiner@invalid-modeset-force-big-joiner.html
* igt@kms_plane@plane-panning-top-left:
- shard-rkl: [SKIP][393] ([i915#14544] / [i915#8825]) -> [PASS][394]
[393]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane@plane-panning-top-left.html
[394]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_plane@plane-panning-top-left.html
* igt@kms_plane_alpha_blend@constant-alpha-max:
- shard-rkl: [SKIP][395] ([i915#14544] / [i915#7294]) -> [PASS][396] +1 other test pass
[395]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_alpha_blend@constant-alpha-max.html
[396]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_plane_alpha_blend@constant-alpha-max.html
* igt@kms_plane_scaling@invalid-parameters:
- shard-rkl: [SKIP][397] ([i915#14544] / [i915#8152]) -> [PASS][398]
[397]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_scaling@invalid-parameters.html
[398]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_plane_scaling@invalid-parameters.html
* igt@kms_plane_scaling@planes-downscale-factor-0-5-unity-scaling@pipe-a:
- shard-rkl: [SKIP][399] ([i915#12247] / [i915#14544]) -> [PASS][400] +2 other tests pass
[399]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_scaling@planes-downscale-factor-0-5-unity-scaling@pipe-a.html
[400]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_plane_scaling@planes-downscale-factor-0-5-unity-scaling@pipe-a.html
* igt@kms_plane_scaling@planes-downscale-factor-0-75:
- shard-rkl: [SKIP][401] ([i915#12247] / [i915#14544] / [i915#3555] / [i915#6953] / [i915#8152]) -> [PASS][402]
[401]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_scaling@planes-downscale-factor-0-75.html
[402]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_plane_scaling@planes-downscale-factor-0-75.html
* igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75:
- shard-rkl: [SKIP][403] ([i915#14544] / [i915#3555] / [i915#6953] / [i915#8152]) -> [PASS][404]
[403]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75.html
[404]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75.html
* igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75@pipe-b:
- shard-rkl: [SKIP][405] ([i915#12247] / [i915#14544] / [i915#8152]) -> [PASS][406] +3 other tests pass
[405]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75@pipe-b.html
[406]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75@pipe-b.html
* igt@kms_pm_rpm@cursor-dpms:
- shard-rkl: [SKIP][407] ([i915#14544] / [i915#1849]) -> [PASS][408]
[407]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_pm_rpm@cursor-dpms.html
[408]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_pm_rpm@cursor-dpms.html
* igt@perf_pmu@most-busy-idle-check-all:
- shard-dg1: [FAIL][409] ([i915#11943]) -> [PASS][410] +1 other test pass
[409]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-17/igt@perf_pmu@most-busy-idle-check-all.html
[410]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-18/igt@perf_pmu@most-busy-idle-check-all.html
- shard-mtlp: [FAIL][411] ([i915#11943]) -> [PASS][412] +1 other test pass
[411]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-mtlp-7/igt@perf_pmu@most-busy-idle-check-all.html
[412]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-mtlp-2/igt@perf_pmu@most-busy-idle-check-all.html
#### Warnings ####
* igt@api_intel_bb@blit-reloc-purge-cache:
- shard-rkl: [SKIP][413] ([i915#14544] / [i915#8411]) -> [SKIP][414] ([i915#8411])
[413]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@api_intel_bb@blit-reloc-purge-cache.html
[414]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@api_intel_bb@blit-reloc-purge-cache.html
* igt@gem_create@create-ext-cpu-access-big:
- shard-rkl: [SKIP][415] ([i915#6335]) -> [SKIP][416] ([i915#14544] / [i915#6335])
[415]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@gem_create@create-ext-cpu-access-big.html
[416]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_create@create-ext-cpu-access-big.html
* igt@gem_exec_balancer@parallel-contexts:
- shard-rkl: [SKIP][417] ([i915#4525]) -> [SKIP][418] ([i915#14544] / [i915#4525])
[417]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@gem_exec_balancer@parallel-contexts.html
[418]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_exec_balancer@parallel-contexts.html
* igt@gem_exec_reloc@basic-write-read:
- shard-rkl: [SKIP][419] ([i915#3281]) -> [SKIP][420] ([i915#14544] / [i915#3281]) +3 other tests skip
[419]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@gem_exec_reloc@basic-write-read.html
[420]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_exec_reloc@basic-write-read.html
* igt@gem_exec_reloc@basic-write-read-noreloc:
- shard-rkl: [SKIP][421] ([i915#14544] / [i915#3281]) -> [SKIP][422] ([i915#3281]) +8 other tests skip
[421]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_exec_reloc@basic-write-read-noreloc.html
[422]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@gem_exec_reloc@basic-write-read-noreloc.html
* igt@gem_exec_schedule@semaphore-power:
- shard-rkl: [SKIP][423] ([i915#14544] / [i915#7276]) -> [SKIP][424] ([i915#7276])
[423]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_exec_schedule@semaphore-power.html
[424]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@gem_exec_schedule@semaphore-power.html
* igt@gem_huc_copy@huc-copy:
- shard-rkl: [SKIP][425] ([i915#14544] / [i915#2190]) -> [SKIP][426] ([i915#2190])
[425]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_huc_copy@huc-copy.html
[426]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@gem_huc_copy@huc-copy.html
* igt@gem_lmem_evict@dontneed-evict-race:
- shard-rkl: [SKIP][427] ([i915#4613] / [i915#7582]) -> [SKIP][428] ([i915#14544] / [i915#4613] / [i915#7582])
[427]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@gem_lmem_evict@dontneed-evict-race.html
[428]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_lmem_evict@dontneed-evict-race.html
* igt@gem_lmem_swapping@heavy-verify-multi-ccs:
- shard-rkl: [SKIP][429] ([i915#14544] / [i915#4613]) -> [SKIP][430] ([i915#4613]) +1 other test skip
[429]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_lmem_swapping@heavy-verify-multi-ccs.html
[430]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@gem_lmem_swapping@heavy-verify-multi-ccs.html
* igt@gem_lmem_swapping@verify-random:
- shard-rkl: [SKIP][431] ([i915#4613]) -> [SKIP][432] ([i915#14544] / [i915#4613]) +1 other test skip
[431]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@gem_lmem_swapping@verify-random.html
[432]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_lmem_swapping@verify-random.html
* igt@gem_pread@display:
- shard-rkl: [SKIP][433] ([i915#3282]) -> [SKIP][434] ([i915#14544] / [i915#3282])
[433]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-3/igt@gem_pread@display.html
[434]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_pread@display.html
* igt@gem_pxp@hw-rejects-pxp-buffer:
- shard-rkl: [TIMEOUT][435] ([i915#12917] / [i915#12964]) -> [SKIP][436] ([i915#13717])
[435]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@gem_pxp@hw-rejects-pxp-buffer.html
[436]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@gem_pxp@hw-rejects-pxp-buffer.html
* igt@gem_tiled_pread_pwrite:
- shard-rkl: [SKIP][437] ([i915#14544] / [i915#3282]) -> [SKIP][438] ([i915#3282]) +3 other tests skip
[437]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_tiled_pread_pwrite.html
[438]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@gem_tiled_pread_pwrite.html
* igt@gem_userptr_blits@forbidden-operations:
- shard-rkl: [SKIP][439] ([i915#3282] / [i915#3297]) -> [SKIP][440] ([i915#14544] / [i915#3282] / [i915#3297])
[439]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@gem_userptr_blits@forbidden-operations.html
[440]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gem_userptr_blits@forbidden-operations.html
* igt@gem_userptr_blits@unsync-unmap-cycles:
- shard-rkl: [SKIP][441] ([i915#14544] / [i915#3297]) -> [SKIP][442] ([i915#3297]) +1 other test skip
[441]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gem_userptr_blits@unsync-unmap-cycles.html
[442]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@gem_userptr_blits@unsync-unmap-cycles.html
* igt@gen9_exec_parse@allowed-all:
- shard-rkl: [SKIP][443] ([i915#2527]) -> [SKIP][444] ([i915#14544] / [i915#2527])
[443]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@gen9_exec_parse@allowed-all.html
[444]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@gen9_exec_parse@allowed-all.html
* igt@gen9_exec_parse@basic-rejected-ctx-param:
- shard-rkl: [SKIP][445] ([i915#14544] / [i915#2527]) -> [SKIP][446] ([i915#2527])
[445]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@gen9_exec_parse@basic-rejected-ctx-param.html
[446]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@gen9_exec_parse@basic-rejected-ctx-param.html
* igt@i915_pm_freq_api@freq-basic-api:
- shard-rkl: [SKIP][447] ([i915#8399]) -> [SKIP][448] ([i915#14544] / [i915#8399])
[447]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@i915_pm_freq_api@freq-basic-api.html
[448]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@i915_pm_freq_api@freq-basic-api.html
* igt@i915_pm_rc6_residency@rc6-idle:
- shard-rkl: [SKIP][449] ([i915#14498]) -> [SKIP][450] ([i915#14498] / [i915#14544])
[449]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@i915_pm_rc6_residency@rc6-idle.html
[450]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@i915_pm_rc6_residency@rc6-idle.html
* igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip:
- shard-rkl: [SKIP][451] ([i915#14544]) -> [SKIP][452] ([i915#5286]) +2 other tests skip
[451]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip.html
[452]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip.html
* igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip:
- shard-rkl: [SKIP][453] ([i915#5286]) -> [SKIP][454] ([i915#14544]) +2 other tests skip
[453]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
[454]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
* igt@kms_big_fb@linear-64bpp-rotate-90:
- shard-rkl: [SKIP][455] ([i915#14544]) -> [SKIP][456] ([i915#3638])
[455]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_big_fb@linear-64bpp-rotate-90.html
[456]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_big_fb@linear-64bpp-rotate-90.html
* igt@kms_big_fb@linear-8bpp-rotate-270:
- shard-rkl: [SKIP][457] ([i915#3638]) -> [SKIP][458] ([i915#14544])
[457]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_big_fb@linear-8bpp-rotate-270.html
[458]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_big_fb@linear-8bpp-rotate-270.html
* igt@kms_big_fb@y-tiled-64bpp-rotate-270:
- shard-dg1: [SKIP][459] ([i915#3638] / [i915#4423]) -> [SKIP][460] ([i915#3638]) +1 other test skip
[459]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-17/igt@kms_big_fb@y-tiled-64bpp-rotate-270.html
[460]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-13/igt@kms_big_fb@y-tiled-64bpp-rotate-270.html
* igt@kms_big_fb@yf-tiled-16bpp-rotate-270:
- shard-rkl: [SKIP][461] ([i915#14544]) -> [SKIP][462] +10 other tests skip
[461]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_big_fb@yf-tiled-16bpp-rotate-270.html
[462]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_big_fb@yf-tiled-16bpp-rotate-270.html
* igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180:
- shard-rkl: [SKIP][463] -> [SKIP][464] ([i915#14544]) +10 other tests skip
[463]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180.html
[464]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180.html
* igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs-cc:
- shard-rkl: [SKIP][465] ([i915#14544]) -> [SKIP][466] ([i915#14098] / [i915#6095]) +12 other tests skip
[465]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs-cc.html
[466]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs-cc.html
* igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs@pipe-b-hdmi-a-2:
- shard-rkl: [SKIP][467] ([i915#6095]) -> [SKIP][468] ([i915#14098] / [i915#6095])
[467]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs@pipe-b-hdmi-a-2.html
[468]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_ccs@bad-pixel-format-4-tiled-mtl-rc-ccs@pipe-b-hdmi-a-2.html
* igt@kms_ccs@ccs-on-another-bo-4-tiled-mtl-mc-ccs@pipe-b-hdmi-a-2:
- shard-rkl: [SKIP][469] ([i915#14098] / [i915#6095]) -> [SKIP][470] ([i915#6095]) +2 other tests skip
[469]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_ccs@ccs-on-another-bo-4-tiled-mtl-mc-ccs@pipe-b-hdmi-a-2.html
[470]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_ccs@ccs-on-another-bo-4-tiled-mtl-mc-ccs@pipe-b-hdmi-a-2.html
* igt@kms_ccs@crc-primary-rotation-180-4-tiled-lnl-ccs:
- shard-rkl: [SKIP][471] ([i915#12313]) -> [SKIP][472] ([i915#14544]) +1 other test skip
[471]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_ccs@crc-primary-rotation-180-4-tiled-lnl-ccs.html
[472]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_ccs@crc-primary-rotation-180-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs:
- shard-rkl: [SKIP][473] ([i915#12805]) -> [SKIP][474] ([i915#14544])
[473]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs.html
[474]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_ccs@crc-primary-suspend-4-tiled-lnl-ccs.html
* igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs:
- shard-rkl: [SKIP][475] ([i915#14098] / [i915#6095]) -> [SKIP][476] ([i915#14544]) +5 other tests skip
[475]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs.html
[476]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_ccs@crc-primary-suspend-yf-tiled-ccs.html
* igt@kms_ccs@crc-sprite-planes-basic-4-tiled-dg2-mc-ccs@pipe-a-hdmi-a-4:
- shard-dg1: [SKIP][477] ([i915#4423] / [i915#6095]) -> [SKIP][478] ([i915#6095]) +1 other test skip
[477]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-17/igt@kms_ccs@crc-sprite-planes-basic-4-tiled-dg2-mc-ccs@pipe-a-hdmi-a-4.html
[478]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_ccs@crc-sprite-planes-basic-4-tiled-dg2-mc-ccs@pipe-a-hdmi-a-4.html
* igt@kms_cdclk@mode-transition-all-outputs:
- shard-rkl: [SKIP][479] ([i915#3742]) -> [SKIP][480] ([i915#14544] / [i915#3742])
[479]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_cdclk@mode-transition-all-outputs.html
[480]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_cdclk@mode-transition-all-outputs.html
* igt@kms_chamelium_frames@hdmi-crc-fast:
- shard-rkl: [SKIP][481] ([i915#11151] / [i915#14544] / [i915#7828]) -> [SKIP][482] ([i915#11151] / [i915#7828]) +7 other tests skip
[481]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_chamelium_frames@hdmi-crc-fast.html
[482]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_chamelium_frames@hdmi-crc-fast.html
* igt@kms_chamelium_frames@hdmi-frame-dump:
- shard-rkl: [SKIP][483] ([i915#11151] / [i915#7828]) -> [SKIP][484] ([i915#11151] / [i915#14544] / [i915#7828]) +3 other tests skip
[483]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_chamelium_frames@hdmi-frame-dump.html
[484]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_chamelium_frames@hdmi-frame-dump.html
* igt@kms_content_protection@dp-mst-lic-type-0:
- shard-rkl: [SKIP][485] ([i915#14544]) -> [SKIP][486] ([i915#3116])
[485]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_content_protection@dp-mst-lic-type-0.html
[486]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_content_protection@dp-mst-lic-type-0.html
* igt@kms_content_protection@dp-mst-lic-type-1:
- shard-rkl: [SKIP][487] ([i915#3116]) -> [SKIP][488] ([i915#14544])
[487]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_content_protection@dp-mst-lic-type-1.html
[488]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_content_protection@dp-mst-lic-type-1.html
* igt@kms_content_protection@lic-type-0:
- shard-dg2: [SKIP][489] ([i915#9424]) -> [FAIL][490] ([i915#7173])
[489]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-5/igt@kms_content_protection@lic-type-0.html
[490]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-11/igt@kms_content_protection@lic-type-0.html
* igt@kms_content_protection@mei-interface:
- shard-dg1: [SKIP][491] ([i915#9433]) -> [SKIP][492] ([i915#9424])
[491]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-12/igt@kms_content_protection@mei-interface.html
[492]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-17/igt@kms_content_protection@mei-interface.html
* igt@kms_cursor_crc@cursor-onscreen-512x512:
- shard-rkl: [SKIP][493] ([i915#13049]) -> [SKIP][494] ([i915#14544]) +3 other tests skip
[493]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_cursor_crc@cursor-onscreen-512x512.html
[494]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_cursor_crc@cursor-onscreen-512x512.html
* igt@kms_cursor_crc@cursor-rapid-movement-32x10:
- shard-rkl: [SKIP][495] ([i915#14544]) -> [SKIP][496] ([i915#3555]) +3 other tests skip
[495]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_cursor_crc@cursor-rapid-movement-32x10.html
[496]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_cursor_crc@cursor-rapid-movement-32x10.html
* igt@kms_cursor_crc@cursor-sliding-64x21:
- shard-rkl: [DMESG-FAIL][497] ([i915#12964]) -> [FAIL][498] ([i915#13566])
[497]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_cursor_crc@cursor-sliding-64x21.html
[498]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_cursor_crc@cursor-sliding-64x21.html
* igt@kms_cursor_crc@cursor-sliding-max-size:
- shard-rkl: [SKIP][499] ([i915#3555]) -> [SKIP][500] ([i915#14544]) +1 other test skip
[499]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_cursor_crc@cursor-sliding-max-size.html
[500]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_cursor_crc@cursor-sliding-max-size.html
* igt@kms_cursor_legacy@flip-vs-cursor-toggle:
- shard-rkl: [SKIP][501] ([i915#14544]) -> [FAIL][502] ([i915#2346])
[501]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
[502]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
* igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions:
- shard-rkl: [SKIP][503] ([i915#14544]) -> [SKIP][504] ([i915#4103])
[503]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
[504]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions.html
* igt@kms_dirtyfb@drrs-dirtyfb-ioctl:
- shard-dg1: [SKIP][505] ([i915#9723]) -> [SKIP][506] ([i915#4423] / [i915#9723])
[505]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-15/igt@kms_dirtyfb@drrs-dirtyfb-ioctl.html
[506]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_dirtyfb@drrs-dirtyfb-ioctl.html
* igt@kms_dirtyfb@psr-dirtyfb-ioctl:
- shard-rkl: [SKIP][507] ([i915#14544]) -> [SKIP][508] ([i915#9723])
[507]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_dirtyfb@psr-dirtyfb-ioctl.html
[508]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_dirtyfb@psr-dirtyfb-ioctl.html
* igt@kms_dither@fb-8bpc-vs-panel-6bpc:
- shard-rkl: [SKIP][509] ([i915#3555] / [i915#3804]) -> [SKIP][510] ([i915#14544])
[509]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
[510]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_dither@fb-8bpc-vs-panel-6bpc.html
* igt@kms_dp_link_training@uhbr-sst:
- shard-rkl: [SKIP][511] ([i915#14544]) -> [SKIP][512] ([i915#13748])
[511]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_dp_link_training@uhbr-sst.html
[512]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_dp_link_training@uhbr-sst.html
* igt@kms_dsc@dsc-basic:
- shard-rkl: [SKIP][513] ([i915#3555] / [i915#3840]) -> [SKIP][514] ([i915#11190] / [i915#14544])
[513]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_dsc@dsc-basic.html
[514]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_dsc@dsc-basic.html
* igt@kms_dsc@dsc-with-formats:
- shard-rkl: [SKIP][515] ([i915#14544]) -> [SKIP][516] ([i915#3555] / [i915#3840])
[515]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_dsc@dsc-with-formats.html
[516]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_dsc@dsc-with-formats.html
* igt@kms_fbcon_fbt@psr:
- shard-rkl: [SKIP][517] ([i915#3955]) -> [SKIP][518] ([i915#14544] / [i915#3955])
[517]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_fbcon_fbt@psr.html
[518]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_fbcon_fbt@psr.html
* igt@kms_feature_discovery@display-2x:
- shard-rkl: [SKIP][519] ([i915#14544] / [i915#1839]) -> [SKIP][520] ([i915#1839])
[519]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_feature_discovery@display-2x.html
[520]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_feature_discovery@display-2x.html
* igt@kms_feature_discovery@psr1:
- shard-rkl: [SKIP][521] ([i915#658]) -> [SKIP][522] ([i915#14544] / [i915#658])
[521]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_feature_discovery@psr1.html
[522]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_feature_discovery@psr1.html
* igt@kms_feature_discovery@psr2:
- shard-rkl: [SKIP][523] ([i915#14544] / [i915#658]) -> [SKIP][524] ([i915#658])
[523]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_feature_discovery@psr2.html
[524]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_feature_discovery@psr2.html
* igt@kms_flip@2x-blocking-wf_vblank:
- shard-rkl: [SKIP][525] ([i915#14544] / [i915#9934]) -> [SKIP][526] ([i915#9934]) +5 other tests skip
[525]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_flip@2x-blocking-wf_vblank.html
[526]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_flip@2x-blocking-wf_vblank.html
* igt@kms_flip@2x-flip-vs-suspend:
- shard-glk: [INCOMPLETE][527] ([i915#12745] / [i915#4839]) -> [SKIP][528]
[527]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-glk1/igt@kms_flip@2x-flip-vs-suspend.html
[528]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-glk8/igt@kms_flip@2x-flip-vs-suspend.html
* igt@kms_flip@2x-modeset-vs-vblank-race:
- shard-rkl: [SKIP][529] ([i915#9934]) -> [SKIP][530] ([i915#14544] / [i915#9934]) +4 other tests skip
[529]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_flip@2x-modeset-vs-vblank-race.html
[530]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_flip@2x-modeset-vs-vblank-race.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling:
- shard-rkl: [SKIP][531] ([i915#2672] / [i915#3555]) -> [SKIP][532] ([i915#14544] / [i915#3555]) +1 other test skip
[531]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling.html
[532]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling.html
* igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-downscaling:
- shard-rkl: [SKIP][533] ([i915#14544] / [i915#3555]) -> [SKIP][534] ([i915#2672] / [i915#3555]) +1 other test skip
[533]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-downscaling.html
[534]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-3/igt@kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-downscaling.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-pwrite:
- shard-dg2: [SKIP][535] ([i915#10433] / [i915#3458]) -> [SKIP][536] ([i915#3458]) +1 other test skip
[535]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-pwrite.html
[536]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-pwrite.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-gtt:
- shard-rkl: [SKIP][537] ([i915#1825]) -> [SKIP][538] ([i915#14544] / [i915#1849] / [i915#5354]) +19 other tests skip
[537]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-gtt.html
[538]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-gtt.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-shrfb-fliptrack-mmap-gtt:
- shard-rkl: [SKIP][539] -> [SKIP][540] ([i915#14544] / [i915#1849] / [i915#5354])
[539]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_frontbuffer_tracking@fbcpsr-2p-shrfb-fliptrack-mmap-gtt.html
[540]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_frontbuffer_tracking@fbcpsr-2p-shrfb-fliptrack-mmap-gtt.html
* igt@kms_frontbuffer_tracking@psr-1p-primscrn-indfb-plflip-blt:
- shard-rkl: [SKIP][541] ([i915#14544] / [i915#1849] / [i915#5354]) -> [SKIP][542] ([i915#3023]) +17 other tests skip
[541]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_frontbuffer_tracking@psr-1p-primscrn-indfb-plflip-blt.html
[542]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_frontbuffer_tracking@psr-1p-primscrn-indfb-plflip-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-msflip-blt:
- shard-rkl: [SKIP][543] ([i915#14544] / [i915#1849] / [i915#5354]) -> [SKIP][544] ([i915#1825]) +30 other tests skip
[543]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-msflip-blt.html
[544]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-msflip-blt.html
* igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary:
- shard-dg2: [SKIP][545] ([i915#3458]) -> [SKIP][546] ([i915#10433] / [i915#3458]) +3 other tests skip
[545]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg2-1/igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary.html
[546]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg2-4/igt@kms_frontbuffer_tracking@psr-indfb-scaledprimary.html
* igt@kms_frontbuffer_tracking@psr-suspend:
- shard-rkl: [SKIP][547] ([i915#3023]) -> [SKIP][548] ([i915#14544] / [i915#1849] / [i915#5354]) +10 other tests skip
[547]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_frontbuffer_tracking@psr-suspend.html
[548]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_frontbuffer_tracking@psr-suspend.html
* igt@kms_hdr@brightness-with-hdr:
- shard-rkl: [SKIP][549] ([i915#14544]) -> [SKIP][550] ([i915#12713])
[549]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_hdr@brightness-with-hdr.html
[550]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_hdr@brightness-with-hdr.html
* igt@kms_hdr@static-toggle:
- shard-rkl: [SKIP][551] ([i915#14544]) -> [SKIP][552] ([i915#3555] / [i915#8228])
[551]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_hdr@static-toggle.html
[552]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_hdr@static-toggle.html
* igt@kms_joiner@basic-big-joiner:
- shard-rkl: [SKIP][553] ([i915#10656]) -> [SKIP][554] ([i915#10656] / [i915#14544])
[553]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_joiner@basic-big-joiner.html
[554]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_joiner@basic-big-joiner.html
* igt@kms_joiner@basic-force-ultra-joiner:
- shard-rkl: [SKIP][555] ([i915#12394]) -> [SKIP][556] ([i915#12394] / [i915#14544])
[555]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_joiner@basic-force-ultra-joiner.html
[556]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_joiner@basic-force-ultra-joiner.html
* igt@kms_joiner@basic-max-non-joiner:
- shard-rkl: [SKIP][557] ([i915#13688] / [i915#14544]) -> [SKIP][558] ([i915#13688])
[557]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_joiner@basic-max-non-joiner.html
[558]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_joiner@basic-max-non-joiner.html
* igt@kms_joiner@invalid-modeset-ultra-joiner:
- shard-rkl: [SKIP][559] ([i915#12339]) -> [SKIP][560] ([i915#12339] / [i915#14544])
[559]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_joiner@invalid-modeset-ultra-joiner.html
[560]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_joiner@invalid-modeset-ultra-joiner.html
* igt@kms_multipipe_modeset@basic-max-pipe-crc-check:
- shard-rkl: [SKIP][561] ([i915#14544] / [i915#4070] / [i915#4816]) -> [SKIP][562] ([i915#1839] / [i915#4816])
[561]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_multipipe_modeset@basic-max-pipe-crc-check.html
[562]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_multipipe_modeset@basic-max-pipe-crc-check.html
* igt@kms_plane_multiple@2x-tiling-yf:
- shard-rkl: [SKIP][563] ([i915#14544]) -> [SKIP][564] ([i915#13958])
[563]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_multiple@2x-tiling-yf.html
[564]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_plane_multiple@2x-tiling-yf.html
* igt@kms_plane_multiple@tiling-yf:
- shard-rkl: [SKIP][565] ([i915#14544]) -> [SKIP][566] ([i915#14259])
[565]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_plane_multiple@tiling-yf.html
[566]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_plane_multiple@tiling-yf.html
* igt@kms_plane_scaling@2x-scaler-multi-pipe:
- shard-rkl: [SKIP][567] -> [SKIP][568] ([i915#14544] / [i915#8152])
[567]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
[568]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
* igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-a:
- shard-rkl: [SKIP][569] ([i915#12247]) -> [SKIP][570] ([i915#12247] / [i915#14544])
[569]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-a.html
[570]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-a.html
* igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-b:
- shard-rkl: [SKIP][571] ([i915#12247]) -> [SKIP][572] ([i915#12247] / [i915#14544] / [i915#8152]) +1 other test skip
[571]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-b.html
[572]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_plane_scaling@plane-scaler-unity-scaling-with-rotation@pipe-b.html
* igt@kms_pm_backlight@basic-brightness:
- shard-rkl: [SKIP][573] ([i915#14544] / [i915#5354]) -> [SKIP][574] ([i915#5354])
[573]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_pm_backlight@basic-brightness.html
[574]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_pm_backlight@basic-brightness.html
* igt@kms_pm_backlight@brightness-with-dpms:
- shard-rkl: [SKIP][575] ([i915#12343] / [i915#14544]) -> [SKIP][576] ([i915#12343])
[575]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_pm_backlight@brightness-with-dpms.html
[576]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_pm_backlight@brightness-with-dpms.html
* igt@kms_pm_dc@dc3co-vpb-simulation:
- shard-rkl: [SKIP][577] ([i915#9685]) -> [SKIP][578] ([i915#14544] / [i915#9685])
[577]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_pm_dc@dc3co-vpb-simulation.html
[578]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_pm_dc@dc3co-vpb-simulation.html
* igt@kms_pm_dc@dc6-dpms:
- shard-rkl: [SKIP][579] ([i915#3361]) -> [FAIL][580] ([i915#9295])
[579]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_pm_dc@dc6-dpms.html
[580]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-7/igt@kms_pm_dc@dc6-dpms.html
* igt@kms_pm_lpsp@kms-lpsp:
- shard-rkl: [SKIP][581] ([i915#3828]) -> [SKIP][582] ([i915#9340])
[581]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_pm_lpsp@kms-lpsp.html
[582]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_pm_lpsp@kms-lpsp.html
* igt@kms_pm_rpm@modeset-lpsp-stress:
- shard-rkl: [SKIP][583] ([i915#9519]) -> [SKIP][584] ([i915#14544] / [i915#9519])
[583]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_pm_rpm@modeset-lpsp-stress.html
[584]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_pm_rpm@modeset-lpsp-stress.html
* igt@kms_prime@basic-crc-hybrid:
- shard-rkl: [SKIP][585] ([i915#6524]) -> [SKIP][586] ([i915#14544] / [i915#6524])
[585]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-4/igt@kms_prime@basic-crc-hybrid.html
[586]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_prime@basic-crc-hybrid.html
* igt@kms_prime@d3hot:
- shard-rkl: [SKIP][587] ([i915#14544] / [i915#6524]) -> [SKIP][588] ([i915#6524])
[587]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_prime@d3hot.html
[588]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_prime@d3hot.html
* igt@kms_psr2_sf@fbc-pr-primary-plane-update-sf-dmg-area:
- shard-rkl: [SKIP][589] ([i915#11520] / [i915#14544]) -> [SKIP][590] ([i915#11520]) +4 other tests skip
[589]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_psr2_sf@fbc-pr-primary-plane-update-sf-dmg-area.html
[590]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_psr2_sf@fbc-pr-primary-plane-update-sf-dmg-area.html
* igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf:
- shard-rkl: [SKIP][591] ([i915#11520]) -> [SKIP][592] ([i915#11520] / [i915#14544]) +4 other tests skip
[591]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf.html
[592]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_psr2_sf@pr-overlay-plane-update-continuous-sf.html
* igt@kms_psr2_su@page_flip-nv12:
- shard-rkl: [SKIP][593] ([i915#9683]) -> [SKIP][594] ([i915#14544] / [i915#9683]) +1 other test skip
[593]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@kms_psr2_su@page_flip-nv12.html
[594]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_psr2_su@page_flip-nv12.html
* igt@kms_psr@fbc-psr2-cursor-plane-move:
- shard-dg1: [SKIP][595] ([i915#1072] / [i915#9732]) -> [SKIP][596] ([i915#1072] / [i915#4423] / [i915#9732])
[595]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-dg1-15/igt@kms_psr@fbc-psr2-cursor-plane-move.html
[596]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-dg1-19/igt@kms_psr@fbc-psr2-cursor-plane-move.html
* igt@kms_psr@psr2-cursor-mmap-gtt:
- shard-rkl: [SKIP][597] ([i915#1072] / [i915#9732]) -> [SKIP][598] ([i915#1072] / [i915#14544] / [i915#9732]) +10 other tests skip
[597]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_psr@psr2-cursor-mmap-gtt.html
[598]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_psr@psr2-cursor-mmap-gtt.html
* igt@kms_psr@psr2-primary-mmap-gtt:
- shard-rkl: [SKIP][599] ([i915#1072] / [i915#14544] / [i915#9732]) -> [SKIP][600] ([i915#1072] / [i915#9732]) +15 other tests skip
[599]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_psr@psr2-primary-mmap-gtt.html
[600]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-2/igt@kms_psr@psr2-primary-mmap-gtt.html
* igt@kms_rotation_crc@primary-4-tiled-reflect-x-180:
- shard-rkl: [SKIP][601] ([i915#14544]) -> [SKIP][602] ([i915#5289])
[601]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_rotation_crc@primary-4-tiled-reflect-x-180.html
[602]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_rotation_crc@primary-4-tiled-reflect-x-180.html
* igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270:
- shard-rkl: [SKIP][603] ([i915#5289]) -> [SKIP][604] ([i915#14544])
[603]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-2/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270.html
[604]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-270.html
* igt@kms_setmode@invalid-clone-exclusive-crtc:
- shard-rkl: [SKIP][605] ([i915#14544] / [i915#3555]) -> [SKIP][606] ([i915#3555])
[605]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_setmode@invalid-clone-exclusive-crtc.html
[606]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-5/igt@kms_setmode@invalid-clone-exclusive-crtc.html
* igt@kms_tiled_display@basic-test-pattern:
- shard-rkl: [SKIP][607] ([i915#14544]) -> [SKIP][608] ([i915#8623])
[607]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_tiled_display@basic-test-pattern.html
[608]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-8/igt@kms_tiled_display@basic-test-pattern.html
* igt@kms_vblank@ts-continuation-modeset-rpm:
- shard-rkl: [DMESG-WARN][609] ([i915#12964]) -> [SKIP][610] ([i915#14544]) +1 other test skip
[609]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-7/igt@kms_vblank@ts-continuation-modeset-rpm.html
[610]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_vblank@ts-continuation-modeset-rpm.html
* igt@kms_vrr@flip-basic-fastset:
- shard-rkl: [SKIP][611] ([i915#9906]) -> [SKIP][612] ([i915#14544])
[611]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@kms_vrr@flip-basic-fastset.html
[612]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@kms_vrr@flip-basic-fastset.html
* igt@kms_vrr@seamless-rr-switch-virtual:
- shard-rkl: [SKIP][613] ([i915#14544]) -> [SKIP][614] ([i915#9906])
[613]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-6/igt@kms_vrr@seamless-rr-switch-virtual.html
[614]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-4/igt@kms_vrr@seamless-rr-switch-virtual.html
* igt@perf@mi-rpc:
- shard-rkl: [SKIP][615] ([i915#2434]) -> [SKIP][616] ([i915#14544] / [i915#2434])
[615]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@perf@mi-rpc.html
[616]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@perf@mi-rpc.html
* igt@prime_vgem@basic-read:
- shard-rkl: [SKIP][617] ([i915#3291] / [i915#3708]) -> [SKIP][618] ([i915#14544] / [i915#3291] / [i915#3708])
[617]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-5/igt@prime_vgem@basic-read.html
[618]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@prime_vgem@basic-read.html
* igt@prime_vgem@fence-write-hang:
- shard-rkl: [SKIP][619] ([i915#3708]) -> [SKIP][620] ([i915#14544] / [i915#3708])
[619]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@prime_vgem@fence-write-hang.html
[620]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@prime_vgem@fence-write-hang.html
* igt@sriov_basic@enable-vfs-bind-unbind-each:
- shard-rkl: [SKIP][621] ([i915#9917]) -> [SKIP][622] ([i915#14544] / [i915#9917])
[621]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_17086/shard-rkl-8/igt@sriov_basic@enable-vfs-bind-unbind-each.html
[622]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/shard-rkl-6/igt@sriov_basic@enable-vfs-bind-unbind-each.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[i915#10055]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10055
[i915#10056]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10056
[i915#10307]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10307
[i915#10433]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10433
[i915#10434]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10434
[i915#10656]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10656
[i915#1072]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1072
[i915#11078]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11078
[i915#11151]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11151
[i915#11190]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11190
[i915#11520]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11520
[i915#11681]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11681
[i915#11943]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/11943
[i915#12061]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12061
[i915#12247]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12247
[i915#12313]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12313
[i915#12316]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12316
[i915#12339]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12339
[i915#12343]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12343
[i915#12388]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12388
[i915#12394]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12394
[i915#12655]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12655
[i915#12713]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12713
[i915#12745]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12745
[i915#12755]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12755
[i915#12761]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12761
[i915#12796]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12796
[i915#12805]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12805
[i915#12917]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12917
[i915#12942]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12942
[i915#12964]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12964
[i915#13046]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13046
[i915#13049]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13049
[i915#13179]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13179
[i915#13356]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13356
[i915#13398]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13398
[i915#13476]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13476
[i915#13566]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13566
[i915#13688]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13688
[i915#13717]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13717
[i915#13748]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13748
[i915#13783]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13783
[i915#13784]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13784
[i915#13790]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13790
[i915#13820]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13820
[i915#13958]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13958
[i915#14098]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14098
[i915#14118]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14118
[i915#14259]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14259
[i915#14433]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14433
[i915#14498]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14498
[i915#14544]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14544
[i915#14600]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14600
[i915#14702]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14702
[i915#14712]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14712
[i915#14850]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14850
[i915#1769]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1769
[i915#1825]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1825
[i915#1839]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1839
[i915#1849]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/1849
[i915#2065]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2065
[i915#2190]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2190
[i915#2346]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2346
[i915#2434]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2434
[i915#2437]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2437
[i915#2527]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2527
[i915#2582]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2582
[i915#2587]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2587
[i915#2658]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2658
[i915#2672]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2672
[i915#2681]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2681
[i915#280]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/280
[i915#2856]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/2856
[i915#3023]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3023
[i915#3116]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3116
[i915#3281]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3281
[i915#3282]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3282
[i915#3291]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3291
[i915#3297]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3297
[i915#3299]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3299
[i915#3361]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3361
[i915#3458]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3458
[i915#3469]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3469
[i915#3539]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3539
[i915#3555]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3555
[i915#3637]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3637
[i915#3638]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3638
[i915#3708]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3708
[i915#3742]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3742
[i915#3804]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3804
[i915#3828]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3828
[i915#3840]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3840
[i915#3955]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/3955
[i915#4070]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4070
[i915#4077]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4077
[i915#4079]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4079
[i915#4083]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4083
[i915#4103]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4103
[i915#4212]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4212
[i915#4213]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4213
[i915#4215]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4215
[i915#4270]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4270
[i915#4349]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4349
[i915#4387]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4387
[i915#4391]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4391
[i915#4423]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4423
[i915#4525]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4525
[i915#4537]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4537
[i915#4538]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4538
[i915#4613]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4613
[i915#4771]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4771
[i915#4812]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4812
[i915#4816]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4816
[i915#4817]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4817
[i915#4839]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4839
[i915#4852]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4852
[i915#4854]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4854
[i915#4860]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4860
[i915#4879]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4879
[i915#4880]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4880
[i915#4958]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/4958
[i915#5138]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5138
[i915#5190]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5190
[i915#5286]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5286
[i915#5289]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5289
[i915#5354]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5354
[i915#5439]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5439
[i915#5566]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5566
[i915#5784]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5784
[i915#5882]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/5882
[i915#6095]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6095
[i915#6113]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6113
[i915#6230]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6230
[i915#6335]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6335
[i915#6412]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6412
[i915#6524]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6524
[i915#658]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/658
[i915#6621]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6621
[i915#6880]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6880
[i915#6944]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6944
[i915#6953]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/6953
[i915#7116]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7116
[i915#7118]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7118
[i915#7173]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7173
[i915#7276]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7276
[i915#7294]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7294
[i915#7582]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7582
[i915#7828]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/7828
[i915#8152]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8152
[i915#8213]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8213
[i915#8228]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8228
[i915#8289]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8289
[i915#8381]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8381
[i915#8399]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8399
[i915#8411]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8411
[i915#8428]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8428
[i915#8555]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8555
[i915#8562]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8562
[i915#8623]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8623
[i915#8708]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8708
[i915#8810]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8810
[i915#8813]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8813
[i915#8814]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8814
[i915#8825]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8825
[i915#8826]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/8826
[i915#9295]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9295
[i915#9323]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9323
[i915#9340]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9340
[i915#9412]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9412
[i915#9423]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9423
[i915#9424]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9424
[i915#9433]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9433
[i915#9519]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9519
[i915#9683]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9683
[i915#9685]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9685
[i915#9688]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9688
[i915#9723]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9723
[i915#9732]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9732
[i915#9738]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9738
[i915#9808]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9808
[i915#9906]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9906
[i915#9917]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9917
[i915#9934]: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9934
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_8514 -> IGTPW_13656
* Piglit: piglit_4509 -> None
CI-20190529: 20190529
CI_DRM_17086: 669eae0e8de0328817f4ce7d3c4a3ef834850751 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_13656: 13656
IGT_8514: 8514
piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_13656/index.html
[-- Attachment #2: Type: text/html, Size: 203170 bytes --]
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support
2025-08-28 16:58 ` [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support nishit.sharma
@ 2025-08-29 13:56 ` Gurram, Pravalika
0 siblings, 0 replies; 19+ messages in thread
From: Gurram, Pravalika @ 2025-08-29 13:56 UTC (permalink / raw)
To: Sharma, Nishit, igt-dev@lists.freedesktop.org,
Ghimiray, Himal Prasad, Brost, Matthew
> -----Original Message-----
> From: Sharma, Nishit <nishit.sharma@intel.com>
> Sent: Thursday, August 28, 2025 10:28 PM
> To: igt-dev@lists.freedesktop.org; Gurram, Pravalika
> <pravalika.gurram@intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray@intel.com>; Brost, Matthew
> <matthew.brost@intel.com>; Sharma, Nishit <nishit.sharma@intel.com>
> Subject: [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support
>
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> Added xe_vm_madvise() which issues madvise ioctl DRM_IOCTL_XE_MADVISE
> for VM region advising the driver about expected usage or memory policy for
> specified address range. MADVISE ioctl requires pointer to drm_xe_madvise
> structure as one of the inputs. Depending upon type of madvise operation like
> Atomic, Preferred LOC or PAT required members of drm_xe_madvise structure
> are initialized and passed in MADVISE ioctl.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> lib/xe/xe_ioctl.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++
> lib/xe/xe_ioctl.h | 5 ++++-
> 2 files changed, 60 insertions(+), 1 deletion(-)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c index 1e95af409..43bad8452
> 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -585,3 +585,59 @@ int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t
> value,
> igt_assert_eq(__xe_wait_ufence(fd, addr, value, exec_queue,
> &timeout), 0);
> return timeout;
> }
> +
> +int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy) {
> + struct drm_xe_madvise madvise = {};
> +
> + madvise.extensions = ext;
> + madvise.vm_id = vm;
> + madvise.start = addr;
> + madvise.range = range;
> +
> + if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
> + madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
> + madvise.atomic.val = op_val;
> + } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
> + madvise.type =
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
> + madvise.preferred_mem_loc.devmem_fd = op_val;
> + madvise.preferred_mem_loc.migration_policy = policy;
> + igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
> + madvise.preferred_mem_loc.devmem_fd);
> + } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
> + madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
> + madvise.pat_index.val = op_val;
> + } else {
> + igt_warn("Unknown attribute\n");
> + return -EINVAL;
> + }
> +
> + if (igt_ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise))
> + return -errno;
> +
> + return 0;
> +}
> +
Hope you will take care of the previous comments in next rev
-- Pravalika
> +/**
> + * xe_vm_madvise:
> + * @fd: xe device fd
> + * @vm: vm_id of the virtual range
> + * @addr: start of the virtual address range
> + * @range: size of the virtual address range
> + * @ext: Pointer to the first extension struct, if any
> + * @type: type of attribute
> + * @op_val: fd/atomic value/pat index, depending upon type of operation
> + * @policy: Page migration policy
> + *
> + * Function initializes different members of struct drm_xe_madvise and
> +calls
> + * MADVISE IOCTL.
> + *
> + * Returns 0 if success and asserts otherwise.
> + */
> +int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy) {
> + igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val,
> policy), 0);
> + return 0;
> +}
> diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h index 6302d1a7d..a5996cf65
> 100644
> --- a/lib/xe/xe_ioctl.h
> +++ b/lib/xe/xe_ioctl.h
> @@ -99,5 +99,8 @@ int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t
> value,
> uint32_t exec_queue, int64_t *timeout); int64_t
> xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
> uint32_t exec_queue, int64_t timeout);
> -
> +int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> uint64_t ext,
> + uint32_t type, uint32_t op_val, uint16_t policy); int
> +xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t
> ext,
> + uint32_t type, uint32_t op_val, uint16_t policy);
> #endif /* XE_IOCTL_H */
> --
> 2.43.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes
2025-08-28 16:58 ` [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes nishit.sharma
@ 2025-08-29 14:02 ` Gurram, Pravalika
0 siblings, 0 replies; 19+ messages in thread
From: Gurram, Pravalika @ 2025-08-29 14:02 UTC (permalink / raw)
To: Sharma, Nishit, igt-dev@lists.freedesktop.org,
Ghimiray, Himal Prasad, Brost, Matthew
> -----Original Message-----
> From: Sharma, Nishit <nishit.sharma@intel.com>
> Sent: Thursday, August 28, 2025 10:28 PM
> To: igt-dev@lists.freedesktop.org; Gurram, Pravalika
> <pravalika.gurram@intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray@intel.com>; Brost, Matthew
> <matthew.brost@intel.com>; Sharma, Nishit <nishit.sharma@intel.com>
> Subject: [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes
>
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> xe_vm_print_mem_attr_values_in_range() function added which calls
> QUERY_MEM_RANGES_ATTRS ioctl to get different memory attributes from
> KMD and then prints memory attributes returned by KMD for different access
> policies like atomic access, preferred loc and pat index.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> lib/xe/xe_ioctl.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++
> lib/xe/xe_ioctl.h | 4 +++
> 2 files changed, 96 insertions(+)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c index 43bad8452..4ab2ef39c
> 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -57,6 +57,98 @@ uint64_t xe_bb_size(int fd, uint64_t reqsize)
> xe_get_default_alignment(fd)); }
>
> +int xe_vm_number_vmas_in_range(int fd, struct
> +drm_xe_vm_query_mem_range_attr *vmas_attr) {
> + if (igt_ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS,
> vmas_attr))
> + return -errno;
> + return 0;
> +}
> +
> +int xe_vm_vma_attrs(int fd, struct drm_xe_vm_query_mem_range_attr
> *vmas_attr,
> + struct drm_xe_mem_range_attr *mem_attr) {
> + if (!mem_attr)
> + return -EINVAL;
> +
> + vmas_attr->vector_of_mem_attr = (uintptr_t)mem_attr;
> +
> + if (igt_ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS,
> vmas_attr))
> + return -errno;
> +
> + return 0;
> +}
> +
> +/**
> + * xe_vm_print_mem_attr_values_in_range:
> + * @fd: xe device fd
> + * @vm: vm_id of the virtual range
> + * @start: start of the virtual address range
> + * @range: size of the virtual address range
> + *
> + * Calls QUERY_MEM_RANGES_ATTRS ioctl to get memory attributes for
> +different
> + * memory ranges from KMD. Prints memory attributes as returned by KMD
> +for
> + * atomic, preferred loc and pat index types.
> + *
> + * Returns 0 for success or error for failure */
> +
> +int xe_vm_print_mem_attr_values_in_range(int fd, uint32_t vm, uint64_t
> +start, uint64_t range) {
> +
> + void *ptr_start, *ptr;
> + int err;
> + struct drm_xe_vm_query_mem_range_attr query = {
> + .vm_id = vm,
> + .start = start,
> + .range = range,
> + .num_mem_ranges = 0,
> + .sizeof_mem_range_attr = 0,
> + .vector_of_mem_attr = (uintptr_t)NULL,
> + };
> +
> + igt_debug("mem_attr_values_in_range called start = %"PRIu64"\n range
> = %"PRIu64"\n",
> + start, range);
> +
> + err = xe_vm_number_vmas_in_range(fd, &query);
> + if (err || !query.num_mem_ranges || !query.sizeof_mem_range_attr) {
> + igt_warn("ioctl failed for xe_vm_number_vmas_in_range\n");
> + igt_debug("vmas_in_range err = %d query.num_mem_ranges =
> %u query.sizeof_mem_range_attr=%lld\n",
> + err, query.num_mem_ranges,
> query.sizeof_mem_range_attr);
> + return err;
> + }
> +
> + /* Allocate buffer for the memory region attributes */
> + ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
> + ptr_start = ptr;
> +
> + if (!ptr)
> + return -ENOMEM;
> +
> + err = xe_vm_vma_attrs(fd, &query, ptr);
> + if (err) {
> + igt_warn("ioctl failed for vma_attrs err = %d\n", err);
> + return err;
> + }
> +
> + /* Iterate over the returned memory region attributes */
> + for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
> + struct drm_xe_mem_range_attr *mem_attrs = (struct
> +drm_xe_mem_range_attr *)ptr;
> +
> + igt_info("vma_id = %d\nvma_start = 0x%016llx\nvma_end =
> 0x%016llx\n"
> + "vma:atomic = %d\nvma:pat_index =
> %d\nvma:preferred_loc_region = %d\n"
> + "vma:preferred_loc_devmem_fd = %d\n\n\n",
> i, mem_attrs->start,
> + mem_attrs->end,
> + mem_attrs->atomic.val, mem_attrs-
> >pat_index.val,
> + mem_attrs-
> >preferred_mem_loc.migration_policy,
> + mem_attrs->preferred_mem_loc.devmem_fd);
> +
Move this igt_info to igt_debug
-- Pravalika
> + ptr += query.sizeof_mem_range_attr;
> + }
> +
> + free(ptr_start);
> + return 0;
> +}
> +
> uint32_t xe_vm_create(int fd, uint32_t flags, uint64_t ext) {
> struct drm_xe_vm_create create = {
> diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h index a5996cf65..ae16af233
> 100644
> --- a/lib/xe/xe_ioctl.h
> +++ b/lib/xe/xe_ioctl.h
> @@ -103,4 +103,8 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr,
> uint64_t range, uint64_t
> uint32_t type, uint32_t op_val, uint16_t policy); int
> xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range, uint64_t ext,
> uint32_t type, uint32_t op_val, uint16_t policy);
> +int xe_vm_number_vmas_in_range(int fd, struct
> +drm_xe_vm_query_mem_range_attr *vmas_attr); int xe_vm_vma_attrs(int fd,
> struct drm_xe_vm_query_mem_range_attr *vmas_attr,
> + struct drm_xe_mem_range_attr *mem_attr); int
> +xe_vm_print_mem_attr_values_in_range(int fd, uint32_t vm, uint64_t
> +start, uint64_t range);
> #endif /* XE_IOCTL_H */
> --
> 2.43.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test
2025-08-28 16:58 ` [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test nishit.sharma
@ 2025-08-29 14:21 ` Gurram, Pravalika
2025-08-29 19:55 ` Matthew Brost
1 sibling, 0 replies; 19+ messages in thread
From: Gurram, Pravalika @ 2025-08-29 14:21 UTC (permalink / raw)
To: Sharma, Nishit, igt-dev@lists.freedesktop.org,
Ghimiray, Himal Prasad, Brost, Matthew
> -----Original Message-----
> From: Sharma, Nishit <nishit.sharma@intel.com>
> Sent: Thursday, August 28, 2025 10:28 PM
> To: igt-dev@lists.freedesktop.org; Gurram, Pravalika
> <pravalika.gurram@intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray@intel.com>; Brost, Matthew
> <matthew.brost@intel.com>; Sharma, Nishit <nishit.sharma@intel.com>
> Subject: [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add
> preferred_loc_smem test
>
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> Added preferred-loc-smem test which is called in combination with other tests
> as well. In this test the buffer object preferred location is system memory.
> MADVISE ioctl is called with preferred_loc attribute and default_system system
> memory as preferred location.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> tests/intel/xe_exec_system_allocator.c | 225 ++++++++++++++++++++-----
> 1 file changed, 187 insertions(+), 38 deletions(-)
>
> diff --git a/tests/intel/xe_exec_system_allocator.c
> b/tests/intel/xe_exec_system_allocator.c
> index 007d9bdc0..70ca5fc2e 100644
> --- a/tests/intel/xe_exec_system_allocator.c
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -138,7 +138,6 @@ static void signal_pdata(struct process_data *pdata)
> #define CPU_FAULT_THREADS (0x1 << 2)
> #define CPU_FAULT_PROCESS (0x1 << 3)
> #define CPU_FAULT_SAME_PAGE (0x1 << 4)
> -
> static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> unsigned int flags)
> {
> @@ -406,6 +405,39 @@ static void __aligned_partial_free(struct
> aligned_alloc_type *aligned_alloc_typ
> aligned_alloc_type->__size - aligned_alloc_type->size -
> begin_size); }
>
> +#define MAX_N_EXEC_QUEUES 16
> +
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +#define PREFETCH (0x1 << 22)
> +#define THREADS (0x1 << 23)
> +#define PROCESSES (0x1 << 24)
> +#define PREFETCH_BENCHMARK (0x1 << 25)
> +#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> +#define PREFERRED_LOC_SMEM (0x1 << 27)
> +
> +#define N_MULTI_FAULT 4
> +
> /**
> * SUBTEST: unaligned-alloc
> * Description: allocate unaligned sizes of memory @@ -460,7 +492,7 @@
> many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> uint32_t *bos = NULL;
> struct timespec tv = {};
> uint64_t submit, read, elapsed;
> - int i;
> + int i, err;
>
> vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0); @@ -
> 500,6 +532,15 @@ many_allocs(int fd, struct drm_xe_engine_class_instance
> *eci,
> alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> igt_assert(alloc.ptr);
> }
> +
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm,
> to_user_pointer(alloc.ptr), alloc_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s,
> vm =%u data=%"PRIu64" alloc_size =%"PRIu64"\n",
> + strerror(errno), vm,
> to_user_pointer(alloc.ptr), alloc_size);
> + }
The condition if (flags & PREFERRED_LOC_SMEM) is never triggered unless many_allocs is used with PREFERRED_LOC_SMEM.
Please verify if this condition is actually needed. If not, consider removing it.
The reason for changing the #define placement was to access PREFERRED_LOC_SMEM, but if it's not required, do not move this section to the top.
> allocs[i] = alloc;
>
> touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> @@ -662,7 +703,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci,
> unsigned int flags)
> size_t bo_size = SZ_2M, unmap_offset = 0;
> uint32_t vm, exec_queue;
> u64 *exec_ufence = NULL;
> - int i;
> + int i, err;
> void *old, *new = NULL;
> struct aligned_alloc_type alloc;
>
> @@ -688,6 +729,15 @@ partial(int fd, struct drm_xe_engine_class_instance
> *eci, unsigned int flags)
> xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> FIVE_SEC);
> data[0].vm_sync = 0;
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u
> data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data),
> bo_size);
> + }
> +
The condition if (flags & PREFERRED_LOC_SMEM) is never triggered unless partial is used with PREFERRED_LOC_SMEM.
Please verify if this condition is actually needed. If not, consider removing it.
The reason for changing the #define placement was to access PREFERRED_LOC_SMEM, but if it's not required, do not move this section to the top.
> exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> PROT_WRITE, MAP_SHARED |
> MAP_ANONYMOUS, -1, 0);
> @@ -747,38 +797,6 @@ partial(int fd, struct drm_xe_engine_class_instance
> *eci, unsigned int flags)
> xe_vm_destroy(fd, vm);
> }
>
> -#define MAX_N_EXEC_QUEUES 16
> -
> -#define MMAP (0x1 << 0)
> -#define NEW (0x1 << 1)
> -#define BO_UNMAP (0x1 << 2)
> -#define FREE (0x1 << 3)
> -#define BUSY (0x1 << 4)
> -#define BO_MAP (0x1 << 5)
> -#define RACE (0x1 << 6)
> -#define SKIP_MEMSET (0x1 << 7)
> -#define FAULT (0x1 << 8)
> -#define FILE_BACKED (0x1 << 9)
> -#define LOCK (0x1 << 10)
> -#define MMAP_SHARED (0x1 << 11)
> -#define HUGE_PAGE (0x1 << 12)
> -#define SHARED_ALLOC (0x1 << 13)
> -#define FORK_READ (0x1 << 14)
> -#define FORK_READ_AFTER (0x1 << 15)
> -#define MREMAP (0x1 << 16)
> -#define DONTUNMAP (0x1 << 17)
> -#define READ_ONLY_REMAP (0x1 << 18)
> -#define SYNC_EXEC (0x1 << 19)
> -#define EVERY_OTHER_CHECK (0x1 << 20)
> -#define MULTI_FAULT (0x1 << 21)
> -#define PREFETCH (0x1 << 22)
> -#define THREADS (0x1 << 23)
> -#define PROCESSES (0x1 << 24)
> -#define PREFETCH_BENCHMARK (0x1 << 25)
> -#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> -
> -#define N_MULTI_FAULT 4
> -
> /**
> * SUBTEST: once-%s
> * Description: Run %arg[1] system allocator test only once @@ -951,6 +969,80
> @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * @mmap-new-nomemset: mmap a new buffer for each
> exec, skip memset of buffers
> * @mmap-new-huge-nomemset: mmap huge page new buffer
> for each exec, skip memset of buffers
> * @mmap-new-race-nomemset: mmap a new buffer for each
> exec with race between cpu and gpu access, skip memset of buffers
> + * @free-nomemset-preferred-loc-smem: malloc and free buffer for each
> exec and perform preferred loc madvise operation
> + * @free-preferred-loc-smem: free buffer for each exec and perform
> preferred loc madvise operation
> + * @free-race-nomemset-preferred-loc-smem: free buffer for each exec with
> race between cpu and gpu access and perform madvise operation skipping
> memset
> + * @free-race-preferred-loc-smem: free buffer for each exec with race
> between cpu and gpu access and perform madvise operation
> + * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer
> for all execs, bind and unbind a BO to same address, skip memset and perform
> madvise operation
> + * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer
> for all execs, try to unbind while buffer valid, skip memset of buffers and
> perform madvise operation
> + * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs, try to
> unbind while buffer valid and perform madvise operation
> + * @malloc-fork-read-after-preferred-loc-smem: malloc single buffer
> for all execs, fork a process to read test output, perform madvise operation
> + * @malloc-fork-read-preferred-loc-smem: malloc single buffer for all
> execs, fork a process to read test output, perform madvise operation
> + * @malloc-mlock-nomemset-preferred-loc-smem: malloc and mlock
> single buffer for all execs, skip memset of buffers, perform madvise operation
> + * @malloc-mlock-preferred-loc-smem: malloc and mlock single buffer
> for all execs, perform madvise operation
> + * @malloc-multi-fault-preferred-loc-smem: malloc single buffer for all
> execs and perform madvise operation
> + * @malloc-nomemset-preferred-loc-smem: malloc single buffer for all
> execs, skip memset of buffers and perform madvise operation
> + * @malloc-preferred-loc-smem: malloc single buffer for all execs, issue
> a command which will trigger multiple faults, perform madvise operation
> + * @malloc-prefetch-preferred-loc-smem: malloc single buffer for all
> execs, prefetch buffer before each exec, perform madvise operation
> + * @malloc-prefetch-race-preferred-loc-smem: malloc single buffer for all
> execs, prefetch buffer before each exec, perform madvise operation
> + * @malloc-race-nomemset-preferred-loc-smem: malloc single buffer
> for all execs with race between cpu and gpu access, perform madvise operation
> + * @malloc-race-preferred-loc-smem: malloc single buffer for all execs with
> race between cpu and gpu access, perform madvise operation
> + * @free-race-nomemset-preferred-loc-smem: malloc and free buffer for each
> exec with race between cpu and gpu access, perform madvise operation
> + * @free-race-preferred-loc-smem: malloc and free buffer for each exec
> with race between cpu and gpu access, perform madvise operation
> + * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer for
> all execs, bind and unbind a BO to same address before execs, perform madvise
> operation
> + * @malloc-bo-unmap-preferred-loc-smem: malloc single buffer for all
> execs and perform madvise operation
> + * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer
> for all execs and perform madvise operation
> + * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs and
> perform madvise
> + * @mmap-file-mlock-nomemset-preferred-loc-smem: mmap and mlock
> single buffer, with file backing, perform madvise
> + * @mmap-file-mlock-preferred-loc-smem: mmap and mlock single buffer,
> with file backing, perform madvise
> + * @mmap-file-nomemset-preferred-loc-smem: mmap single buffer,
> with file backing and perform madvise
> + * @mmap-file-preferred-loc-smem: mmap single buffer, with file backing
> and perform madvise
> + * @mmap-free-huge-nomemset-preferred-loc-smem: mmap huge page and
> free buffer for each exec and perform madvise
> + * @mmap-free-huge-preferred-loc-smem: mmap huge page and free
> buffer for each exec and perform madvise
> + * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer
> for each exec and perform madvise
> + * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec
> and perform madvise
> + * @mmap-free-race-nomemset-preferred-loc-smem:
> + * @mmap-free-race-preferred-loc-smem:
> + * @mmap-huge-nomemset-preferred-loc-smem:
> + * @mmap-huge-preferred-loc-smem:
> + * @mmap-mlock-nomemset-preferred-loc-smem:
> + * @mmap-mlock-preferred-loc-smem:
> + * @mmap-new-huge-nomemset-preferred-loc-smem:
> + * @mmap-new-huge-preferred-loc-smem:
> + * @mmap-new-nomemset-preferred-loc-smem:
> + * @mmap-new-preferred-loc-smem:
> + * @mmap-new-race-nomemset-preferred-loc-smem:
> + * @mmap-new-race-preferred-loc-smem:
> + * @mmap-nomemset-preferred-loc-smem:
> + * @mmap-preferred-loc-smem:
> + * @mmap-prefetch-preferred-loc-smem:
> + * @mmap-prefetch-shared-preferred-loc-smem:
> + * @mmap-race-nomemset-preferred-loc-smem:
> + * @mmap-race-preferred-loc-smem:
> + * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-remap-dontunmap-preferred-loc-smem:
> + * @mmap-remap-eocheck-preferred-loc-smem:
> + * @mmap-remap-preferred-loc-smem:
> + * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-remap-ro-dontunmap-preferred-loc-smem:
> + * @mmap-remap-ro-eocheck-preferred-loc-smem:
> + * @mmap-remap-ro-preferred-loc-smem:
> + * @mmap-shared-nomemset-preferred-loc-smem:
> + * @mmap-shared-preferred-loc-smem:
> + * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-shared-remap-dontunmap-preferred-loc-smem:
> + * @mmap-shared-remap-eocheck-preferred-loc-smem:
> + * @mmap-shared-remap-preferred-loc-smem:
> + * @new-bo-map-nomemset-preferred-loc-smem:
> + * @new-bo-map-preferred-loc-smem:
> + * @new-busy-nomemset-preferred-loc-smem:
> + * @new-busy-preferred-loc-smem:
> + * @new-nomemset-preferred-loc-smem:
> + * @new-preferred-loc-smem:
> + * @new-prefetch-preferred-loc-smem:
> + * @new-race-nomemset-preferred-loc-smem:
> + * @new-race-preferred-loc-smem:
> + * @prefetch-benchmark:
> *
> * SUBTEST: prefetch-benchmark
> * Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
> @@ -1020,7 +1112,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> uint32_t bo = 0, bind_sync = 0;
> void **pending_free;
> u64 *exec_ufence = NULL, *bind_ufence = NULL;
> - int i, j, b, file_fd = -1, prev_idx, pf_count;
> + int i, j, b, file_fd = -1, prev_idx, pf_count, err;
> bool free_vm = false;
> size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> size_t orig_size = bo_size;
> @@ -1133,6 +1225,15 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
>
> addr = to_user_pointer(data);
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u
> data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data),
> bo_size);
> + }
> +
> if (flags & BO_UNMAP) {
> bo_flags =
> DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> bo = xe_bo_create(fd, vm, bo_size,
> @@ -1202,7 +1303,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> uint64_t batch_addr = addr + batch_offset;
> uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> uint64_t sdi_addr = addr + sdi_offset;
> - int e = i % n_exec_queues, err;
> + int e = i % n_exec_queues;
> bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> bool fault_injected = (FAULT & flags) && i > n_execs;
>
> @@ -1232,6 +1333,16 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> aligned_alloc_type = __aligned_alloc(aligned_size,
> bo_size);
> next_data = aligned_alloc_type.ptr;
> igt_assert(next_data);
> +
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm,
> to_user_pointer(next_data), bo_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s,
> vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm,
> to_user_pointer(next_data), bo_size);
> + }
> +
> __aligned_partial_free(&aligned_alloc_type);
>
> b = 0;
> @@ -1253,6 +1364,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> sync[0].addr = to_user_pointer(bind_ufence);
>
> start = igt_nsec_elapsed(&tv);
> +
> xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, sync,
> 1, region);
> end = igt_nsec_elapsed(&tv);
> @@ -1355,6 +1467,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> } else {
> igt_assert_eq(data[idx].data,
> READ_VALUE(&data[idx]));
> +
> if (flags &
> PREFETCH_SYS_BENCHMARK) {
> struct timespec tv = {};
> u64 start, end;
> @@ -1429,6 +1542,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> aligned_alloc_type =
> __aligned_alloc(aligned_size, bo_size);
> data = aligned_alloc_type.ptr;
> igt_assert(data);
> +
> __aligned_partial_free(&aligned_alloc_type);
>
> bo_flags =
> DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> @@ -1450,6 +1564,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> }
> bo = 0;
> data = aligned_alloc(aligned_size, bo_size);
> +
> igt_assert(data);
> }
> addr = to_user_pointer(data);
> @@ -1460,6 +1575,15 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> prev_idx = idx;
> }
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u
> data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data),
> bo_size);
> + }
> +
Do we need to set PREFERRED_LOC_SMEM after reading the data?
> if (flags & PREFETCH_BENCHMARK) {
> igt_info("Prefetch VRAM execution took %.3fms, %.1f5
> GB/s\n",
> 1e-6 * prefetch_ns,
> @@ -1587,6 +1711,7 @@ threads(int fd, int n_exec_queues, int n_execs, size_t
> bo_size,
> uint32_t vm = 0;
> bool go = false;
> void *alloc = NULL;
> + int err;
>
> if ((FILE_BACKED | FORK_READ) & flags)
> return;
> @@ -1614,6 +1739,15 @@ threads(int fd, int n_exec_queues, int n_execs,
> size_t bo_size,
> alloc = aligned_alloc(SZ_2M, alloc_size);
> igt_assert(alloc);
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(alloc),
> alloc_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u
> data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm,
> to_user_pointer(alloc), alloc_size);
> + }
> +
> memset(alloc, 5, alloc_size);
> flags &= ~SHARED_ALLOC;
> }
> @@ -1831,6 +1965,7 @@ igt_main
> { NULL },
> };
> int fd;
> + int num_sections;
>
> igt_fixture {
> struct xe_device *xe;
> @@ -1843,7 +1978,21 @@ igt_main
> open_sync_file();
> }
>
> - for (const struct section *s = sections; s->name; s++) {
> +
> + num_sections = 0;
Please move initialization to declaration
-- Pravalika
> + for (const struct section *s = sections; s[num_sections].name;
> num_sections++)
> + ;
> +
> + for (int i = 0; i < num_sections * 2; i++) {
> + struct section *s = &sections[i % num_sections];
> +
> + if (i/num_sections == 0) {
> + static char modified_name[256];
> + snprintf(modified_name, sizeof(modified_name), "%s-
> preferred-loc-smem", s->name);
> + s->name = modified_name;
> + s->flags |= PREFERRED_LOC_SMEM;
> + }
> +
> igt_subtest_f("once-%s", s->name)
> xe_for_each_engine(fd, hwe)
> test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> --
> 2.43.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
@ 2025-08-29 14:37 ` Gurram, Pravalika
2025-08-29 20:07 ` Matthew Brost
2025-08-29 20:26 ` Matthew Brost
2 siblings, 0 replies; 19+ messages in thread
From: Gurram, Pravalika @ 2025-08-29 14:37 UTC (permalink / raw)
To: Sharma, Nishit, igt-dev@lists.freedesktop.org,
Ghimiray, Himal Prasad, Brost, Matthew
> -----Original Message-----
> From: Sharma, Nishit <nishit.sharma@intel.com>
> Sent: Thursday, August 28, 2025 10:28 PM
> To: igt-dev@lists.freedesktop.org; Gurram, Pravalika
> <pravalika.gurram@intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray@intel.com>; Brost, Matthew
> <matthew.brost@intel.com>; Sharma, Nishit <nishit.sharma@intel.com>
> Subject: [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add
> atomic_batch test in IGT
>
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> ATOMIC_BATCH flag is introduced when true MI_ATOMIC | MI_ATOMIC_INC
> operation will be called. This will avoid writing another function which performs
> atomic increment operations. ATOMIC_BATCH flag is passed as argument in
> write_dword() if true then value will be written on passed address and
> incremented by ATOMIC_INC operation. For all memory operations this flag will
> be used to verify if ATOMIC operation is working or not.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> lib/xe/xe_ioctl.c | 18 +-
> tests/intel/xe_exec_system_allocator.c | 545 ++++++++++++++++++++-----
> 2 files changed, 445 insertions(+), 118 deletions(-)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c index 4ab2ef39c..71a427b4d
> 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -688,19 +688,26 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t
> addr, uint64_t range,
> madvise.start = addr;
> madvise.range = range;
>
> - if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
> + switch (type) {
> + case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
> madvise.atomic.val = op_val;
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
> madvise.type =
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
> madvise.preferred_mem_loc.devmem_fd = op_val;
> madvise.preferred_mem_loc.migration_policy = policy;
> igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
> madvise.preferred_mem_loc.devmem_fd);
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PAT:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
> madvise.pat_index.val = op_val;
> - } else {
> + break;
> +
> + default:
> igt_warn("Unknown attribute\n");
> return -EINVAL;
> }
Please move these changes into the library patch of the series.
> @@ -730,6 +737,5 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr,
> uint64_t range, int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t
> range,
> uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy) {
> - igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val,
> policy), 0);
> - return 0;
> + return __xe_vm_madvise(fd, vm, addr, range, ext, type, op_val,
> +policy);
> }
> diff --git a/tests/intel/xe_exec_system_allocator.c
> b/tests/intel/xe_exec_system_allocator.c
> index 70ca5fc2e..d0a8431a2 100644
> --- a/tests/intel/xe_exec_system_allocator.c
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -21,6 +21,7 @@
> #include "lib/intel_reg.h"
> #include "xe_drm.h"
>
> +#include "intel_pat.h"
> #include "xe/xe_gt.h"
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> @@ -29,6 +30,14 @@
> #define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> #define QUARTER_SEC (NSEC_PER_SEC / 4)
> #define FIVE_SEC (5LL * NSEC_PER_SEC)
Space here
> +struct test_exec_data {
> + uint32_t batch[32];
> + uint64_t pad;
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> + uint32_t expected_data;
> +};
>
> struct batch_data {
> uint32_t batch[16];
> @@ -37,6 +46,7 @@ struct batch_data {
> uint32_t expected_data;
> };
>
> +#define VAL_ATOMIC_EXPECTED 56
> #define WRITE_VALUE(data__, i__) ({ \
> if (!(data__)->expected_data) \
> (data__)->expected_data = rand() << 12 | (i__); \
> @@ -53,10 +63,19 @@ static void __write_dword(uint32_t *batch, uint64_t
> sdi_addr, uint32_t wdata,
> batch[(*idx)++] = wdata;
> }
>
> -static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> - int *idx)
> +static void write_dword(struct test_exec_data *data, uint64_t sdi_addr,
> uint32_t wdata,
> + int *idx, bool atomic)
> {
> - __write_dword(batch, sdi_addr, wdata, idx);
> + uint32_t *batch = data->batch;
> +
> + if (atomic) {
> + data->data = 55;
> + batch[(*idx)++] = MI_ATOMIC | MI_ATOMIC_INC;
> + batch[(*idx)++] = sdi_addr;
> + batch[(*idx)++] = sdi_addr >> 32;
> + } else {
> + __write_dword(batch, sdi_addr, wdata, idx);
> + }
> batch[(*idx)++] = MI_BATCH_BUFFER_END; }
>
> @@ -271,7 +290,7 @@ check_all_pages_threads(void *ptr, uint64_t alloc_size,
> uint64_t stride,
>
> static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t alloc_size, uint64_t stride,
> - struct timespec *tv, uint64_t *submit)
> + struct timespec *tv, uint64_t *submit, bool atomic)
> {
> struct drm_xe_sync sync[1] = {
> { .type = DRM_XE_SYNC_TYPE_USER_FENCE, @@ -302,7
> +321,8 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i),
> &b);
> + write_dword((struct test_exec_data *)data, sdi_addr,
> WRITE_VALUE(data, i),
> + &b, atomic ? true : false);
> igt_assert(b <= ARRAY_SIZE(data->batch));
> }
>
> @@ -407,34 +427,45 @@ static void __aligned_partial_free(struct
> aligned_alloc_type *aligned_alloc_typ
>
> #define MAX_N_EXEC_QUEUES 16
>
> -#define MMAP (0x1 << 0)
> -#define NEW (0x1 << 1)
> -#define BO_UNMAP (0x1 << 2)
> -#define FREE (0x1 << 3)
> -#define BUSY (0x1 << 4)
> -#define BO_MAP (0x1 << 5)
> -#define RACE (0x1 << 6)
> -#define SKIP_MEMSET (0x1 << 7)
> -#define FAULT (0x1 << 8)
> -#define FILE_BACKED (0x1 << 9)
> -#define LOCK (0x1 << 10)
> -#define MMAP_SHARED (0x1 << 11)
> -#define HUGE_PAGE (0x1 << 12)
> -#define SHARED_ALLOC (0x1 << 13)
> -#define FORK_READ (0x1 << 14)
> -#define FORK_READ_AFTER (0x1 << 15)
> -#define MREMAP (0x1 << 16)
> -#define DONTUNMAP (0x1 << 17)
> -#define READ_ONLY_REMAP (0x1 << 18)
> -#define SYNC_EXEC (0x1 << 19)
> -#define EVERY_OTHER_CHECK (0x1 << 20)
> -#define MULTI_FAULT (0x1 << 21)
> -#define PREFETCH (0x1 << 22)
> -#define THREADS (0x1 << 23)
> -#define PROCESSES (0x1 << 24)
> -#define PREFETCH_BENCHMARK (0x1 << 25)
> -#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> -#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +#define PREFETCH (0x1 << 22)
> +#define THREADS (0x1 << 23)
> +#define PROCESSES (0x1 << 24)
> +#define PREFETCH_BENCHMARK (0x1 << 25)
> +#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> +#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define ATOMIC_BATCH (0x1 << 28)
> +#define MIGRATE_ALL_PAGES (0x1 << 29)
> +#define PREFERRED_LOC_ATOMIC_DEVICE (0x1 << 30)
> +#define PREFERRED_LOC_ATOMIC_GL (0x1ull << 31)
> +#define PREFERRED_LOC_ATOMIC_CPU (0x1ull << 32)
> +#define MADVISE_MULTI_VMA (0x1ull << 33)
> +#define MADVISE_SPLIT_VMA (0x1ull << 34)
> +#define MADVISE_ATOMIC_VMA (0x1ull << 35)
> +#define PREFETCH_SPLIT_VMA (0x1ull << 36)
> +#define PREFETCH_CHANGE_ATTR (0x1ull << 37)
> +#define PREFETCH_SAME_ATTR (0x1ull << 38)
>
> #define N_MULTI_FAULT 4
>
> @@ -478,6 +509,47 @@ static void __aligned_partial_free(struct
> aligned_alloc_type *aligned_alloc_typ
> * SUBTEST: processes-evict-malloc-mix-bo
> * Description: multi-process trigger eviction of VRAM allocated via malloc and
> BO create
> * Test category: stress test
> + *
> + * SUBTEST: madvise-multi-vma
> + * Description: performs multiple madvise operations on multiple
> + virtual memory area using atomic device attributes
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma
> + * Description: perform madvise operations on multiple type VMAs (BO
> + and CPU VMAs)
> + * Test category: perform madvise operations on multiple type VMAs (BO
> + and CPU VMAs)
> + *
> + * SUBTEST: madvise-atomic-vma
> + * Description: perform madvise atomic operations on BO in VRAM/SMEM if
> + atomic ATTR global/device
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma-with-mapping
> + * Description: performs prefetch and page migration
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-vram
> + * Description: performs both atomic and preferred loc madvise
> + operations atomic device attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-gl
> + * Description: performs both atomic and preferred loc madvise
> + operations with atomic global attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-cpu
> + * Description: performs both atomic and preferred loc madvise
> + operations with atomic cpu attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-sram-migrate-pages
> + * Description: performs preferred loc madvise operations and migrating
> + all pages in smem
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-no-range-invalidate-same-attr
> + * Description: performs atomic global madvise operation, prefetch and
> + again madvise operation with same atomic attribute
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-range-invalidate-change-attr
> + * Description: performs atomic global madvise operation, prefetch and
> + again madvise operation with different atomic attribute
> + * Test category: functionality test
> + *
> */
>
> static void
> @@ -544,7 +616,7 @@ many_allocs(int fd, struct
> drm_xe_engine_class_instance *eci,
> allocs[i] = alloc;
>
> touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> - &tv, &submit);
> + &tv, &submit, flags & ATOMIC_BATCH);
> }
>
> if (barrier)
> @@ -692,7 +764,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci,
> unsigned int flags)
> .num_syncs = 1,
> .syncs = to_user_pointer(sync),
> };
> - struct {
> + struct batch_data {
> uint32_t batch[16];
> uint64_t pad;
> uint64_t vm_sync;
> @@ -750,7 +822,8 @@ partial(int fd, struct drm_xe_engine_class_instance *eci,
> unsigned int flags)
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i],
> i), &b);
> + write_dword((struct test_exec_data *)&data[i], sdi_addr,
> WRITE_VALUE(&data[i], i),
> + &b, ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[i].batch));
>
> if (!i)
> @@ -773,7 +846,10 @@ partial(int fd, struct drm_xe_engine_class_instance
> *eci, unsigned int flags)
> xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> exec_queue, FIVE_SEC);
> if (i || (flags & CPU_FAULT))
> - igt_assert_eq(data[i].data, READ_VALUE(&data[i]));
> + igt_assert_eq(data[i].data,
> + flags & ATOMIC_BATCH
> + ? VAL_ATOMIC_EXPECTED
> + : READ_VALUE(&data[i]));
> exec_ufence[0] = 0;
>
> if (!i) {
> @@ -1001,48 +1077,47 @@ partial(int fd, struct drm_xe_engine_class_instance
> *eci, unsigned int flags)
> * @mmap-free-huge-preferred-loc-smem: mmap huge page and free
> buffer for each exec and perform madvise
> * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer
> for each exec and perform madvise
> * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec
> and perform madvise
> - * @mmap-free-race-nomemset-preferred-loc-smem:
> - * @mmap-free-race-preferred-loc-smem:
> - * @mmap-huge-nomemset-preferred-loc-smem:
> - * @mmap-huge-preferred-loc-smem:
> - * @mmap-mlock-nomemset-preferred-loc-smem:
> - * @mmap-mlock-preferred-loc-smem:
> - * @mmap-new-huge-nomemset-preferred-loc-smem:
> - * @mmap-new-huge-preferred-loc-smem:
> - * @mmap-new-nomemset-preferred-loc-smem:
> - * @mmap-new-preferred-loc-smem:
> - * @mmap-new-race-nomemset-preferred-loc-smem:
> - * @mmap-new-race-preferred-loc-smem:
> - * @mmap-nomemset-preferred-loc-smem:
> - * @mmap-preferred-loc-smem:
> - * @mmap-prefetch-preferred-loc-smem:
> - * @mmap-prefetch-shared-preferred-loc-smem:
> - * @mmap-race-nomemset-preferred-loc-smem:
> - * @mmap-race-preferred-loc-smem:
> - * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-dontunmap-preferred-loc-smem:
> - * @mmap-remap-eocheck-preferred-loc-smem:
> - * @mmap-remap-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-preferred-loc-smem:
> - * @mmap-remap-ro-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-preferred-loc-smem:
> - * @mmap-shared-nomemset-preferred-loc-smem:
> - * @mmap-shared-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-preferred-loc-smem:
> - * @mmap-shared-remap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-preferred-loc-smem:
> - * @new-bo-map-nomemset-preferred-loc-smem:
> - * @new-bo-map-preferred-loc-smem:
> - * @new-busy-nomemset-preferred-loc-smem:
> - * @new-busy-preferred-loc-smem:
> - * @new-nomemset-preferred-loc-smem:
> - * @new-preferred-loc-smem:
> - * @new-prefetch-preferred-loc-smem:
> - * @new-race-nomemset-preferred-loc-smem:
> - * @new-race-preferred-loc-smem:
> - * @prefetch-benchmark:
> + * @mmap-free-race-nomemset-preferred-loc-smem: mmap and free buffer
> + for each exec with race between cpu and gpu access, perform madvise
> + operation
> + * @mmap-free-race-preferred-loc-smem: mmap and free buffer for each
> + exec with race between cpu and gpu access, perform madvise operation
> + * @mmap-huge-nomemset-preferred-loc-smem: mmap huge page single
> buffer
> + for all execs, skips memset and perform madvise operation
> + * @mmap-huge-preferred-loc-smem: mmap huge page single buffer for all
> + execs, perform madvise operation
> + * @mmap-mlock-nomemset-preferred-loc-smem: mmap and mlock of a
> buffer
> + with preferred location set to system memory, skipping memset
> + * @mmap-mlock-preferred-loc-smem: mmap and mlock of a buffer with
> + preferred location set to system memory
> + * @mmap-new-huge-nomemset-preferred-loc-smem: mmap of a newly
> + allocated buffer using huge pages, with preferred location set to
> + system memory and skipping memset
> + * @mmap-new-huge-preferred-loc-smem: mmap of a newly allocated buffer
> + using huge pages, with preferred location set to system memory
> + * @mmap-new-nomemset-preferred-loc-smem: mmap of a newly allocated
> + buffer with preferred location set to system memory and skipping
> + memset
> + * @mmap-new-preferred-loc-smem: mmap of a newly allocated buffer with
> + preferred location set to system memory
> + * @mmap-new-race-nomemset-preferred-loc-smem: mmap of a newly
> + allocated buffer with preferred location set to system memory and
> + skipping memset
> + * @mmap-new-race-preferred-loc-smem: mmap of a newly allocated buffer
> + with preferred location set to system memory
> + * @mmap-nomemset-preferred-loc-smem: mmap of a buffer with preferred
> + location set to system memory, skipping memset
> + * @mmap-preferred-loc-smem: mmap of a buffer with preferred location
> + set to system memory
> + * @mmap-prefetch-preferred-loc-smem: prefetching mmap buffer with
> + preferred location set to system memory
> + * @mmap-prefetch-shared-preferred-loc-smem: mmap of a shared buffer
> + with prefetch and preferred location set to system memory
> + * @mmap-race-nomemset-preferred-loc-smem: Tests mmap of a buffer with
> + preferred location set to system memory, skipping memset
> + * @mmap-race-preferred-loc-smem: mmap buffer with race between GPU
> and
> + CPU access with preferred location set to system memory
> + * @mmap-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap
> of
> + a buffer with preferred location set to system memory, does not unmap
> + after use
> + * @mmap-remap-dontunmap-preferred-loc-smem: mmap and remap of a
> buffer
> + with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-eocheck-preferred-loc-smem: mmap and remap of a buffer
> + with preferred location set to system memory
> + * @mmap-remap-preferred-loc-smem: mmap and remap of a buffer with
> + preferred location set to system memory
> + * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem: mmap and
> remap
> + of a read-only buffer with preferred location set to system memory,
> + does not unmap after use
> + * @mmap-remap-ro-dontunmap-preferred-loc-smem: mmap and remap of a
> + read-only buffer with preferred location set to system memory, does
> + not unmap after use
> + * @mmap-remap-ro-eocheck-preferred-loc-smem: mmap and remap of a
> + read-only buffer with preferred location set to system memory
> + * @mmap-remap-ro-preferred-loc-smem: mmap and remap of a read-only
> + buffer with preferred location set to system memory
> + * @mmap-shared-nomemset-preferred-loc-smem: mmap of a shared buffer
> + with preferred location set to system memory, skipping memset
> + * @mmap-shared-preferred-loc-smem: mmap of a shared buffer with
> + preferred location set to system memory
> + * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem: mmap
> and
> + remap of a shared buffer with preferred location set to system memory,
> + does not unmap after use
> + * @mmap-shared-remap-dontunmap-preferred-loc-smem: mmap and remap
> of a
> + shared buffer with preferred location set to system memory
> + * @mmap-shared-remap-eocheck-preferred-loc-smem: mmap and remap of a
> + shared buffer with preferred location set to system memory with end of
> + check validation
> + * @mmap-shared-remap-preferred-loc-smem: mmap and remap of a shared
> + buffer with preferred location set to system memory without end of
> + check validation
> + * @new-bo-map-nomemset-preferred-loc-smem: Tests allocation and
> + mapping of a new buffer object with preferred location set to system
> + memory, skipping memset
> + * @new-bo-map-preferred-loc-smem: ests allocation and mapping of a new
> + buffer object with preferred location set to system memory
> + * @new-busy-nomemset-preferred-loc-smem: Tests allocation and usage of
> + a new busy buffer object with preferred location set to system memory,
> + skipping memset
> + * @new-busy-preferred-loc-smem: ests allocation and usage of a new
> + busy buffer object with preferred location set to system memory
> + * @new-nomemset-preferred-loc-smem: Tests allocation of a new buffer
> + object with preferred location set to system memory, skipping memset
> + * @new-preferred-loc-smem: Tests allocation of a new buffer object
> + with preferred location set to system memory
> + * @new-prefetch-preferred-loc-smem: Tests allocation and prefetching
> + of a new buffer object with preferred location set to system memory
> + * @new-race-nomemset-preferred-loc-smem: Tests allocation of a new
> + buffer object with preferred location set to system memory, skipping
> + memset
> + * @new-race-preferred-loc-smem: tests allocation of a new buffer
> + object with preferred location set to system memory
> *
> * SUBTEST: prefetch-benchmark
> * Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
> @@ -1072,16 +1147,6 @@ partial(int fd, struct drm_xe_engine_class_instance
> *eci, unsigned int flags)
> * Description: Create multiple threads with a faults on different hardware
> engines to same addresses, racing between CPU and GPU access
> * Test category: stress test
> */
> -
> -struct test_exec_data {
> - uint32_t batch[32];
> - uint64_t pad;
> - uint64_t vm_sync;
> - uint64_t exec_sync;
> - uint32_t data;
> - uint32_t expected_data;
> -};
> -
> static void igt_require_hugepages(void) {
> igt_skip_on_f(!igt_get_meminfo("HugePages_Total"),
> @@ -1090,11 +1155,37 @@ static void igt_require_hugepages(void)
> "No huge pages available!\n");
> }
>
> +static int
> +xe_vm_madvixe_pat_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + int pat_index)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0); }
> +
> +static int
> +xe_vm_madvise_atomic_attr(int fd, uint32_t vm, uint64_t addr, uint64_t
> range,
> + int mem_attr)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_ATOMIC,
> + mem_attr, 0);
> +}
> +
> +static int
> +xe_vm_madvise_migrate_pages(int fd, uint32_t vm, uint64_t addr,
> +uint64_t range) {
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
> + DRM_XE_MIGRATE_ALL_PAGES);
> +}
> +
> static void
> test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> int n_exec_queues, int n_execs, size_t bo_size,
> size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> - unsigned int flags)
> + unsigned long long flags)
> {
> uint64_t addr;
> struct drm_xe_sync sync[1] = {
> @@ -1107,9 +1198,10 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> .syncs = to_user_pointer(sync),
> };
> uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> - struct test_exec_data *data, *next_data = NULL;
> + struct test_exec_data *data, *next_data = NULL, *org_data;
> uint32_t bo_flags;
> uint32_t bo = 0, bind_sync = 0;
> + uint32_t val;
> void **pending_free;
> u64 *exec_ufence = NULL, *bind_ufence = NULL;
> int i, j, b, file_fd = -1, prev_idx, pf_count, err; @@ -1234,6 +1326,133
> @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> strerror(errno), vm, to_user_pointer(data),
> bo_size);
> }
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + if (flags & MIGRATE_ALL_PAGES) {
> + err = xe_vm_madvise_migrate_pages(fd, vm,
> to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s,
> vm =%u, data=%"PRIu64"\n",
> + strerror(errno), vm,
> to_user_pointer(data));
> + } else {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data),
> bo_size, 0,
> +
> DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> +
> DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + }
> + }
Here the if and else branches do the same thing — both set the preferred location to SMEM. Can you please check?
Can you please also add test coverage for DRM_XE_ATOMIC_UNDEFINED?
Also, migration is performed for all of the atomic cases. Can you please add an atomic case without migration as well?
> + if (flags & PREFERRED_LOC_ATOMIC_DEVICE) {
> + err = xe_vm_madvise_migrate_pages(fd, vm,
> to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_GL) {
> + err = xe_vm_madvise_migrate_pages(fd, vm,
> to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic global attr err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + err = xe_vm_madvise_migrate_pages(fd, vm,
> to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_CPU);
> + if (err)
> + igt_warn("failure in atomic cpu attr err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_MULTI_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data) + bo_size/2,
> + bo_size/2,
> DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
After setting the atomic attribute at a specific location, write the PAT value right next to it only, to avoid confusion;
remove the other PAT values.
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data) + bo_size,
> + bo_size,
> DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic multi_vma err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data),
> bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 4 err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data),
> bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 3 err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data) +
> bo_size/2, bo_size/2,
> + intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 8 err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_SPLIT_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags =
> DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data) +
> bo_size/2,
> + bo_size/2, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data) +
> bo_size/2,
> + bo_size/2, DRM_XE_VM_BIND_OP_MAP,
> +
> DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data),
> + bo_size/2,
> DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in split atomic device attr err = %s, vm
> =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_ATOMIC_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags =
> DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci-
> >gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data),
> bo_size, 0,
> +0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data),
> bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> +
> DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0,
> FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm,
> to_user_pointer(data), bo_size/2,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic vma err = %s, vm =%u
> data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> +
> if (flags & BO_UNMAP) {
> bo_flags =
> DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> bo = xe_bo_create(fd, vm, bo_size,
> @@ -1307,6 +1526,16 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> bool fault_injected = (FAULT & flags) && i > n_execs;
>
> + if (flags & MADVISE_MULTI_VMA) {
> + addr = addr + bo_size;
> + org_data = data;
> + data = from_user_pointer(addr);
> + batch_offset = (char *)&(data[idx].batch) - (char *)data;
> + batch_addr = addr + batch_offset;
> + sdi_offset = (char *)&(data[idx].data) - (char *)data;
> + sdi_addr = addr + sdi_offset;
> + }
> +
We are rearranging the address for Multi VMA, but I don't see that happening for Split VMA.
In the Split VMA case, we are binding the first half of the VMA, but shouldn't we also shift the address before writing data, similar to Multi VMA?
My understanding is that we should update the address to point to the correct region before writing.
Can you confirm this?
eg:
------------------------------------------------
if (flags & MADVISE_SPLIT_VMA) {
int b = 0;
uint64_t first_half_addr = addr;
uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
uint64_t sdi_addr_1 = first_half_addr + sdi_offset;
write_dword(&data[idx], sdi_addr_1,
WRITE_VALUE(&data[idx], idx), &b,
flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[idx].batch));
b = 0;
uint64_t second_half_addr = addr + bo_size/2;
uint64_t sdi_addr_2 = second_half_addr + sdi_offset;
write_dword(&data[idx], sdi_addr_2,
WRITE_VALUE(&data[idx], idx), &b,
flags & ATOMIC_BATCH ? true : false);
igt_assert(b <= ARRAY_SIZE(data[idx].batch));
}
------------------------------------------------------------------
> if (barrier)
> pthread_barrier_wait(barrier);
>
> @@ -1316,18 +1545,74 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> __write_dword(data[idx].batch,
> sdi_addr + j * orig_size,
> WRITE_VALUE(&data[idx], idx),
> &b);
> - write_dword(data[idx].batch, sdi_addr + j * orig_size,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr + j * orig_size,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> } else if (!(flags & EVERY_OTHER_CHECK)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + if (flags & PREFETCH) {
> + if (flags & PREFETCH_SPLIT_VMA) {
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0,
> addr, bo_size, NULL, 0, 0);
> +
> + igt_info("num_vmas before madvise =
> %d \n", val);
> +
> + val =
> xe_vm_print_mem_attr_values_in_range(fd, vm, addr,
> +bo_size);
> +
> + err =
> xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size/2);
> + if (err)
> + igt_warn("failure in prefetch
> split vma err = %s, vm =%u data=%"PRIu64"\n",
> +
> strerror(errno), vm, to_user_pointer(data));
> + igt_info("num_vmas after madvise= %d
> \n", val);
> + val =
> xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + } else if (flags & PREFETCH_SAME_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd,
> vm, to_user_pointer(data), bo_size,
> +
> DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch
> same attr err = %s, vm =%u data=%"PRIu64"\n",
> +
> strerror(errno), vm, to_user_pointer(data));
> + val =
> xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + xe_vm_prefetch_async(fd, vm, 0, 0,
> addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> + err = xe_vm_madvise_atomic_attr(fd,
> vm, to_user_pointer(data), bo_size/2,
> +
> DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch
> atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> +
> strerror(errno), vm, to_user_pointer(data));
> + } else if (flags & PREFETCH_CHANGE_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd,
> vm, to_user_pointer(data), bo_size,
> +
> DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch
> atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> +
> strerror(errno), vm, to_user_pointer(data));
> + val =
> xe_vm_print_mem_attr_values_in_range(fd, vm, addr,
> +bo_size);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0,
> addr, bo_size, NULL, 0,
> +DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> +
> + err = xe_vm_madvise_atomic_attr(fd,
> vm, to_user_pointer(data), bo_size,
> +
> DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in prefetch
> change attr err = %s, vm =%u data=%"PRIu64"\n",
> +
> strerror(errno), vm, to_user_pointer(data));
> + val =
> xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + }
> + } else {
Please remove this else case — you are already writing above.
Please also check why CI is not listing all of the new test cases in the pre-merge results.
-- Pravalika
> + b = 0;
> + write_dword((struct test_exec_data
> *)&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx],
> idx), &b,
> + flags & ATOMIC_BATCH ?
> true : false);
> + igt_assert(b <=
> ARRAY_SIZE(data[idx].batch));
> + }
> } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
>
> aligned_alloc_type = __aligned_alloc(aligned_size,
> bo_size); @@ -1346,10 +1631,11 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> __aligned_partial_free(&aligned_alloc_type);
>
> b = 0;
> - write_dword(data[next_idx].batch,
> + write_dword(&data[next_idx],
> to_user_pointer(next_data) +
> (char *)&data[next_idx].data - (char *)data,
> - WRITE_VALUE(&data[next_idx], next_idx),
> &b);
> + WRITE_VALUE(&data[next_idx], next_idx),
> &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> }
>
> @@ -1404,9 +1690,18 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> exec_queues[e], &timeout);
> igt_assert(err == -ETIME || err == -EIO);
> } else {
> - xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> - &data[idx].exec_sync,
> USER_FENCE_VALUE,
> - exec_queues[e], FIVE_SEC);
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + int64_t timeout = QUARTER_SEC;
> + err = __xe_wait_ufence(fd, exec_ufence ?
> exec_ufence :
> + &data[idx].exec_sync,
> + USER_FENCE_VALUE,
> + exec_queues[e],
> &timeout);
> + if (err)
> + goto cleanup;
> + } else
> + xe_wait_ufence(fd, exec_ufence ? exec_ufence
> :
> + &data[idx].exec_sync,
> USER_FENCE_VALUE,
> + exec_queues[e], FIVE_SEC);
> if (flags & LOCK && !i)
> munlock(data, bo_size);
>
> @@ -1456,17 +1751,17 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> if (flags & FORK_READ) {
> igt_fork(child, 1)
> igt_assert_eq(data[idx].data,
> -
> READ_VALUE(&data[idx]));
> + flags &
> ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[idx]));
> if (!(flags & FORK_READ_AFTER))
> igt_assert_eq(data[idx].data,
> -
> READ_VALUE(&data[idx]));
> + flags &
> ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[idx]));
> igt_waitchildren();
> if (flags & FORK_READ_AFTER)
> igt_assert_eq(data[idx].data,
> -
> READ_VALUE(&data[idx]));
> + flags &
> ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[idx]));
> } else {
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ?
> VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[idx]));
>
> if (flags &
> PREFETCH_SYS_BENCHMARK) {
> struct timespec tv = {};
> @@ -1494,13 +1789,13 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> ((void *)data)
> + j * orig_size;
>
>
> igt_assert_eq(__data[idx].data,
> -
> READ_VALUE(&data[idx]));
> + flags &
> ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[idx]));
> }
> }
> }
> if (flags & EVERY_OTHER_CHECK)
> igt_assert_eq(data[prev_idx].data,
> -
> READ_VALUE(&data[prev_idx]));
> + flags & ATOMIC_BATCH ?
> VAL_ATOMIC_EXPECTED :
> +READ_VALUE(&data[prev_idx]));
> }
> }
>
> @@ -1521,6 +1816,9 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> gem_close(fd, bo);
> }
>
> + if (flags & MADVISE_MULTI_VMA)
> + data = org_data;
> +
> if (flags & NEW) {
> if (flags & MMAP) {
> if (flags & FREE)
> @@ -1610,6 +1908,7 @@ test_exec(int fd, struct
> drm_xe_engine_class_instance *eci,
> pf_count, pf_count_after);
> }
>
> +cleanup:
> if (bo) {
> sync[0].addr = to_user_pointer(bind_ufence);
> __xe_vm_bind_assert(fd, vm, 0,
> @@ -1864,7 +2163,7 @@ processes(int fd, int n_exec_queues, int n_execs,
> size_t bo_size,
>
> struct section {
> const char *name;
> - unsigned int flags;
> + unsigned long long flags;
> };
>
> igt_main
> @@ -1964,6 +2263,19 @@ igt_main
> { "malloc-mix-bo", MIX_BO_ALLOC },
> { NULL },
> };
> + const struct section msections[] = {
> + { "preffered-loc-sram-migrate-pages", PREFERRED_LOC_SMEM
> | MIGRATE_ALL_PAGES | ATOMIC_BATCH },
> + { "preffered-loc-atomic-vram",
> PREFERRED_LOC_ATOMIC_DEVICE | ATOMIC_BATCH },
> + { "preffered-loc-atomic-gl", PREFERRED_LOC_ATOMIC_GL |
> ATOMIC_BATCH },
> + { "preffered-loc-atomic-cpu", PREFERRED_LOC_ATOMIC_CPU |
> ATOMIC_BATCH },
> + { "multi-vma", MADVISE_MULTI_VMA | ATOMIC_BATCH },
> + { "split-vma", MADVISE_SPLIT_VMA | ATOMIC_BATCH },
> + { "atomic-vma", MADVISE_ATOMIC_VMA | ATOMIC_BATCH },
> + { "split-vma-with-mapping", PREFETCH | PREFETCH_SPLIT_VMA
> | ATOMIC_BATCH },
> + { "range-invalidate-change-attr", PREFETCH |
> PREFETCH_CHANGE_ATTR | ATOMIC_BATCH },
> + { "no-range-invalidate-same-attr", PREFETCH |
> PREFETCH_SAME_ATTR | ATOMIC_BATCH },
> + { NULL },
> + };
> int fd;
> int num_sections;
>
> @@ -1983,10 +2295,11 @@ igt_main
> for (const struct section *s = sections; s[num_sections].name;
> num_sections++)
> ;
>
> - for (int i = 0; i < num_sections * 2; i++) {
> - struct section *s = §ions[i % num_sections];
> + for (int i = 0; i < num_sections * 3; i++) {
> + struct section p = sections[i % num_sections];
> + struct section *s = &p;
>
> - if (i/num_sections == 0) {
> + if (i/num_sections == 1) {
> static char modified_name[256];
> snprintf(modified_name, sizeof(modified_name), "%s-
> preferred-loc-smem", s->name);
> s->name = modified_name;
> @@ -2175,6 +2488,14 @@ igt_main
> processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> }
>
> + for (const struct section *s = msections; s->name; s++) {
> + igt_subtest_f("madvise-%s", s->name) {
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, SZ_64K, 0, 0, NULL,
> + NULL, s->flags);
> + }
> + }
> +
> igt_fixture {
> xe_device_put(fd);
> drm_close_driver(fd);
> --
> 2.43.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure
2025-08-29 14:54 [PATCH i-g-t 0/5] nishit.sharma
@ 2025-08-29 14:54 ` nishit.sharma
0 siblings, 0 replies; 19+ messages in thread
From: nishit.sharma @ 2025-08-29 14:54 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Defined IOCTL number for madvise operation. Added drm_xe_madvise
which is passed as Input to MADVISE IOCTL.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
include/drm-uapi/xe_drm.h | 289 ++++++++++++++++++++++++++++++++++++--
1 file changed, 281 insertions(+), 8 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index a52f95593..e9a27a844 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _XE_DRM_H_
-#define _XE_DRM_H_
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
#include "drm.h"
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_REGION_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_REGION_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -134,7 +140,7 @@ extern "C" {
* redefine the interface more easily than an ever growing struct of
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
- * the boundary with pointers encapsulated inside u64.
+ * the __user boundary with pointers encapsulated inside u64.
*
* Example chaining:
*
@@ -925,9 +931,9 @@ struct drm_xe_gem_mmap_offset {
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1003,6 +1009,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1118,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
@@ -1394,7 +1405,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1974,8 +1985,270 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic_val = DRM_XE_ATOMIC_DEVICE,
+ * .pad = 0,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of faulting tile as preferred loc
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+ /** @preferred_mem_loc.pad : MBZ */
+ __u16 pad;
+
+ /** @preferred_mem_loc.reserved : Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+ * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour
+ * Support both GPU and CPU atomic operations for system allocator
+ * Support GPU atomic operations for normal(bo) allocator
+ * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations
+ * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations
+ * - DRM_XE_ATOMIC_CPU: Support CPU atomic
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by KMD in response to the
+ * DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes memory attributes of
+ * memory ranges within a user-specified address range in a VM.
+ *
+ * The structure includes information such as atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve attributes for each memory range.
+ *
+ * @extensions: Pointer to the first extension struct, if any
+ * @start: Start address of the memory range
+ * @end: End address of the virtual memory range
+ *
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query memory attributes of memory regions
+ * within a user specified address range in a VM. It provides detailed
+ * information about each memory range, including atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, the memory ranges changed between the
+ * first call and now; retry with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL, followed by a
+ * second ioctl call to fill the buffer again.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ * // First ioctl call to get num of mem regions and sizeof each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Allocate buffer for the memory region attributes
+ * void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Iterate over the returned memory region attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_mem_ranges: number of mem_ranges in range */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
-#endif /* _XE_DRM_H_ */
+#endif /* _UAPI_XE_DRM_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* Re: [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test
2025-08-28 16:58 ` [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test nishit.sharma
2025-08-29 14:21 ` Gurram, Pravalika
@ 2025-08-29 19:55 ` Matthew Brost
1 sibling, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2025-08-29 19:55 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev, pravalika.gurram, himal.prasad.ghimiray
On Thu, Aug 28, 2025 at 04:58:16PM +0000, nishit.sharma@intel.com wrote:
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> Added preferred-loc-smem test which is called in combination with other
> tests as well. In this test the buffer object preferred location is
> system memory. MADVISE ioctl is called with preferred_loc attribute and
> default_system system memory as preferred location.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> tests/intel/xe_exec_system_allocator.c | 225 ++++++++++++++++++++-----
> 1 file changed, 187 insertions(+), 38 deletions(-)
>
> diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> index 007d9bdc0..70ca5fc2e 100644
> --- a/tests/intel/xe_exec_system_allocator.c
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -138,7 +138,6 @@ static void signal_pdata(struct process_data *pdata)
> #define CPU_FAULT_THREADS (0x1 << 2)
> #define CPU_FAULT_PROCESS (0x1 << 3)
> #define CPU_FAULT_SAME_PAGE (0x1 << 4)
> -
> static void process_check(void *ptr, uint64_t alloc_size, uint64_t stride,
> unsigned int flags)
> {
> @@ -406,6 +405,39 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
> aligned_alloc_type->__size - aligned_alloc_type->size - begin_size);
> }
>
> +#define MAX_N_EXEC_QUEUES 16
> +
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +#define PREFETCH (0x1 << 22)
> +#define THREADS (0x1 << 23)
> +#define PROCESSES (0x1 << 24)
> +#define PREFETCH_BENCHMARK (0x1 << 25)
> +#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> +#define PREFERRED_LOC_SMEM (0x1 << 27)
> +
These flags are for sections that call test_exec() only.
> +#define N_MULTI_FAULT 4
> +
> /**
> * SUBTEST: unaligned-alloc
> * Description: allocate unaligned sizes of memory
> @@ -460,7 +492,7 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> uint32_t *bos = NULL;
> struct timespec tv = {};
> uint64_t submit, read, elapsed;
> - int i;
> + int i, err;
>
> vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> @@ -500,6 +532,15 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> alloc.ptr = aligned_alloc(SZ_2M, alloc_size);
> igt_assert(alloc.ptr);
> }
> +
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(alloc.ptr), alloc_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size =%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(alloc.ptr), alloc_size);
> + }
I agree with Pravalika, this is never set at the caller.
Also see above PREFERRED_LOC_SMEM flags shouldn't be used here, the
flags for many_allocs are defined here.
136 /* many_alloc flags */
137 #define MIX_BO_ALLOC (0x1 << 0)
138 #define BENCHMARK (0x1 << 1)
139 #define CPU_FAULT_THREADS (0x1 << 2)
140 #define CPU_FAULT_PROCESS (0x1 << 3)
141 #define CPU_FAULT_SAME_PAGE (0x1 << 4)
If you want to call madvise in many_alloc(), define a flag there. I don't
think it actually makes sense set DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM in
many_allocs() though as these sections are focused on migration perf /
eviction.
> allocs[i] = alloc;
>
> touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> @@ -662,7 +703,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> size_t bo_size = SZ_2M, unmap_offset = 0;
> uint32_t vm, exec_queue;
> u64 *exec_ufence = NULL;
> - int i;
> + int i, err;
> void *old, *new = NULL;
> struct aligned_alloc_type alloc;
>
> @@ -688,6 +729,15 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> data[0].vm_sync = 0;
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data), bo_size);
> + }
> +
Same as above, partial flags are defined here;
605 #define CPU_FAULT (0x1 << 0)
606 #define REMAP (0x1 << 1)
607 #define MIDDLE (0x1 << 2)
Also this section really is only interesting if migrations happens, so
DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM doesn't make a ton of sense here.
> exec_ufence = mmap(NULL, SZ_4K, PROT_READ |
> PROT_WRITE, MAP_SHARED |
> MAP_ANONYMOUS, -1, 0);
> @@ -747,38 +797,6 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> xe_vm_destroy(fd, vm);
> }
>
> -#define MAX_N_EXEC_QUEUES 16
> -
> -#define MMAP (0x1 << 0)
> -#define NEW (0x1 << 1)
> -#define BO_UNMAP (0x1 << 2)
> -#define FREE (0x1 << 3)
> -#define BUSY (0x1 << 4)
> -#define BO_MAP (0x1 << 5)
> -#define RACE (0x1 << 6)
> -#define SKIP_MEMSET (0x1 << 7)
> -#define FAULT (0x1 << 8)
> -#define FILE_BACKED (0x1 << 9)
> -#define LOCK (0x1 << 10)
> -#define MMAP_SHARED (0x1 << 11)
> -#define HUGE_PAGE (0x1 << 12)
> -#define SHARED_ALLOC (0x1 << 13)
> -#define FORK_READ (0x1 << 14)
> -#define FORK_READ_AFTER (0x1 << 15)
> -#define MREMAP (0x1 << 16)
> -#define DONTUNMAP (0x1 << 17)
> -#define READ_ONLY_REMAP (0x1 << 18)
> -#define SYNC_EXEC (0x1 << 19)
> -#define EVERY_OTHER_CHECK (0x1 << 20)
> -#define MULTI_FAULT (0x1 << 21)
> -#define PREFETCH (0x1 << 22)
> -#define THREADS (0x1 << 23)
> -#define PROCESSES (0x1 << 24)
> -#define PREFETCH_BENCHMARK (0x1 << 25)
> -#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> -
> -#define N_MULTI_FAULT 4
> -
As stated above, please don't move these flags.
> /**
> * SUBTEST: once-%s
> * Description: Run %arg[1] system allocator test only once
> @@ -951,6 +969,80 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * @mmap-new-nomemset: mmap a new buffer for each exec, skip memset of buffers
> * @mmap-new-huge-nomemset: mmap huge page new buffer for each exec, skip memset of buffers
> * @mmap-new-race-nomemset: mmap a new buffer for each exec with race between cpu and gpu access, skip memset of buffers
> + * @free-nomemset-preferred-loc-smem: malloc and free buffer for each exec and perform preferred loc madvise operation
> + * @free-preferred-loc-smem: free buffer for each exec and perform preferred loc madvise operation
> + * @free-race-nomemset-preferred-loc-smem: free buffer for each exec with race between cpu and gpu access and perform madvise operation skipping memset
> + * @free-race-preferred-loc-smem: free buffer for each exec with race between cpu and gpu access and perform madvise operation
> + * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer for all execs, bind and unbind a BO to same address, skip memset and perform madvise operation
> + * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer for all execs, try to unbind while buffer valid, skip memset of buffers and perform madvise operation
> + * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs, try to unbind while buffer valid and perform madvise operation
> + * @malloc-fork-read-after-preferred-loc-smem: malloc single buffer for all execs, fork a process to read test output, perform madvise operation
> + * @malloc-fork-read-preferred-loc-smem: malloc single buffer for all execs, fork a process to read test output, perform madvise operation
> + * @malloc-mlock-nomemset-preferred-loc-smem: malloc and mlock single buffer for all execs, skip memset of buffers, perform madvise operation
> + * @malloc-mlock-preferred-loc-smem: malloc and mlock single buffer for all execs, perform madvise operation
> + * @malloc-multi-fault-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
> + * @malloc-nomemset-preferred-loc-smem: malloc single buffer for all execs, skip memset of buffers and perform madvise operation
> + * @malloc-preferred-loc-smem: malloc single buffer for all execs, issue a command which will trigger multiple faults, perform madvise operation
> + * @malloc-prefetch-preferred-loc-smem: malloc single buffer for all execs, prefetch buffer before each exec, perform madvise operation
> + * @malloc-prefetch-race-preferred-loc-smem: malloc single buffer for all execs, prefetch buffer before each exec, perform madvise operation
> + * @malloc-race-nomemset-preferred-loc-smem: malloc single buffer for all execs with race between cpu and gpu access, perform madvise operation
> + * @malloc-race-preferred-loc-smem: malloc single buffer for all execs with race between cpu and gpu access, perform madvise operation
> + * @free-race-nomemset-preferred-loc-smem: malloc and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @free-race-preferred-loc-smem: malloc and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @malloc-bo-unmap-nomemset-preferred-loc-smem: malloc single buffer for all execs, bind and unbind a BO to same address before execs, perform madvise operation
> + * @malloc-bo-unmap-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
> + * @malloc-busy-nomemset-preferred-loc-smem: malloc single buffer for all execs and perform madvise operation
> + * @malloc-busy-preferred-loc-smem: malloc single buffer for all execs and perform madvise
> + * @mmap-file-mlock-nomemset-preferred-loc-smem: mmap and mlock single buffer, with file backing, perform madvise
> + * @mmap-file-mlock-preferred-loc-smem: mmap and mlock single buffer, with file backing, perform madvise
> + * @mmap-file-nomemset-preferred-loc-smem: mmap single buffer, with file backing and perform madvise
> + * @mmap-file-preferred-loc-smem: mmap single buffer, with file backing and perform madvise
> + * @mmap-free-huge-nomemset-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
> + * @mmap-free-huge-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
> + * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> + * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> + * @mmap-free-race-nomemset-preferred-loc-smem:
> + * @mmap-free-race-preferred-loc-smem:
> + * @mmap-huge-nomemset-preferred-loc-smem:
> + * @mmap-huge-preferred-loc-smem:
> + * @mmap-mlock-nomemset-preferred-loc-smem:
> + * @mmap-mlock-preferred-loc-smem:
> + * @mmap-new-huge-nomemset-preferred-loc-smem:
> + * @mmap-new-huge-preferred-loc-smem:
> + * @mmap-new-nomemset-preferred-loc-smem:
> + * @mmap-new-preferred-loc-smem:
> + * @mmap-new-race-nomemset-preferred-loc-smem:
> + * @mmap-new-race-preferred-loc-smem:
> + * @mmap-nomemset-preferred-loc-smem:
> + * @mmap-preferred-loc-smem:
> + * @mmap-prefetch-preferred-loc-smem:
> + * @mmap-prefetch-shared-preferred-loc-smem:
> + * @mmap-race-nomemset-preferred-loc-smem:
> + * @mmap-race-preferred-loc-smem:
> + * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-remap-dontunmap-preferred-loc-smem:
> + * @mmap-remap-eocheck-preferred-loc-smem:
> + * @mmap-remap-preferred-loc-smem:
> + * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-remap-ro-dontunmap-preferred-loc-smem:
> + * @mmap-remap-ro-eocheck-preferred-loc-smem:
> + * @mmap-remap-ro-preferred-loc-smem:
> + * @mmap-shared-nomemset-preferred-loc-smem:
> + * @mmap-shared-preferred-loc-smem:
> + * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
> + * @mmap-shared-remap-dontunmap-preferred-loc-smem:
> + * @mmap-shared-remap-eocheck-preferred-loc-smem:
> + * @mmap-shared-remap-preferred-loc-smem:
> + * @new-bo-map-nomemset-preferred-loc-smem:
> + * @new-bo-map-preferred-loc-smem:
> + * @new-busy-nomemset-preferred-loc-smem:
> + * @new-busy-preferred-loc-smem:
> + * @new-nomemset-preferred-loc-smem:
> + * @new-preferred-loc-smem:
> + * @new-prefetch-preferred-loc-smem:
> + * @new-race-nomemset-preferred-loc-smem:
> + * @new-race-preferred-loc-smem:
> + * @prefetch-benchmark:
> *
> * SUBTEST: prefetch-benchmark
> * Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
> @@ -1020,7 +1112,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> uint32_t bo = 0, bind_sync = 0;
> void **pending_free;
> u64 *exec_ufence = NULL, *bind_ufence = NULL;
> - int i, j, b, file_fd = -1, prev_idx, pf_count;
> + int i, j, b, file_fd = -1, prev_idx, pf_count, err;
> bool free_vm = false;
> size_t aligned_size = bo_size ?: xe_get_default_alignment(fd);
> size_t orig_size = bo_size;
> @@ -1133,6 +1225,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>
> addr = to_user_pointer(data);
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data), bo_size);
> + }
What I would here, rather than have PREFERRED_LOC_SMEM, maybe flag like
MADVISE_SWIZZLE. On each pass of the main loop, switch between
DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM and
DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE. I think that should create pretty
good coverage.
> +
> if (flags & BO_UNMAP) {
> bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> bo = xe_bo_create(fd, vm, bo_size,
> @@ -1202,7 +1303,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> uint64_t batch_addr = addr + batch_offset;
> uint64_t sdi_offset = (char *)&data[idx].data - (char *)data;
> uint64_t sdi_addr = addr + sdi_offset;
> - int e = i % n_exec_queues, err;
> + int e = i % n_exec_queues;
> bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> bool fault_injected = (FAULT & flags) && i > n_execs;
>
> @@ -1232,6 +1333,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> next_data = aligned_alloc_type.ptr;
> igt_assert(next_data);
> +
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(next_data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(next_data), bo_size);
> + }
> +
> __aligned_partial_free(&aligned_alloc_type);
>
> b = 0;
> @@ -1253,6 +1364,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> sync[0].addr = to_user_pointer(bind_ufence);
>
> start = igt_nsec_elapsed(&tv);
> +
> xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, sync,
> 1, region);
> end = igt_nsec_elapsed(&tv);
> @@ -1355,6 +1467,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> } else {
> igt_assert_eq(data[idx].data,
> READ_VALUE(&data[idx]));
> +
> if (flags & PREFETCH_SYS_BENCHMARK) {
> struct timespec tv = {};
> u64 start, end;
> @@ -1429,6 +1542,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> data = aligned_alloc_type.ptr;
> igt_assert(data);
> +
> __aligned_partial_free(&aligned_alloc_type);
>
> bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> @@ -1450,6 +1564,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> }
> bo = 0;
> data = aligned_alloc(aligned_size, bo_size);
> +
> igt_assert(data);
> }
> addr = to_user_pointer(data);
> @@ -1460,6 +1575,15 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> prev_idx = idx;
> }
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(data), bo_size);
> + }
> +
> if (flags & PREFETCH_BENCHMARK) {
> igt_info("Prefetch VRAM execution took %.3fms, %.1f5 GB/s\n",
> 1e-6 * prefetch_ns,
> @@ -1587,6 +1711,7 @@ threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> uint32_t vm = 0;
> bool go = false;
> void *alloc = NULL;
> + int err;
>
> if ((FILE_BACKED | FORK_READ) & flags)
> return;
> @@ -1614,6 +1739,15 @@ threads(int fd, int n_exec_queues, int n_execs, size_t bo_size,
> alloc = aligned_alloc(SZ_2M, alloc_size);
> igt_assert(alloc);
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(alloc), alloc_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + if (err)
> + igt_warn("MADVISE_FAILURE err = %s, vm =%u data=%"PRIu64" alloc_size = %zu\n",
> + strerror(errno), vm, to_user_pointer(alloc), alloc_size);
> + }
> +
> memset(alloc, 5, alloc_size);
> flags &= ~SHARED_ALLOC;
> }
> @@ -1831,6 +1965,7 @@ igt_main
> { NULL },
> };
> int fd;
> + int num_sections;
>
> igt_fixture {
> struct xe_device *xe;
> @@ -1843,7 +1978,21 @@ igt_main
> open_sync_file();
> }
>
> - for (const struct section *s = sections; s->name; s++) {
> +
> + num_sections = 0;
> + for (const struct section *s = sections; s[num_sections].name; num_sections++)
> + ;
> +
> + for (int i = 0; i < num_sections * 2; i++) {
> + struct section *s = &sections[i % num_sections];
> +
> + if (i/num_sections == 0) {
You are adding too many sections. I'd pick a interesting combination of
flags and add table entries for them.
I'd suggest:
malloc-madvise
malloc-prefetch-madvise
mmap-remap-madvise
new-madvise
free-madvise
mmap-free-madvise
If isn't clear copy the existing flags in those sections and add the
suggested MADVISE_SWIZZLE.
Matt
> + static char modified_name[256];
> + snprintf(modified_name, sizeof(modified_name), "%s-preferred-loc-smem", s->name);
> + s->name = modified_name;
> + s->flags |= PREFERRED_LOC_SMEM;
> + }
> +
> igt_subtest_f("once-%s", s->name)
> xe_for_each_engine(fd, hwe)
> test_exec(fd, hwe, 1, 1, 0, 0, 0, NULL,
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
2025-08-29 14:37 ` Gurram, Pravalika
@ 2025-08-29 20:07 ` Matthew Brost
2025-08-29 20:26 ` Matthew Brost
2 siblings, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2025-08-29 20:07 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev, pravalika.gurram, himal.prasad.ghimiray
On Thu, Aug 28, 2025 at 04:58:17PM +0000, nishit.sharma@intel.com wrote:
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> ATOMIC_BATCH flag is introduced when true MI_ATOMIC | MI_ATOMIC_INC
> operation will be called. This will avoid writing another function which
> performs atomic increment operations. ATOMIC_BATCH flag is passed as
> argument in write_dword() if true then value will be written on passed
> address and incremented by ATOMIC_INC operation. For all memory
> operations this flag will be used to verify if ATOMIC operation is
> working or not.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> lib/xe/xe_ioctl.c | 18 +-
> tests/intel/xe_exec_system_allocator.c | 545 ++++++++++++++++++++-----
> 2 files changed, 445 insertions(+), 118 deletions(-)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> index 4ab2ef39c..71a427b4d 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -688,19 +688,26 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> madvise.start = addr;
> madvise.range = range;
>
> - if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
> + switch (type) {
> + case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
> madvise.atomic.val = op_val;
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
> madvise.preferred_mem_loc.devmem_fd = op_val;
> madvise.preferred_mem_loc.migration_policy = policy;
> igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
> madvise.preferred_mem_loc.devmem_fd);
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PAT:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
> madvise.pat_index.val = op_val;
> - } else {
> + break;
> +
> + default:
> igt_warn("Unknown attribute\n");
> return -EINVAL;
> }
> @@ -730,6 +737,5 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
> {
> - igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy), 0);
> - return 0;
> + return __xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy);
> }
> diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> index 70ca5fc2e..d0a8431a2 100644
> --- a/tests/intel/xe_exec_system_allocator.c
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -21,6 +21,7 @@
> #include "lib/intel_reg.h"
> #include "xe_drm.h"
>
> +#include "intel_pat.h"
> #include "xe/xe_gt.h"
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> @@ -29,6 +30,14 @@
> #define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> #define QUARTER_SEC (NSEC_PER_SEC / 4)
> #define FIVE_SEC (5LL * NSEC_PER_SEC)
> +struct test_exec_data {
> + uint32_t batch[32];
> + uint64_t pad;
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> + uint32_t expected_data;
> +};
>
> struct batch_data {
> uint32_t batch[16];
> @@ -37,6 +46,7 @@ struct batch_data {
> uint32_t expected_data;
> };
>
> +#define VAL_ATOMIC_EXPECTED 56
> #define WRITE_VALUE(data__, i__) ({ \
> if (!(data__)->expected_data) \
> (data__)->expected_data = rand() << 12 | (i__); \
> @@ -53,10 +63,19 @@ static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> batch[(*idx)++] = wdata;
> }
>
> -static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> - int *idx)
> +static void write_dword(struct test_exec_data *data, uint64_t sdi_addr, uint32_t wdata,
> + int *idx, bool atomic)
> {
> - __write_dword(batch, sdi_addr, wdata, idx);
> + uint32_t *batch = data->batch;
> +
> + if (atomic) {
> + data->data = 55;
> + batch[(*idx)++] = MI_ATOMIC | MI_ATOMIC_INC;
> + batch[(*idx)++] = sdi_addr;
> + batch[(*idx)++] = sdi_addr >> 32;
> + } else {
> + __write_dword(batch, sdi_addr, wdata, idx);
> + }
> batch[(*idx)++] = MI_BATCH_BUFFER_END;
> }
>
> @@ -271,7 +290,7 @@ check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
>
> static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t alloc_size, uint64_t stride,
> - struct timespec *tv, uint64_t *submit)
> + struct timespec *tv, uint64_t *submit, bool atomic)
> {
> struct drm_xe_sync sync[1] = {
> { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> @@ -302,7 +321,8 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> + write_dword((struct test_exec_data *)data, sdi_addr, WRITE_VALUE(data, i),
> + &b, atomic ? true : false);
> igt_assert(b <= ARRAY_SIZE(data->batch));
> }
>
> @@ -407,34 +427,45 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
>
> #define MAX_N_EXEC_QUEUES 16
>
> -#define MMAP (0x1 << 0)
> -#define NEW (0x1 << 1)
> -#define BO_UNMAP (0x1 << 2)
> -#define FREE (0x1 << 3)
> -#define BUSY (0x1 << 4)
> -#define BO_MAP (0x1 << 5)
> -#define RACE (0x1 << 6)
> -#define SKIP_MEMSET (0x1 << 7)
> -#define FAULT (0x1 << 8)
> -#define FILE_BACKED (0x1 << 9)
> -#define LOCK (0x1 << 10)
> -#define MMAP_SHARED (0x1 << 11)
> -#define HUGE_PAGE (0x1 << 12)
> -#define SHARED_ALLOC (0x1 << 13)
> -#define FORK_READ (0x1 << 14)
> -#define FORK_READ_AFTER (0x1 << 15)
> -#define MREMAP (0x1 << 16)
> -#define DONTUNMAP (0x1 << 17)
> -#define READ_ONLY_REMAP (0x1 << 18)
> -#define SYNC_EXEC (0x1 << 19)
> -#define EVERY_OTHER_CHECK (0x1 << 20)
> -#define MULTI_FAULT (0x1 << 21)
> -#define PREFETCH (0x1 << 22)
> -#define THREADS (0x1 << 23)
> -#define PROCESSES (0x1 << 24)
> -#define PREFETCH_BENCHMARK (0x1 << 25)
> -#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> -#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +#define PREFETCH (0x1 << 22)
> +#define THREADS (0x1 << 23)
> +#define PROCESSES (0x1 << 24)
> +#define PREFETCH_BENCHMARK (0x1 << 25)
> +#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> +#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define ATOMIC_BATCH (0x1 << 28)
> +#define MIGRATE_ALL_PAGES (0x1 << 29)
> +#define PREFERRED_LOC_ATOMIC_DEVICE (0x1 << 30)
> +#define PREFERRED_LOC_ATOMIC_GL (0x1ull << 31)
> +#define PREFERRED_LOC_ATOMIC_CPU (0x1ull << 32)
> +#define MADVISE_MULTI_VMA (0x1ull << 33)
> +#define MADVISE_SPLIT_VMA (0x1ull << 34)
> +#define MADVISE_ATOMIC_VMA (0x1ull << 35)
> +#define PREFETCH_SPLIT_VMA (0x1ull << 36)
> +#define PREFETCH_CHANGE_ATTR (0x1ull << 37)
> +#define PREFETCH_SAME_ATTR (0x1ull << 38)
>
> #define N_MULTI_FAULT 4
>
> @@ -478,6 +509,47 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
> * SUBTEST: processes-evict-malloc-mix-bo
> * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> * Test category: stress test
> + *
> + * SUBTEST: madvise-multi-vma
> + * Description: performs multiple madvise operations on multiple virtual memory area using atomic device attributes
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma
> + * Description: perform madvise operations on multiple type VMAs (BO and CPU VMAs)
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-atomic-vma
> + * Description: perform madvise atomic operations on BO in VRAM/SMEM if atomic ATTR global/device
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma-with-mapping
> + * Description: performs prefetch and page migration
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-vram
> + * Description: performs both atomic and preferred loc madvise operations with atomic device attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-gl
> + * Description: performs both atomic and preferred loc madvise operations with atomic global attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-cpu
> + * Description: performs both atomic and preferred loc madvise operations with atomic cpu attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-sram-migrate-pages
> + * Description: performs preferred loc madvise operations and migrating all pages in smem
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-no-range-invalidate-same-attr
> + * Description: performs atomic global madvise operation, prefetch and again madvise operation with same atomic attribute
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-range-invalidate-change-attr
> + * Description: performs atomic global madvise operation, prefetch and again madvise operation with different atomic attribute
> + * Test category: functionality test
> + *
> */
>
> static void
> @@ -544,7 +616,7 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> allocs[i] = alloc;
>
> touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> - &tv, &submit);
> + &tv, &submit, flags & ATOMIC_BATCH);
> }
>
> if (barrier)
> @@ -692,7 +764,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> .num_syncs = 1,
> .syncs = to_user_pointer(sync),
> };
> - struct {
> + struct batch_data {
> uint32_t batch[16];
> uint64_t pad;
> uint64_t vm_sync;
> @@ -750,7 +822,8 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> + write_dword((struct test_exec_data *)&data[i], sdi_addr, WRITE_VALUE(&data[i], i),
> + &b, ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[i].batch));
>
> if (!i)
> @@ -773,7 +846,10 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> exec_queue, FIVE_SEC);
> if (i || (flags & CPU_FAULT))
> - igt_assert_eq(data[i].data, READ_VALUE(&data[i]));
> + igt_assert_eq(data[i].data,
> + flags & ATOMIC_BATCH
> + ? VAL_ATOMIC_EXPECTED
> + : READ_VALUE(&data[i]));
> exec_ufence[0] = 0;
>
> if (!i) {
> @@ -1001,48 +1077,47 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * @mmap-free-huge-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
> * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> - * @mmap-free-race-nomemset-preferred-loc-smem:
> - * @mmap-free-race-preferred-loc-smem:
> - * @mmap-huge-nomemset-preferred-loc-smem:
> - * @mmap-huge-preferred-loc-smem:
> - * @mmap-mlock-nomemset-preferred-loc-smem:
> - * @mmap-mlock-preferred-loc-smem:
> - * @mmap-new-huge-nomemset-preferred-loc-smem:
> - * @mmap-new-huge-preferred-loc-smem:
> - * @mmap-new-nomemset-preferred-loc-smem:
> - * @mmap-new-preferred-loc-smem:
> - * @mmap-new-race-nomemset-preferred-loc-smem:
> - * @mmap-new-race-preferred-loc-smem:
> - * @mmap-nomemset-preferred-loc-smem:
> - * @mmap-preferred-loc-smem:
> - * @mmap-prefetch-preferred-loc-smem:
> - * @mmap-prefetch-shared-preferred-loc-smem:
> - * @mmap-race-nomemset-preferred-loc-smem:
> - * @mmap-race-preferred-loc-smem:
> - * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-dontunmap-preferred-loc-smem:
> - * @mmap-remap-eocheck-preferred-loc-smem:
> - * @mmap-remap-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-preferred-loc-smem:
> - * @mmap-remap-ro-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-preferred-loc-smem:
> - * @mmap-shared-nomemset-preferred-loc-smem:
> - * @mmap-shared-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-preferred-loc-smem:
> - * @mmap-shared-remap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-preferred-loc-smem:
> - * @new-bo-map-nomemset-preferred-loc-smem:
> - * @new-bo-map-preferred-loc-smem:
> - * @new-busy-nomemset-preferred-loc-smem:
> - * @new-busy-preferred-loc-smem:
> - * @new-nomemset-preferred-loc-smem:
> - * @new-preferred-loc-smem:
> - * @new-prefetch-preferred-loc-smem:
> - * @new-race-nomemset-preferred-loc-smem:
> - * @new-race-preferred-loc-smem:
> - * @prefetch-benchmark:
> + * @mmap-free-race-nomemset-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @mmap-free-race-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @mmap-huge-nomemset-preferred-loc-smem: mmap huge page single buffer for all execs, skips memset and perform madvise operation
> + * @mmap-huge-preferred-loc-smem: mmap huge page single buffer for all execs, perform madvise operation
> + * @mmap-mlock-nomemset-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory, skipping memset
> + * @mmap-mlock-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory
> + * @mmap-new-huge-nomemset-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory and skipping memset
> + * @mmap-new-huge-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory
> + * @mmap-new-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory and skipping memset
> + * @mmap-new-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
> + * @mmap-new-race-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with race between CPU and GPU access, preferred location set to system memory and skipping memset
> + * @mmap-new-race-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
> + * @mmap-nomemset-preferred-loc-smem: mmap of a buffer with preferred location set to system memory, skipping memset
> + * @mmap-preferred-loc-smem: mmap of a buffer with preferred location set to system memory
> + * @mmap-prefetch-preferred-loc-smem: prefetching mmap buffer with preferred location set to system memory
> + * @mmap-prefetch-shared-preferred-loc-smem: mmap of a shared buffer with prefetch and preferred location set to system memory
> + * @mmap-race-nomemset-preferred-loc-smem: mmap of a buffer with race between CPU and GPU access, preferred location set to system memory, skipping memset
> + * @mmap-race-preferred-loc-smem: mmap buffer with race between GPU and CPU access with preferred location set to system memory
> + * @mmap-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-dontunmap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
> + * @mmap-remap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
> + * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-ro-dontunmap-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-ro-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
> + * @mmap-remap-ro-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
> + * @mmap-shared-nomemset-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory, skipping memset
> + * @mmap-shared-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory
> + * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-shared-remap-dontunmap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory
> + * @mmap-shared-remap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory with end of check validation
> + * @mmap-shared-remap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory without end of check validation
> + * @new-bo-map-nomemset-preferred-loc-smem: Tests allocation and mapping of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-bo-map-preferred-loc-smem: Tests allocation and mapping of a new buffer object with preferred location set to system memory
> + * @new-busy-nomemset-preferred-loc-smem: Tests allocation and usage of a new busy buffer object with preferred location set to system memory, skipping memset
> + * @new-busy-preferred-loc-smem: Tests allocation and usage of a new busy buffer object with preferred location set to system memory
> + * @new-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory
> + * @new-prefetch-preferred-loc-smem: Tests allocation and prefetching of a new buffer object with preferred location set to system memory
> + * @new-race-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-race-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory
> *
> * SUBTEST: prefetch-benchmark
> * Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
> @@ -1072,16 +1147,6 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> * Test category: stress test
> */
> -
> -struct test_exec_data {
> - uint32_t batch[32];
> - uint64_t pad;
> - uint64_t vm_sync;
> - uint64_t exec_sync;
> - uint32_t data;
> - uint32_t expected_data;
> -};
> -
> static void igt_require_hugepages(void)
> {
> igt_skip_on_f(!igt_get_meminfo("HugePages_Total"),
> @@ -1090,11 +1155,37 @@ static void igt_require_hugepages(void)
> "No huge pages available!\n");
> }
>
> +static int
> +xe_vm_madvixe_pat_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + int pat_index)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0);
> +}
> +
> +static int
> +xe_vm_madvise_atomic_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + int mem_attr)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_ATOMIC,
> + mem_attr, 0);
> +}
> +
> +static int
> +xe_vm_madvise_migrate_pages(int fd, uint32_t vm, uint64_t addr, uint64_t range)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
> + DRM_XE_MIGRATE_ALL_PAGES);
> +}
> +
> static void
> test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> int n_exec_queues, int n_execs, size_t bo_size,
> size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> - unsigned int flags)
> + unsigned long long flags)
> {
> uint64_t addr;
> struct drm_xe_sync sync[1] = {
> @@ -1107,9 +1198,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> .syncs = to_user_pointer(sync),
> };
> uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> - struct test_exec_data *data, *next_data = NULL;
> + struct test_exec_data *data, *next_data = NULL, *org_data;
> uint32_t bo_flags;
> uint32_t bo = 0, bind_sync = 0;
> + uint32_t val;
> void **pending_free;
> u64 *exec_ufence = NULL, *bind_ufence = NULL;
> int i, j, b, file_fd = -1, prev_idx, pf_count, err;
> @@ -1234,6 +1326,133 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> strerror(errno), vm, to_user_pointer(data), bo_size);
> }
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + if (flags & MIGRATE_ALL_PAGES) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u, data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + } else {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
> + }
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_DEVICE) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_GL) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic global attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_CPU);
> + if (err)
> + igt_warn("failure in atomic cpu attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_MULTI_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size/2,
> + bo_size/2, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size,
> + bo_size, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic multi_vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 4 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 3 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data) + bo_size/2, bo_size/2,
> + intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 8 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_SPLIT_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data) + bo_size/2,
> + bo_size/2, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data) + bo_size/2,
> + bo_size/2, DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data),
> + bo_size/2, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in split atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_ATOMIC_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data), bo_size, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data), bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> +
> if (flags & BO_UNMAP) {
> bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> bo = xe_bo_create(fd, vm, bo_size,
> @@ -1307,6 +1526,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> bool fault_injected = (FAULT & flags) && i > n_execs;
>
> + if (flags & MADVISE_MULTI_VMA) {
> + addr = addr + bo_size;
> + org_data = data;
> + data = from_user_pointer(addr);
> + batch_offset = (char *)&(data[idx].batch) - (char *)data;
> + batch_addr = addr + batch_offset;
> + sdi_offset = (char *)&(data[idx].data) - (char *)data;
> + sdi_addr = addr + sdi_offset;
> + }
> +
> if (barrier)
> pthread_barrier_wait(barrier);
>
> @@ -1316,18 +1545,74 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> __write_dword(data[idx].batch,
> sdi_addr + j * orig_size,
> WRITE_VALUE(&data[idx], idx), &b);
> - write_dword(data[idx].batch, sdi_addr + j * orig_size,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr + j * orig_size,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> } else if (!(flags & EVERY_OTHER_CHECK)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + if (flags & PREFETCH) {
> + if (flags & PREFETCH_SPLIT_VMA) {
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, 0);
> +
> + igt_info("num_vmas before madvise = %d \n", val);
> +
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> +
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size/2);
> + if (err)
> + igt_warn("failure in prefetch split vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + igt_info("num_vmas after madvise= %d \n", val);
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + } else if (flags & PREFETCH_SAME_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch same attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + } else if (flags & PREFETCH_CHANGE_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in prefetch change attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + }
> + } else {
> + b = 0;
> + write_dword((struct test_exec_data *)&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + }
> } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
>
> aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> @@ -1346,10 +1631,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> __aligned_partial_free(&aligned_alloc_type);
>
> b = 0;
> - write_dword(data[next_idx].batch,
> + write_dword(&data[next_idx],
> to_user_pointer(next_data) +
> (char *)&data[next_idx].data - (char *)data,
> - WRITE_VALUE(&data[next_idx], next_idx), &b);
> + WRITE_VALUE(&data[next_idx], next_idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> }
>
> @@ -1404,9 +1690,18 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> exec_queues[e], &timeout);
> igt_assert(err == -ETIME || err == -EIO);
> } else {
> - xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> - &data[idx].exec_sync, USER_FENCE_VALUE,
> - exec_queues[e], FIVE_SEC);
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + int64_t timeout = QUARTER_SEC;
> + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync,
> + USER_FENCE_VALUE,
> + exec_queues[e], &timeout);
> + if (err)
> + goto cleanup;
> + } else
> + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync, USER_FENCE_VALUE,
> + exec_queues[e], FIVE_SEC);
> if (flags & LOCK && !i)
> munlock(data, bo_size);
>
> @@ -1456,17 +1751,17 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> if (flags & FORK_READ) {
> igt_fork(child, 1)
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> if (!(flags & FORK_READ_AFTER))
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> igt_waitchildren();
> if (flags & FORK_READ_AFTER)
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> } else {
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
>
> if (flags & PREFETCH_SYS_BENCHMARK) {
> struct timespec tv = {};
> @@ -1494,13 +1789,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> ((void *)data) + j * orig_size;
>
> igt_assert_eq(__data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> }
> }
> }
> if (flags & EVERY_OTHER_CHECK)
> igt_assert_eq(data[prev_idx].data,
> - READ_VALUE(&data[prev_idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[prev_idx]));
> }
> }
>
> @@ -1521,6 +1816,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> gem_close(fd, bo);
> }
>
> + if (flags & MADVISE_MULTI_VMA)
> + data = org_data;
> +
> if (flags & NEW) {
> if (flags & MMAP) {
> if (flags & FREE)
> @@ -1610,6 +1908,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> pf_count, pf_count_after);
> }
>
> +cleanup:
> if (bo) {
> sync[0].addr = to_user_pointer(bind_ufence);
> __xe_vm_bind_assert(fd, vm, 0,
> @@ -1864,7 +2163,7 @@ processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
>
> struct section {
> const char *name;
> - unsigned int flags;
> + unsigned long long flags;
> };
>
> igt_main
> @@ -1964,6 +2263,19 @@ igt_main
> { "malloc-mix-bo", MIX_BO_ALLOC },
> { NULL },
> };
> + const struct section msections[] = {
> + { "preffered-loc-sram-migrate-pages", PREFERRED_LOC_SMEM | MIGRATE_ALL_PAGES | ATOMIC_BATCH },
> + { "preffered-loc-atomic-vram", PREFERRED_LOC_ATOMIC_DEVICE | ATOMIC_BATCH },
> + { "preffered-loc-atomic-gl", PREFERRED_LOC_ATOMIC_GL | ATOMIC_BATCH },
> + { "preffered-loc-atomic-cpu", PREFERRED_LOC_ATOMIC_CPU | ATOMIC_BATCH },
> + { "multi-vma", MADVISE_MULTI_VMA | ATOMIC_BATCH },
> + { "split-vma", MADVISE_SPLIT_VMA | ATOMIC_BATCH },
> + { "atomic-vma", MADVISE_ATOMIC_VMA | ATOMIC_BATCH },
> + { "split-vma-with-mapping", PREFETCH | PREFETCH_SPLIT_VMA | ATOMIC_BATCH },
> + { "range-invalidate-change-attr", PREFETCH | PREFETCH_CHANGE_ATTR | ATOMIC_BATCH },
> + { "no-range-invalidate-same-attr", PREFETCH | PREFETCH_SAME_ATTR | ATOMIC_BATCH },
> + { NULL },
> + };
> int fd;
> int num_sections;
>
> @@ -1983,10 +2295,11 @@ igt_main
> for (const struct section *s = sections; s[num_sections].name; num_sections++)
> ;
>
> - for (int i = 0; i < num_sections * 2; i++) {
> - struct section *s = §ions[i % num_sections];
> + for (int i = 0; i < num_sections * 3; i++) {
> + struct section p = sections[i % num_sections];
> + struct section *s = &p;
>
> - if (i/num_sections == 0) {
> + if (i/num_sections == 1) {
> static char modified_name[256];
> snprintf(modified_name, sizeof(modified_name), "%s-preferred-loc-smem", s->name);
> s->name = modified_name;
> @@ -2175,6 +2488,14 @@ igt_main
> processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> }
>
> + for (const struct section *s = msections; s->name; s++) {
> + igt_subtest_f("madvise-%s", s->name) {
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, SZ_64K, 0, 0, NULL,
> + NULL, s->flags);
Since these sections are pretty directed and test_exec() is really
designed to do a bunch of random things all at the same time (e.g., like
my swizzle suggestion in the prior patch), I'd write a dedicated test
function for these madvise tests. People complain test_exec is already too
complicated / not modular enough; I intend to clean this function up a
bit when I have time. Adding more cases to that function will make this
worse.
If you'd like to reuse parts of the functionality in test_exec(), break
out the parts you need into helpers and then call them from your new
function which implements the directed tests for madvise. This would
help make test_exec() a bit more readable too.
Matt
Matt
> + }
> + }
> +
> igt_fixture {
> xe_device_put(fd);
> drm_close_driver(fd);
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
2025-08-29 14:37 ` Gurram, Pravalika
2025-08-29 20:07 ` Matthew Brost
@ 2025-08-29 20:26 ` Matthew Brost
2 siblings, 0 replies; 19+ messages in thread
From: Matthew Brost @ 2025-08-29 20:26 UTC (permalink / raw)
To: nishit.sharma; +Cc: igt-dev, pravalika.gurram, himal.prasad.ghimiray
On Thu, Aug 28, 2025 at 04:58:17PM +0000, nishit.sharma@intel.com wrote:
> From: Nishit Sharma <nishit.sharma@intel.com>
>
> ATOMIC_BATCH flag is introduced when true MI_ATOMIC | MI_ATOMIC_INC
> operation will be called. This will avoid writing another function which
> performs atomic increment operations. ATOMIC_BATCH flag is passed as
> argument in write_dword() if true then value will be written on passed
> address and incremented by ATOMIC_INC operation. For all memory
> operations this flag will be used to verify if ATOMIC operation is
> working or not.
>
> Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
> ---
> lib/xe/xe_ioctl.c | 18 +-
> tests/intel/xe_exec_system_allocator.c | 545 ++++++++++++++++++++-----
> 2 files changed, 445 insertions(+), 118 deletions(-)
>
> diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> index 4ab2ef39c..71a427b4d 100644
> --- a/lib/xe/xe_ioctl.c
> +++ b/lib/xe/xe_ioctl.c
> @@ -688,19 +688,26 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> madvise.start = addr;
> madvise.range = range;
>
> - if (type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
> + switch (type) {
> + case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC;
> madvise.atomic.val = op_val;
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC;
> madvise.preferred_mem_loc.devmem_fd = op_val;
> madvise.preferred_mem_loc.migration_policy = policy;
> igt_debug("madvise.preferred_mem_loc.devmem_fd = %d\n",
> madvise.preferred_mem_loc.devmem_fd);
> - } else if (type == DRM_XE_MEM_RANGE_ATTR_PAT) {
> + break;
> +
> + case DRM_XE_MEM_RANGE_ATTR_PAT:
> madvise.type = DRM_XE_MEM_RANGE_ATTR_PAT;
> madvise.pat_index.val = op_val;
> - } else {
> + break;
> +
> + default:
> igt_warn("Unknown attribute\n");
> return -EINVAL;
> }
> @@ -730,6 +737,5 @@ int __xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> int xe_vm_madvise(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> uint64_t ext, uint32_t type, uint32_t op_val, uint16_t policy)
> {
> - igt_assert_eq(__xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy), 0);
> - return 0;
> + return __xe_vm_madvise(fd, vm, addr, range, ext, type, op_val, policy);
> }
> diff --git a/tests/intel/xe_exec_system_allocator.c b/tests/intel/xe_exec_system_allocator.c
> index 70ca5fc2e..d0a8431a2 100644
> --- a/tests/intel/xe_exec_system_allocator.c
> +++ b/tests/intel/xe_exec_system_allocator.c
> @@ -21,6 +21,7 @@
> #include "lib/intel_reg.h"
> #include "xe_drm.h"
>
> +#include "intel_pat.h"
> #include "xe/xe_gt.h"
> #include "xe/xe_ioctl.h"
> #include "xe/xe_query.h"
> @@ -29,6 +30,14 @@
> #define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> #define QUARTER_SEC (NSEC_PER_SEC / 4)
> #define FIVE_SEC (5LL * NSEC_PER_SEC)
> +struct test_exec_data {
> + uint32_t batch[32];
> + uint64_t pad;
> + uint64_t vm_sync;
> + uint64_t exec_sync;
> + uint32_t data;
> + uint32_t expected_data;
> +};
>
> struct batch_data {
> uint32_t batch[16];
> @@ -37,6 +46,7 @@ struct batch_data {
> uint32_t expected_data;
> };
>
> +#define VAL_ATOMIC_EXPECTED 56
> #define WRITE_VALUE(data__, i__) ({ \
> if (!(data__)->expected_data) \
> (data__)->expected_data = rand() << 12 | (i__); \
> @@ -53,10 +63,19 @@ static void __write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> batch[(*idx)++] = wdata;
> }
>
> -static void write_dword(uint32_t *batch, uint64_t sdi_addr, uint32_t wdata,
> - int *idx)
> +static void write_dword(struct test_exec_data *data, uint64_t sdi_addr, uint32_t wdata,
> + int *idx, bool atomic)
> {
> - __write_dword(batch, sdi_addr, wdata, idx);
> + uint32_t *batch = data->batch;
> +
> + if (atomic) {
> + data->data = 55;
> + batch[(*idx)++] = MI_ATOMIC | MI_ATOMIC_INC;
> + batch[(*idx)++] = sdi_addr;
> + batch[(*idx)++] = sdi_addr >> 32;
> + } else {
> + __write_dword(batch, sdi_addr, wdata, idx);
> + }
> batch[(*idx)++] = MI_BATCH_BUFFER_END;
> }
>
> @@ -271,7 +290,7 @@ check_all_pages_threads(void *ptr, uint64_t alloc_size, uint64_t stride,
>
> static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t alloc_size, uint64_t stride,
> - struct timespec *tv, uint64_t *submit)
> + struct timespec *tv, uint64_t *submit, bool atomic)
> {
> struct drm_xe_sync sync[1] = {
> { .type = DRM_XE_SYNC_TYPE_USER_FENCE,
> @@ -302,7 +321,8 @@ static void touch_all_pages(int fd, uint32_t exec_queue, void *ptr,
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data->batch, sdi_addr, WRITE_VALUE(data, i), &b);
> + write_dword((struct test_exec_data *)data, sdi_addr, WRITE_VALUE(data, i),
> + &b, atomic ? true : false);
> igt_assert(b <= ARRAY_SIZE(data->batch));
> }
>
> @@ -407,34 +427,45 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
>
> #define MAX_N_EXEC_QUEUES 16
>
> -#define MMAP (0x1 << 0)
> -#define NEW (0x1 << 1)
> -#define BO_UNMAP (0x1 << 2)
> -#define FREE (0x1 << 3)
> -#define BUSY (0x1 << 4)
> -#define BO_MAP (0x1 << 5)
> -#define RACE (0x1 << 6)
> -#define SKIP_MEMSET (0x1 << 7)
> -#define FAULT (0x1 << 8)
> -#define FILE_BACKED (0x1 << 9)
> -#define LOCK (0x1 << 10)
> -#define MMAP_SHARED (0x1 << 11)
> -#define HUGE_PAGE (0x1 << 12)
> -#define SHARED_ALLOC (0x1 << 13)
> -#define FORK_READ (0x1 << 14)
> -#define FORK_READ_AFTER (0x1 << 15)
> -#define MREMAP (0x1 << 16)
> -#define DONTUNMAP (0x1 << 17)
> -#define READ_ONLY_REMAP (0x1 << 18)
> -#define SYNC_EXEC (0x1 << 19)
> -#define EVERY_OTHER_CHECK (0x1 << 20)
> -#define MULTI_FAULT (0x1 << 21)
> -#define PREFETCH (0x1 << 22)
> -#define THREADS (0x1 << 23)
> -#define PROCESSES (0x1 << 24)
> -#define PREFETCH_BENCHMARK (0x1 << 25)
> -#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> -#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define MMAP (0x1 << 0)
> +#define NEW (0x1 << 1)
> +#define BO_UNMAP (0x1 << 2)
> +#define FREE (0x1 << 3)
> +#define BUSY (0x1 << 4)
> +#define BO_MAP (0x1 << 5)
> +#define RACE (0x1 << 6)
> +#define SKIP_MEMSET (0x1 << 7)
> +#define FAULT (0x1 << 8)
> +#define FILE_BACKED (0x1 << 9)
> +#define LOCK (0x1 << 10)
> +#define MMAP_SHARED (0x1 << 11)
> +#define HUGE_PAGE (0x1 << 12)
> +#define SHARED_ALLOC (0x1 << 13)
> +#define FORK_READ (0x1 << 14)
> +#define FORK_READ_AFTER (0x1 << 15)
> +#define MREMAP (0x1 << 16)
> +#define DONTUNMAP (0x1 << 17)
> +#define READ_ONLY_REMAP (0x1 << 18)
> +#define SYNC_EXEC (0x1 << 19)
> +#define EVERY_OTHER_CHECK (0x1 << 20)
> +#define MULTI_FAULT (0x1 << 21)
> +#define PREFETCH (0x1 << 22)
> +#define THREADS (0x1 << 23)
> +#define PROCESSES (0x1 << 24)
> +#define PREFETCH_BENCHMARK (0x1 << 25)
> +#define PREFETCH_SYS_BENCHMARK (0x1 << 26)
> +#define PREFERRED_LOC_SMEM (0x1 << 27)
> +#define ATOMIC_BATCH (0x1 << 28)
> +#define MIGRATE_ALL_PAGES (0x1 << 29)
> +#define PREFERRED_LOC_ATOMIC_DEVICE (0x1 << 30)
> +#define PREFERRED_LOC_ATOMIC_GL (0x1ull << 31)
> +#define PREFERRED_LOC_ATOMIC_CPU (0x1ull << 32)
> +#define MADVISE_MULTI_VMA (0x1ull << 33)
> +#define MADVISE_SPLIT_VMA (0x1ull << 34)
> +#define MADVISE_ATOMIC_VMA (0x1ull << 35)
> +#define PREFETCH_SPLIT_VMA (0x1ull << 36)
> +#define PREFETCH_CHANGE_ATTR (0x1ull << 37)
> +#define PREFETCH_SAME_ATTR (0x1ull << 38)
>
> #define N_MULTI_FAULT 4
>
> @@ -478,6 +509,47 @@ static void __aligned_partial_free(struct aligned_alloc_type *aligned_alloc_typ
> * SUBTEST: processes-evict-malloc-mix-bo
> * Description: multi-process trigger eviction of VRAM allocated via malloc and BO create
> * Test category: stress test
> + *
> + * SUBTEST: madvise-multi-vma
> + * Description: performs multiple madvise operations on multiple virtual memory area using atomic device attributes
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma
> + * Description: perform madvise operations on multiple type VMAs (BO and CPU VMAs)
> + * Test category: perform madvise operations on multiple type VMAs (BO and CPU VMAs)
> + *
> + * SUBTEST: madvise-atomic-vma
> + * Description: perform madvise atomic operations on BO in VRAM/SMEM if atomic ATTR global/device
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-split-vma-with-mapping
> + * Description: performs prefetch and page migration
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-vram
> + * Description: performs both atomic and preferred loc madvise operations atomic device attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-gl
> + * Description: performs both atomic and preferred loc madvise operations with atomic global attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-atomic-cpu
> + * Description: performs both atomic and preferred loc madvise operations with atomic cpu attributes set
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-preffered-loc-sram-migrate-pages
> + * Description: performs preferred loc madvise operations and migrating all pages in smem
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-no-range-invalidate-same-attr
> + * Description: performs atomic global madvise operation, prefetch and again madvise operation with same atomic attribute
> + * Test category: functionality test
> + *
> + * SUBTEST: madvise-range-invalidate-change-attr
> + * Description: performs atomic global madvise operation, prefetch and again madvise operation with different atomic attribute
> + * Test category: functionality test
> + *
> */
>
> static void
> @@ -544,7 +616,7 @@ many_allocs(int fd, struct drm_xe_engine_class_instance *eci,
> allocs[i] = alloc;
>
> touch_all_pages(fd, exec_queue, allocs[i].ptr, alloc_size, stride,
> - &tv, &submit);
> + &tv, &submit, flags & ATOMIC_BATCH);
> }
>
> if (barrier)
> @@ -692,7 +764,7 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> .num_syncs = 1,
> .syncs = to_user_pointer(sync),
> };
> - struct {
> + struct batch_data {
> uint32_t batch[16];
> uint64_t pad;
> uint64_t vm_sync;
> @@ -750,7 +822,8 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> uint64_t sdi_addr = addr + sdi_offset;
> int b = 0;
>
> - write_dword(data[i].batch, sdi_addr, WRITE_VALUE(&data[i], i), &b);
> + write_dword((struct test_exec_data *)&data[i], sdi_addr, WRITE_VALUE(&data[i], i),
> + &b, ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[i].batch));
>
> if (!i)
> @@ -773,7 +846,10 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> xe_wait_ufence(fd, new ?: exec_ufence, USER_FENCE_VALUE,
> exec_queue, FIVE_SEC);
> if (i || (flags & CPU_FAULT))
> - igt_assert_eq(data[i].data, READ_VALUE(&data[i]));
> + igt_assert_eq(data[i].data,
> + flags & ATOMIC_BATCH
> + ? VAL_ATOMIC_EXPECTED
> + : READ_VALUE(&data[i]));
> exec_ufence[0] = 0;
>
> if (!i) {
> @@ -1001,48 +1077,47 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * @mmap-free-huge-preferred-loc-smem: mmap huge page and free buffer for each exec and perform madvise
> * @mmap-free-nomemset-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> * @mmap-free-preferred-loc-smem: mmap and free buffer for each exec and perform madvise
> - * @mmap-free-race-nomemset-preferred-loc-smem:
> - * @mmap-free-race-preferred-loc-smem:
> - * @mmap-huge-nomemset-preferred-loc-smem:
> - * @mmap-huge-preferred-loc-smem:
> - * @mmap-mlock-nomemset-preferred-loc-smem:
> - * @mmap-mlock-preferred-loc-smem:
> - * @mmap-new-huge-nomemset-preferred-loc-smem:
> - * @mmap-new-huge-preferred-loc-smem:
> - * @mmap-new-nomemset-preferred-loc-smem:
> - * @mmap-new-preferred-loc-smem:
> - * @mmap-new-race-nomemset-preferred-loc-smem:
> - * @mmap-new-race-preferred-loc-smem:
> - * @mmap-nomemset-preferred-loc-smem:
> - * @mmap-preferred-loc-smem:
> - * @mmap-prefetch-preferred-loc-smem:
> - * @mmap-prefetch-shared-preferred-loc-smem:
> - * @mmap-race-nomemset-preferred-loc-smem:
> - * @mmap-race-preferred-loc-smem:
> - * @mmap-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-dontunmap-preferred-loc-smem:
> - * @mmap-remap-eocheck-preferred-loc-smem:
> - * @mmap-remap-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-dontunmap-preferred-loc-smem:
> - * @mmap-remap-ro-eocheck-preferred-loc-smem:
> - * @mmap-remap-ro-preferred-loc-smem:
> - * @mmap-shared-nomemset-preferred-loc-smem:
> - * @mmap-shared-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-dontunmap-preferred-loc-smem:
> - * @mmap-shared-remap-eocheck-preferred-loc-smem:
> - * @mmap-shared-remap-preferred-loc-smem:
> - * @new-bo-map-nomemset-preferred-loc-smem:
> - * @new-bo-map-preferred-loc-smem:
> - * @new-busy-nomemset-preferred-loc-smem:
> - * @new-busy-preferred-loc-smem:
> - * @new-nomemset-preferred-loc-smem:
> - * @new-preferred-loc-smem:
> - * @new-prefetch-preferred-loc-smem:
> - * @new-race-nomemset-preferred-loc-smem:
> - * @new-race-preferred-loc-smem:
> - * @prefetch-benchmark:
> + * @mmap-free-race-nomemset-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @mmap-free-race-preferred-loc-smem: mmap and free buffer for each exec with race between cpu and gpu access, perform madvise operation
> + * @mmap-huge-nomemset-preferred-loc-smem: mmap huge page single buffer for all execs, skips memset and perform madvise operation
> + * @mmap-huge-preferred-loc-smem: mmap huge page single buffer for all execs, perform madvise operation
> + * @mmap-mlock-nomemset-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory, skipping memset
> + * @mmap-mlock-preferred-loc-smem: mmap and mlock of a buffer with preferred location set to system memory
> + * @mmap-new-huge-nomemset-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory and skipping memset
> + * @mmap-new-huge-preferred-loc-smem: mmap of a newly allocated buffer using huge pages, with preferred location set to system memory
> + * @mmap-new-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory and skipping memset
> + * @mmap-new-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
> + * @mmap-new-race-nomemset-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory and skipping memset
> + * @mmap-new-race-preferred-loc-smem: mmap of a newly allocated buffer with preferred location set to system memory
> + * @mmap-nomemset-preferred-loc-smem: mmap of a buffer with preferred location set to system memory, skipping memset
> + * @mmap-preferred-loc-smem: mmap of a buffer with preferred location set to system memory
> + * @mmap-prefetch-preferred-loc-smem: prefetching mmap buffer with preferred location set to system memory
> + * @mmap-prefetch-shared-preferred-loc-smem: mmap of a shared buffer with prefetch and preferred location set to system memory
> + * @mmap-race-nomemset-preferred-loc-smem: Tests mmap of a buffer with preferred location set to system memory, skipping memset
> + * @mmap-race-preferred-loc-smem: mmap buffer with race between GPU and CPU access with preferred location set to system memory
> + * @mmap-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-dontunmap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-eocheck-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
> + * @mmap-remap-preferred-loc-smem: mmap and remap of a buffer with preferred location set to system memory
> + * @mmap-remap-ro-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-ro-dontunmap-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-remap-ro-eocheck-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
> + * @mmap-remap-ro-preferred-loc-smem: mmap and remap of a read-only buffer with preferred location set to system memory
> + * @mmap-shared-nomemset-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory, skipping memset
> + * @mmap-shared-preferred-loc-smem: mmap of a shared buffer with preferred location set to system memory
> + * @mmap-shared-remap-dontunmap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory, does not unmap after use
> + * @mmap-shared-remap-dontunmap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory
> + * @mmap-shared-remap-eocheck-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory with end of check validation
> + * @mmap-shared-remap-preferred-loc-smem: mmap and remap of a shared buffer with preferred location set to system memory without end of check validation
> + * @new-bo-map-nomemset-preferred-loc-smem: Tests allocation and mapping of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-bo-map-preferred-loc-smem: ests allocation and mapping of a new buffer object with preferred location set to system memory
> + * @new-busy-nomemset-preferred-loc-smem: Tests allocation and usage of a new busy buffer object with preferred location set to system memory, skipping memset
> + * @new-busy-preferred-loc-smem: ests allocation and usage of a new busy buffer object with preferred location set to system memory
> + * @new-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory
> + * @new-prefetch-preferred-loc-smem: Tests allocation and prefetching of a new buffer object with preferred location set to system memory
> + * @new-race-nomemset-preferred-loc-smem: Tests allocation of a new buffer object with preferred location set to system memory, skipping memset
> + * @new-race-preferred-loc-smem: tests allocation of a new buffer object with preferred location set to system memory
> *
> * SUBTEST: prefetch-benchmark
> * Description: Prefetch a 64M buffer 128 times, measure bandwidth of prefetch
> @@ -1072,16 +1147,6 @@ partial(int fd, struct drm_xe_engine_class_instance *eci, unsigned int flags)
> * Description: Create multiple threads with a faults on different hardware engines to same addresses, racing between CPU and GPU access
> * Test category: stress test
> */
> -
> -struct test_exec_data {
> - uint32_t batch[32];
> - uint64_t pad;
> - uint64_t vm_sync;
> - uint64_t exec_sync;
> - uint32_t data;
> - uint32_t expected_data;
> -};
> -
> static void igt_require_hugepages(void)
> {
> igt_skip_on_f(!igt_get_meminfo("HugePages_Total"),
> @@ -1090,11 +1155,37 @@ static void igt_require_hugepages(void)
> "No huge pages available!\n");
> }
>
> +static int
> +xe_vm_madvixe_pat_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + int pat_index)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PAT, pat_index, 0);
> +}
> +
> +static int
> +xe_vm_madvise_atomic_attr(int fd, uint32_t vm, uint64_t addr, uint64_t range,
> + int mem_attr)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_ATOMIC,
> + mem_attr, 0);
> +}
> +
> +static int
> +xe_vm_madvise_migrate_pages(int fd, uint32_t vm, uint64_t addr, uint64_t range)
> +{
> + return xe_vm_madvise(fd, vm, addr, range, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
> + DRM_XE_MIGRATE_ALL_PAGES);
> +}
> +
> static void
> test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> int n_exec_queues, int n_execs, size_t bo_size,
> size_t stride, uint32_t vm, void *alloc, pthread_barrier_t *barrier,
> - unsigned int flags)
> + unsigned long long flags)
> {
> uint64_t addr;
> struct drm_xe_sync sync[1] = {
> @@ -1107,9 +1198,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> .syncs = to_user_pointer(sync),
> };
> uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> - struct test_exec_data *data, *next_data = NULL;
> + struct test_exec_data *data, *next_data = NULL, *org_data;
> uint32_t bo_flags;
> uint32_t bo = 0, bind_sync = 0;
> + uint32_t val;
> void **pending_free;
> u64 *exec_ufence = NULL, *bind_ufence = NULL;
> int i, j, b, file_fd = -1, prev_idx, pf_count, err;
> @@ -1234,6 +1326,133 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> strerror(errno), vm, to_user_pointer(data), bo_size);
> }
>
> + if (flags & PREFERRED_LOC_SMEM) {
> + if (flags & MIGRATE_ALL_PAGES) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u, data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + } else {
> + err = xe_vm_madvise(fd, vm, to_user_pointer(data), bo_size, 0,
> + DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
> + DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, 0);
This is not how error checking is done in IGTs. We just assert success
where it makes sense. So in this patch all xe_vm_madvise functions should
return void, and the one in the base library that calls the IOCTL should
assert success.
Matt
> + }
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_DEVICE) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_GL) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic global attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size);
> + if (err)
> + igt_warn("failure in page migration err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_CPU);
> + if (err)
> + igt_warn("failure in atomic cpu attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_MULTI_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size/2,
> + bo_size/2, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data) + bo_size,
> + bo_size, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in atomic multi_vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 4 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data), bo_size, intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 3 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> +
> + err = xe_vm_madvixe_pat_attr(fd, vm, to_user_pointer(data) + bo_size/2, bo_size/2,
> + intel_get_pat_idx_wb(fd));
> + if (err)
> + igt_warn("failure in pat attr index 8 err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_SPLIT_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size,
> + vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data) + bo_size/2,
> + bo_size/2, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data) + bo_size/2,
> + bo_size/2, DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data),
> + bo_size/2, DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in split atomic device attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> + if (flags & MADVISE_ATOMIC_VMA) {
> + if (bo_size)
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> + bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, eci->gt_id), bo_flags);
> + xe_vm_bind_async(fd, vm, 0, bo, 0, to_user_pointer(data), bo_size, 0, 0);
> +
> + __xe_vm_bind_assert(fd, vm, 0, 0, 0, to_user_pointer(data), bo_size,
> + DRM_XE_VM_BIND_OP_MAP,
> + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR, sync,
> + 1, 0, 0);
> + xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, FIVE_SEC);
> + data[0].vm_sync = 0;
> + gem_close(fd, bo);
> + bo = 0;
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in atomic vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + }
> +
> if (flags & BO_UNMAP) {
> bo_flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
> bo = xe_bo_create(fd, vm, bo_size,
> @@ -1307,6 +1526,16 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> bool fault_inject = (FAULT & flags) && i == n_execs / 2;
> bool fault_injected = (FAULT & flags) && i > n_execs;
>
> + if (flags & MADVISE_MULTI_VMA) {
> + addr = addr + bo_size;
> + org_data = data;
> + data = from_user_pointer(addr);
> + batch_offset = (char *)&(data[idx].batch) - (char *)data;
> + batch_addr = addr + batch_offset;
> + sdi_offset = (char *)&(data[idx].data) - (char *)data;
> + sdi_addr = addr + sdi_offset;
> + }
> +
> if (barrier)
> pthread_barrier_wait(barrier);
>
> @@ -1316,18 +1545,74 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> __write_dword(data[idx].batch,
> sdi_addr + j * orig_size,
> WRITE_VALUE(&data[idx], idx), &b);
> - write_dword(data[idx].batch, sdi_addr + j * orig_size,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr + j * orig_size,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> } else if (!(flags & EVERY_OTHER_CHECK)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + if (flags & PREFETCH) {
> + if (flags & PREFETCH_SPLIT_VMA) {
> + bo_size = ALIGN(bo_size, SZ_4K);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, 0);
> +
> + igt_info("num_vmas before madvise = %d \n", val);
> +
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> +
> + err = xe_vm_madvise_migrate_pages(fd, vm, to_user_pointer(data), bo_size/2);
> + if (err)
> + igt_warn("failure in prefetch split vma err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + igt_info("num_vmas after madvise= %d \n", val);
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + } else if (flags & PREFETCH_SAME_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch same attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size/2,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + } else if (flags & PREFETCH_CHANGE_ATTR) {
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_GLOBAL);
> + if (err)
> + igt_warn("failure in prefetch atomic attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> +
> + xe_vm_prefetch_async(fd, vm, 0, 0, addr, bo_size, NULL, 0, DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC);
> +
> + err = xe_vm_madvise_atomic_attr(fd, vm, to_user_pointer(data), bo_size,
> + DRM_XE_ATOMIC_DEVICE);
> + if (err)
> + igt_warn("failure in prefetch change attr err = %s, vm =%u data=%"PRIu64"\n",
> + strerror(errno), vm, to_user_pointer(data));
> + val = xe_vm_print_mem_attr_values_in_range(fd, vm, addr, bo_size);
> + }
> + } else {
> + b = 0;
> + write_dword((struct test_exec_data *)&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> + igt_assert(b <= ARRAY_SIZE(data[idx].batch));
> + }
> } else if (flags & EVERY_OTHER_CHECK && !odd(i)) {
> b = 0;
> - write_dword(data[idx].batch, sdi_addr,
> - WRITE_VALUE(&data[idx], idx), &b);
> + write_dword(&data[idx], sdi_addr,
> + WRITE_VALUE(&data[idx], idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[idx].batch));
>
> aligned_alloc_type = __aligned_alloc(aligned_size, bo_size);
> @@ -1346,10 +1631,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> __aligned_partial_free(&aligned_alloc_type);
>
> b = 0;
> - write_dword(data[next_idx].batch,
> + write_dword(&data[next_idx],
> to_user_pointer(next_data) +
> (char *)&data[next_idx].data - (char *)data,
> - WRITE_VALUE(&data[next_idx], next_idx), &b);
> + WRITE_VALUE(&data[next_idx], next_idx), &b,
> + flags & ATOMIC_BATCH ? true : false);
> igt_assert(b <= ARRAY_SIZE(data[next_idx].batch));
> }
>
> @@ -1404,9 +1690,18 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> exec_queues[e], &timeout);
> igt_assert(err == -ETIME || err == -EIO);
> } else {
> - xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> - &data[idx].exec_sync, USER_FENCE_VALUE,
> - exec_queues[e], FIVE_SEC);
> + if (flags & PREFERRED_LOC_ATOMIC_CPU) {
> + int64_t timeout = QUARTER_SEC;
> + err = __xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync,
> + USER_FENCE_VALUE,
> + exec_queues[e], &timeout);
> + if (err)
> + goto cleanup;
> + } else
> + xe_wait_ufence(fd, exec_ufence ? exec_ufence :
> + &data[idx].exec_sync, USER_FENCE_VALUE,
> + exec_queues[e], FIVE_SEC);
> if (flags & LOCK && !i)
> munlock(data, bo_size);
>
> @@ -1456,17 +1751,17 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> if (flags & FORK_READ) {
> igt_fork(child, 1)
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> if (!(flags & FORK_READ_AFTER))
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> igt_waitchildren();
> if (flags & FORK_READ_AFTER)
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> } else {
> igt_assert_eq(data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
>
> if (flags & PREFETCH_SYS_BENCHMARK) {
> struct timespec tv = {};
> @@ -1494,13 +1789,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> ((void *)data) + j * orig_size;
>
> igt_assert_eq(__data[idx].data,
> - READ_VALUE(&data[idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[idx]));
> }
> }
> }
> if (flags & EVERY_OTHER_CHECK)
> igt_assert_eq(data[prev_idx].data,
> - READ_VALUE(&data[prev_idx]));
> + flags & ATOMIC_BATCH ? VAL_ATOMIC_EXPECTED : READ_VALUE(&data[prev_idx]));
> }
> }
>
> @@ -1521,6 +1816,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> gem_close(fd, bo);
> }
>
> + if (flags & MADVISE_MULTI_VMA)
> + data = org_data;
> +
> if (flags & NEW) {
> if (flags & MMAP) {
> if (flags & FREE)
> @@ -1610,6 +1908,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> pf_count, pf_count_after);
> }
>
> +cleanup:
> if (bo) {
> sync[0].addr = to_user_pointer(bind_ufence);
> __xe_vm_bind_assert(fd, vm, 0,
> @@ -1864,7 +2163,7 @@ processes(int fd, int n_exec_queues, int n_execs, size_t bo_size,
>
> struct section {
> const char *name;
> - unsigned int flags;
> + unsigned long long flags;
> };
>
> igt_main
> @@ -1964,6 +2263,19 @@ igt_main
> { "malloc-mix-bo", MIX_BO_ALLOC },
> { NULL },
> };
> + const struct section msections[] = {
> + { "preffered-loc-sram-migrate-pages", PREFERRED_LOC_SMEM | MIGRATE_ALL_PAGES | ATOMIC_BATCH },
> + { "preffered-loc-atomic-vram", PREFERRED_LOC_ATOMIC_DEVICE | ATOMIC_BATCH },
> + { "preffered-loc-atomic-gl", PREFERRED_LOC_ATOMIC_GL | ATOMIC_BATCH },
> + { "preffered-loc-atomic-cpu", PREFERRED_LOC_ATOMIC_CPU | ATOMIC_BATCH },
> + { "multi-vma", MADVISE_MULTI_VMA | ATOMIC_BATCH },
> + { "split-vma", MADVISE_SPLIT_VMA | ATOMIC_BATCH },
> + { "atomic-vma", MADVISE_ATOMIC_VMA | ATOMIC_BATCH },
> + { "split-vma-with-mapping", PREFETCH | PREFETCH_SPLIT_VMA | ATOMIC_BATCH },
> + { "range-invalidate-change-attr", PREFETCH | PREFETCH_CHANGE_ATTR | ATOMIC_BATCH },
> + { "no-range-invalidate-same-attr", PREFETCH | PREFETCH_SAME_ATTR | ATOMIC_BATCH },
> + { NULL },
> + };
> int fd;
> int num_sections;
>
> @@ -1983,10 +2295,11 @@ igt_main
> for (const struct section *s = sections; s[num_sections].name; num_sections++)
> ;
>
> - for (int i = 0; i < num_sections * 2; i++) {
> - struct section *s = §ions[i % num_sections];
> + for (int i = 0; i < num_sections * 3; i++) {
> + struct section p = sections[i % num_sections];
> + struct section *s = &p;
>
> - if (i/num_sections == 0) {
> + if (i/num_sections == 1) {
> static char modified_name[256];
> snprintf(modified_name, sizeof(modified_name), "%s-preferred-loc-smem", s->name);
> s->name = modified_name;
> @@ -2175,6 +2488,14 @@ igt_main
> processes_evict(fd, SZ_8M, SZ_1M, s->flags);
> }
>
> + for (const struct section *s = msections; s->name; s++) {
> + igt_subtest_f("madvise-%s", s->name) {
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, SZ_64K, 0, 0, NULL,
> + NULL, s->flags);
> + }
> + }
> +
> igt_fixture {
> xe_device_put(fd);
> drm_close_driver(fd);
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure
2025-09-01 16:40 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
@ 2025-09-01 16:40 ` nishit.sharma
0 siblings, 0 replies; 19+ messages in thread
From: nishit.sharma @ 2025-09-01 16:40 UTC (permalink / raw)
To: igt-dev, pravalika.gurram, himal.prasad.ghimiray, matthew.brost,
nishit.sharma
From: Nishit Sharma <nishit.sharma@intel.com>
Define the IOCTL number for the madvise operation and add the
drm_xe_madvise structure, which is passed as input to the MADVISE IOCTL.
Signed-off-by: Nishit Sharma <nishit.sharma@intel.com>
---
include/drm-uapi/xe_drm.h | 289 ++++++++++++++++++++++++++++++++++++--
1 file changed, 281 insertions(+), 8 deletions(-)
diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
index a52f95593..e9a27a844 100644
--- a/include/drm-uapi/xe_drm.h
+++ b/include/drm-uapi/xe_drm.h
@@ -3,8 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
-#ifndef _XE_DRM_H_
-#define _XE_DRM_H_
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
#include "drm.h"
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_REGION_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_REGION_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -134,7 +140,7 @@ extern "C" {
* redefine the interface more easily than an ever growing struct of
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
- * the boundary with pointers encapsulated inside u64.
+ * the __user boundary with pointers encapsulated inside u64.
*
* Example chaining:
*
@@ -925,9 +931,9 @@ struct drm_xe_gem_mmap_offset {
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1003,6 +1009,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1118,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
@@ -1394,7 +1405,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1974,8 +1985,270 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic_val = DRM_XE_ATOMIC_DEVICE,
+ * .pad = 0,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set vram of faulting tile as preferred loc
+ * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set smem as preferred loc
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+ /** @preferred_mem_loc.pad : MBZ */
+ __u16 pad;
+
+ /** @preferred_mem_loc.reserved : Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+ * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour
+ * Support both GPU and CPU atomic operations for system allocator
+ * Support GPU atomic operations for normal(bo) allocator
+ * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations
+ * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations
+ * - DRM_XE_ATOMIC_CPU: Support CPU atomic
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by the KMD in response to
+ * the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the memory
+ * attributes of memory ranges within a user-specified address range in a VM.
+ *
+ * The structure includes information such as atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve attributes for each memory range.
+ *
+ * @extensions: Pointer to the first extension struct, if any
+ * @start: Start address of the memory range
+ * @end: End address of the virtual memory range
+ *
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query memory attributes of memory regions
+ * within a user specified address range in a VM. It provides detailed
+ * information about each memory range, including atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, the memory ranges changed between the
+ * first call and now; retry the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL, followed by the
+ * second ioctl call.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ * // First ioctl call to get num of mem regions and sizeof each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Allocate buffer for the memory region attributes
+ * void *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Iterate over the returned memory region attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_mem_ranges: number of mem_ranges in range */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
-#endif /* _XE_DRM_H_ */
+#endif /* _UAPI_XE_DRM_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
end of thread, other threads:[~2025-09-01 16:40 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-08-28 16:58 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
2025-08-28 16:58 ` [PATCH i-g-t 2/5] lib/xe: Add xe_vm_madvise ioctl support nishit.sharma
2025-08-29 13:56 ` Gurram, Pravalika
2025-08-28 16:58 ` [PATCH i-g-t 3/5] lib/xe: Add Helper to get memory attributes nishit.sharma
2025-08-29 14:02 ` Gurram, Pravalika
2025-08-28 16:58 ` [PATCH i-g-t 4/5] tests/intel/xe_exec_system_allocator: Add preferred_loc_smem test nishit.sharma
2025-08-29 14:21 ` Gurram, Pravalika
2025-08-29 19:55 ` Matthew Brost
2025-08-28 16:58 ` [PATCH i-g-t v5 5/5] tests/intel/xe_exec_system_allocator: Add atomic_batch test in IGT nishit.sharma
2025-08-29 14:37 ` Gurram, Pravalika
2025-08-29 20:07 ` Matthew Brost
2025-08-29 20:26 ` Matthew Brost
2025-08-29 2:40 ` ✓ Xe.CI.BAT: success for Madvise Tests in IGT (rev5) Patchwork
2025-08-29 2:55 ` ✓ i915.CI.BAT: " Patchwork
2025-08-29 12:11 ` ✓ Xe.CI.Full: " Patchwork
2025-08-29 13:38 ` ✓ i915.CI.Full: " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2025-08-29 14:54 [PATCH i-g-t 0/5] nishit.sharma
2025-08-29 14:54 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
2025-09-01 16:40 [PATCH i-g-t 0/5] Madvise Tests in IGT nishit.sharma
2025-09-01 16:40 ` [PATCH i-g-t 1/5] DO-NOT-MERGE: include/drm-uapi: Add drm_xe_madvise structure nishit.sharma
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).