* [PATCH v3 01/19] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-27 16:39 ` [PATCH v3 02/19] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
` (25 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe
Cc: Himal Prasad Ghimiray, Danilo Krummrich, Matthew Brost,
Boris Brezillon, dri-devel
- DRM_GPUVM_SM_MAP_NOT_MADVISE: Default sm_map operations for the input
range.
- DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE: This flag is used by
drm_gpuvm_sm_map_ops_create to iterate over GPUVMA's in the
user-provided range and split the existing non-GEM object VMA if the
start or end of the input range lies within it. The operations can
create up to 2 REMAPS and 2 MAPs. The purpose of this operation is to be
used by the Xe driver to assign attributes to GPUVMA's within the
user-defined range. Unlike the default drm_gpuvm_sm_map behavior,
operations created with this flag never include UNMAPs or merges, and
may produce no final MAP operation at all.
v2
- use drm_gpuvm_sm_map_ops_create with flags instead of defining new
ops_create (Danilo)
- Add doc (Danilo)
v3
- Fix doc
- Fix unmapping check
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/drm_gpuvm.c | 93 ++++++++++++++++++++------
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 1 +
drivers/gpu/drm/xe/xe_vm.c | 1 +
include/drm/drm_gpuvm.h | 25 ++++++-
4 files changed, 98 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..8e06fe79fb9b 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2102,10 +2102,13 @@ static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
u64 req_addr, u64 req_range,
+ enum drm_gpuvm_sm_map_ops_flags flags,
struct drm_gem_object *req_obj, u64 req_offset)
{
struct drm_gpuva *va, *next;
u64 req_end = req_addr + req_range;
+ bool is_madvise_ops = (flags == DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE);
+ bool needs_map = !is_madvise_ops;
int ret;
if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
@@ -2118,26 +2121,35 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 range = va->va.range;
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ bool skip_madvise_ops = is_madvise_ops && merge;
+ needs_map = false;
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = range - req_range,
@@ -2152,6 +2164,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
break;
}
} else if (addr < req_addr) {
@@ -2169,20 +2184,42 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u.keep = merge;
if (end == req_end) {
+ if (skip_madvise_ops)
+ break;
+
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
+
break;
}
if (end < req_end) {
+ if (skip_madvise_ops)
+ continue;
+
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops) {
+ ret = op_map_cb(ops, priv, req_addr,
+ min(end - req_addr, req_end - end),
+ NULL, req_offset);
+ if (ret)
+ return ret;
+ }
+
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
@@ -2194,6 +2231,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ needs_map = true;
break;
}
} else if (addr > req_addr) {
@@ -2202,20 +2242,29 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
- if (ret)
- return ret;
+ if (!is_madvise_ops) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ }
+
continue;
}
if (end > req_end) {
+ if (skip_madvise_ops)
+ break;
+
struct drm_gpuva_op_map n = {
.va.addr = req_end,
.va.range = end - req_end,
@@ -2230,14 +2279,16 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (is_madvise_ops)
+ return op_map_cb(ops, priv, addr,
+ (req_end - addr), NULL, req_offset);
break;
}
}
}
-
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return needs_map ? op_map_cb(ops, priv, req_addr,
+ req_range, req_obj, req_offset) : 0;
}
static int
@@ -2336,15 +2387,15 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
struct drm_gem_object *req_obj, u64 req_offset)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ enum drm_gpuvm_sm_map_ops_flags flags = DRM_GPUVM_SM_MAP_NOT_MADVISE;
if (unlikely(!(ops && ops->sm_step_map &&
ops->sm_step_remap &&
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req_addr, req_range,
+ flags, req_obj, req_offset);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2486,6 +2537,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
+ * @flags: ops flag determining madvise or not
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
*
@@ -2516,6 +2568,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 req_addr, u64 req_range,
+ enum drm_gpuvm_sm_map_ops_flags flags,
struct drm_gem_object *req_obj, u64 req_offset)
{
struct drm_gpuva_ops *ops;
@@ -2535,7 +2588,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
args.ops = ops;
ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
+ req_addr, req_range, flags,
req_obj, req_offset);
if (ret)
goto err_free_ops;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..26e13fcdbdb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1303,6 +1303,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
op->va.addr,
op->va.range,
+ DRM_GPUVM_SM_MAP_NOT_MADVISE,
op->gem.obj,
op->gem.offset);
if (IS_ERR(op->ops)) {
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5a978da411b0..7dd8742f7cd9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2311,6 +2311,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
case DRM_XE_VM_BIND_OP_MAP:
case DRM_XE_VM_BIND_OP_MAP_USERPTR:
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
+ DRM_GPUVM_SM_MAP_NOT_MADVISE,
obj, bo_offset_or_userptr);
break;
case DRM_XE_VM_BIND_OP_UNMAP:
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 2a9629377633..c589b886a4fd 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -211,6 +211,27 @@ enum drm_gpuvm_flags {
DRM_GPUVM_USERBITS = BIT(1),
};
+/**
+ * enum drm_gpuvm_sm_map_ops_flags - flags for drm_gpuvm split/merge ops
+ */
+enum drm_gpuvm_sm_map_ops_flags {
+ /**
+ * @DRM_GPUVM_SM_MAP_NOT_MADVISE: DEFAULT sm_map ops
+ */
+ DRM_GPUVM_SM_MAP_NOT_MADVISE = 0,
+
+ /**
+ * @DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE: This flag is used by
+ * drm_gpuvm_sm_map_ops_create to iterate over GPUVMA's in the
+ * user-provided range and split the existing non-GEM object VMA if the
+ * start or end of the input range lies within it. The operations can
+ * create up to 2 REMAPS and 2 MAPs. Unlike drm_gpuvm_sm_map_ops_flags
+ * in default mode, the operation with this flag will never have UNMAPs and
+ * merges, and can be without any final operations.
+ */
+ DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE = BIT(0),
+};
+
/**
* struct drm_gpuvm - DRM GPU VA Manager
*
@@ -1059,8 +1080,8 @@ struct drm_gpuva_ops {
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
struct drm_gpuva_ops *
-drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 addr, u64 range,
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
+ enum drm_gpuvm_sm_map_ops_flags flags,
struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
2025-05-27 16:39 ` [PATCH v3 01/19] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 16:27 ` Matthew Brost
` (2 more replies)
2025-05-27 16:39 ` [PATCH v3 03/19] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
` (24 subsequent siblings)
26 siblings, 3 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
This commit introduces a new madvise interface to support
driver-specific ioctl operations. The madvise interface allows for more
efficient memory management by providing hints to the driver about the
expected memory usage and pte update policy for gpuvma.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
1 file changed, 97 insertions(+)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 9c08738c3b91..e0d75226a724 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -81,6 +81,7 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
*/
/*
@@ -102,6 +103,7 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
/* Must be kept compact -- no holes */
@@ -117,6 +119,7 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
/**
* DOC: Xe IOCTL Extensions
@@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+struct drm_xe_madvise_ops {
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
+#define DRM_XE_VMA_ATTR_ATOMIC 1
+#define DRM_XE_VMA_ATTR_PAT 2
+#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
+ /** @type: type of attribute */
+ __u32 type;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ union {
+ struct {
+#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
+#define DRM_XE_VMA_ATOMIC_DEVICE 1
+#define DRM_XE_VMA_ATOMIC_GLOBAL 2
+#define DRM_XE_VMA_ATOMIC_CPU 3
+ /** @val: value of atomic operation */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
+#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
+#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
+ /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } purge_state_val;
+
+ struct {
+ /** @val: PAT index to be applied to the range */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ __u32 devmem_fd;
+
+#define MIGRATE_ALL_PAGES 0
+#define MIGRATE_ONLY_SYSTEM_PAGES 1
+ __u32 migration_policy;
+ } preferred_mem_loc;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * Set memory attributes to a virtual address range
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_ops: number of madvises in ioctl */
+ __u32 num_ops;
+
+ union {
+ /** @ops: used if num_ops == 1 */
+ struct drm_xe_madvise_ops ops;
+
+ /**
+ * @vector_of_ops: userptr to array of struct
+ * drm_xe_madvise_ops if num_ops > 1
+ */
+ __u64 vector_of_ops;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-27 16:39 ` [PATCH v3 02/19] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
@ 2025-05-28 16:27 ` Matthew Brost
2025-05-28 17:03 ` Souza, Jose
2025-05-29 18:00 ` Matthew Brost
2 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 16:27 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:46PM +0530, Himal Prasad Ghimiray wrote:
> This commit introduces a new madvise interface to support
> driver-specific ioctl operations. The madvise interface allows for more
> efficient memory management by providing hints to the driver about the
> expected memory usage and pte update policy for gpuvma.
>
A bunch of nits, but uAPI itself LGTM. Let's see if we can get an ack
from Thomas on this too.
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 97 insertions(+)
>
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 9c08738c3b91..e0d75226a724 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -81,6 +81,7 @@ extern "C" {
> * - &DRM_IOCTL_XE_EXEC
> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> * - &DRM_IOCTL_XE_OBSERVATION
> + * - &DRM_IOCTL_XE_MADVISE
> */
>
> /*
> @@ -102,6 +103,7 @@ extern "C" {
> #define DRM_XE_EXEC 0x09
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> #define DRM_XE_OBSERVATION 0x0b
> +#define DRM_XE_MADVISE 0x0c
>
> /* Must be kept compact -- no holes */
>
> @@ -117,6 +119,7 @@ extern "C" {
> #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> +#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
> __u64 sampling_rates[];
> };
>
Structure kernel doc. I think here is where tou describe the defines too.
> +struct drm_xe_madvise_ops {
> + /** @start: start of the virtual address range */
> + __u64 start;
> +
> + /** @size: size of the virtual address range */
> + __u64 range;
> +
> +#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
> +#define DRM_XE_VMA_ATTR_ATOMIC 1
> +#define DRM_XE_VMA_ATTR_PAT 2
> +#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
> + /** @type: type of attribute */
> + __u32 type;
> +
> + /** @pad: MBZ */
> + __u32 pad;
> +
> + union {
Structure kernel doc. Maybe the DRM_XE_VMA_ATOMIC_* are documented here.
> + struct {
> +#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
> +#define DRM_XE_VMA_ATOMIC_DEVICE 1
> +#define DRM_XE_VMA_ATOMIC_GLOBAL 2
> +#define DRM_XE_VMA_ATOMIC_CPU 3
> + /** @val: value of atomic operation*/
Weird alignment.
s/val/atomic.val/
> + __u32 val;
> +
> + /** @reserved: Reserved */
Weird alignment.
s/reserved/atomic.reserved/
> + __u32 reserved;
> + } atomic;
> +
Structure kernel doc. Maybe DRM_XE_VMA_PURGEABLE_STATE_* are documented
here.
Basically same comments as above on the below structures too, not going
to type out.
> + struct {
> +#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
> +#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
> +#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } purge_state_val;
> +
> + struct {
> + /** @pat_index */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } pat_index;
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
> +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
s/DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD/DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE ?
> + /** @preferred_mem_loc: preferred memory location */
> + struct {
> + __u32 devmem_fd;
> +
> +#define MIGRATE_ALL_PAGES 0
> +#define MIGRATE_ONLY_SYSTEM_PAGES 1
s/MIGRATE_/DRM_XE_MIGRATE_/
> + __u32 migration_policy;
> + } preferred_mem_loc;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +};
> +
> +/**
> + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
> + *
> + * Set memory attributes to a virtual address range
Maybe include a code example?
Matt
> + */
> +struct drm_xe_madvise {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @vm_id: vm_id of the virtual range */
> + __u32 vm_id;
> +
> + /** @num_ops: number of madvises in ioctl */
> + __u32 num_ops;
> +
> + union {
> + /** @ops: used if num_ops == 1 */
> + struct drm_xe_madvise_ops ops;
> +
> + /**
> + * @vector_of_ops: userptr to array of struct
> + * drm_xe_vm_madvise_op if num_ops > 1
> + */
> + __u64 vector_of_ops;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +
> +};
> +
> #if defined(__cplusplus)
> }
> #endif
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-27 16:39 ` [PATCH v3 02/19] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-05-28 16:27 ` Matthew Brost
@ 2025-05-28 17:03 ` Souza, Jose
2025-05-29 18:03 ` Matthew Brost
2025-05-29 18:00 ` Matthew Brost
2 siblings, 1 reply; 72+ messages in thread
From: Souza, Jose @ 2025-05-28 17:03 UTC (permalink / raw)
To: intel-xe@lists.freedesktop.org, Ghimiray, Himal Prasad
On Tue, 2025-05-27 at 22:09 +0530, Himal Prasad Ghimiray wrote:
> This commit introduces a new madvise interface to support
> driver-specific ioctl operations. The madvise interface allows for more
> efficient memory management by providing hints to the driver about the
> expected memory usage and pte update policy for gpuvma.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 97 insertions(+)
>
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 9c08738c3b91..e0d75226a724 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -81,6 +81,7 @@ extern "C" {
> * - &DRM_IOCTL_XE_EXEC
> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> * - &DRM_IOCTL_XE_OBSERVATION
> + * - &DRM_IOCTL_XE_MADVISE
> */
>
> /*
> @@ -102,6 +103,7 @@ extern "C" {
> #define DRM_XE_EXEC 0x09
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> #define DRM_XE_OBSERVATION 0x0b
> +#define DRM_XE_MADVISE 0x0c
>
> /* Must be kept compact -- no holes */
>
> @@ -117,6 +119,7 @@ extern "C" {
> #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> +#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
> __u64 sampling_rates[];
> };
>
> +struct drm_xe_madvise_ops {
> + /** @start: start of the virtual address range */
> + __u64 start;
> +
> + /** @size: size of the virtual address range */
> + __u64 range;
> +
> +#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
> +#define DRM_XE_VMA_ATTR_ATOMIC 1
> +#define DRM_XE_VMA_ATTR_PAT 2
> +#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
> + /** @type: type of attribute */
> + __u32 type;
> +
> + /** @pad: MBZ */
> + __u32 pad;
> +
> + union {
> + struct {
> +#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
> +#define DRM_XE_VMA_ATOMIC_DEVICE 1
> +#define DRM_XE_VMA_ATOMIC_GLOBAL 2
> +#define DRM_XE_VMA_ATOMIC_CPU 3
> + /** @val: value of atomic operation*/
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } atomic;
> +
> + struct {
> +#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
> +#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
> +#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } purge_state_val;
The set purgeable uAPI looks good to me, so this part is
Acked-by: José Roberto de Souza <jose.souza@intel.com>
> +
> + struct {
> + /** @pat_index */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } pat_index;
> +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
> + /** @preferred_mem_loc: preferred memory location */
> + struct {
> + __u32 devmem_fd;
> +
> +#define MIGRATE_ALL_PAGES 0
> +#define MIGRATE_ONLY_SYSTEM_PAGES 1
> + __u32 migration_policy;
> + } preferred_mem_loc;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +};
> +
> +/**
> + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
> + *
> + * Set memory attributes to a virtual address range
> + */
> +struct drm_xe_madvise {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @vm_id: vm_id of the virtual range */
> + __u32 vm_id;
> +
> + /** @num_ops: number of madvises in ioctl */
> + __u32 num_ops;
> +
> + union {
> + /** @ops: used if num_ops == 1 */
> + struct drm_xe_madvise_ops ops;
> +
> + /**
> + * @vector_of_ops: userptr to array of struct
> + * drm_xe_vm_madvise_op if num_ops > 1
> + */
> + __u64 vector_of_ops;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +
> +};
> +
> #if defined(__cplusplus)
> }
> #endif
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-28 17:03 ` Souza, Jose
@ 2025-05-29 18:03 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 18:03 UTC (permalink / raw)
To: Souza, Jose; +Cc: intel-xe@lists.freedesktop.org, Ghimiray, Himal Prasad
On Wed, May 28, 2025 at 05:03:07PM +0000, Souza, Jose wrote:
> On Tue, 2025-05-27 at 22:09 +0530, Himal Prasad Ghimiray wrote:
> > This commit introduces a new madvise interface to support
> > driver-specific ioctl operations. The madvise interface allows for more
> > efficient memory management by providing hints to the driver about the
> > expected memory usage and pte update policy for gpuvma.
> >
> > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > ---
> > include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 97 insertions(+)
> >
> > diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> > index 9c08738c3b91..e0d75226a724 100644
> > --- a/include/uapi/drm/xe_drm.h
> > +++ b/include/uapi/drm/xe_drm.h
> > @@ -81,6 +81,7 @@ extern "C" {
> > * - &DRM_IOCTL_XE_EXEC
> > * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> > * - &DRM_IOCTL_XE_OBSERVATION
> > + * - &DRM_IOCTL_XE_MADVISE
> > */
> >
> > /*
> > @@ -102,6 +103,7 @@ extern "C" {
> > #define DRM_XE_EXEC 0x09
> > #define DRM_XE_WAIT_USER_FENCE 0x0a
> > #define DRM_XE_OBSERVATION 0x0b
> > +#define DRM_XE_MADVISE 0x0c
> >
> > /* Must be kept compact -- no holes */
> >
> > @@ -117,6 +119,7 @@ extern "C" {
> > #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> > #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> > #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> > +#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
> >
> > /**
> > * DOC: Xe IOCTL Extensions
> > @@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
> > __u64 sampling_rates[];
> > };
> >
> > +struct drm_xe_madvise_ops {
> > + /** @start: start of the virtual address range */
> > + __u64 start;
> > +
> > + /** @size: size of the virtual address range */
> > + __u64 range;
> > +
> > +#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
> > +#define DRM_XE_VMA_ATTR_ATOMIC 1
> > +#define DRM_XE_VMA_ATTR_PAT 2
> > +#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
> > + /** @type: type of attribute */
> > + __u32 type;
> > +
> > + /** @pad: MBZ */
> > + __u32 pad;
> > +
> > + union {
> > + struct {
> > +#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
> > +#define DRM_XE_VMA_ATOMIC_DEVICE 1
> > +#define DRM_XE_VMA_ATOMIC_GLOBAL 2
> > +#define DRM_XE_VMA_ATOMIC_CPU 3
> > + /** @val: value of atomic operation*/
> > + __u32 val;
> > +
> > + /** @reserved: Reserved */
> > + __u32 reserved;
> > + } atomic;
> > +
> > + struct {
> > +#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
> > +#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
> > +#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
> > + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> > + __u32 val;
> > +
> > + /** @reserved: Reserved */
> > + __u32 reserved;
> > + } purge_state_val;
>
> The set purgeable uAPI looks good to me, so this part is
>
> Acked-by: José Roberto de Souza <jose.souza@intel.com>
>
I suggest pulling the purge_state out in the initial megre if purgable
is not implemented in the series which merge. Good to confirm we have
the bits available in uAPI + Mesa is good with this - we can add these
back in once the implementation lands.
Matt
> > +
> > + struct {
> > + /** @pat_index */
> > + __u32 val;
> > +
> > + /** @reserved: Reserved */
> > + __u32 reserved;
> > + } pat_index;
> > +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
> > + /** @preferred_mem_loc: preferred memory location */
> > + struct {
> > + __u32 devmem_fd;
> > +
> > +#define MIGRATE_ALL_PAGES 0
> > +#define MIGRATE_ONLY_SYSTEM_PAGES 1
> > + __u32 migration_policy;
> > + } preferred_mem_loc;
> > + };
> > +
> > + /** @reserved: Reserved */
> > + __u64 reserved[2];
> > +};
> > +
> > +/**
> > + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
> > + *
> > + * Set memory attributes to a virtual address range
> > + */
> > +struct drm_xe_madvise {
> > + /** @extensions: Pointer to the first extension struct, if any */
> > + __u64 extensions;
> > +
> > + /** @vm_id: vm_id of the virtual range */
> > + __u32 vm_id;
> > +
> > + /** @num_ops: number of madvises in ioctl */
> > + __u32 num_ops;
> > +
> > + union {
> > + /** @ops: used if num_ops == 1 */
> > + struct drm_xe_madvise_ops ops;
> > +
> > + /**
> > + * @vector_of_ops: userptr to array of struct
> > + * drm_xe_vm_madvise_op if num_ops > 1
> > + */
> > + __u64 vector_of_ops;
> > + };
> > +
> > + /** @reserved: Reserved */
> > + __u64 reserved[2];
> > +
> > +};
> > +
> > #if defined(__cplusplus)
> > }
> > #endif
^ permalink raw reply [flat|nested] 72+ messages in thread
* Re: [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-27 16:39 ` [PATCH v3 02/19] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
2025-05-28 16:27 ` Matthew Brost
2025-05-28 17:03 ` Souza, Jose
@ 2025-05-29 18:00 ` Matthew Brost
2025-06-10 4:32 ` Ghimiray, Himal Prasad
2 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 18:00 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:46PM +0530, Himal Prasad Ghimiray wrote:
> This commit introduces a new madvise interface to support
> driver-specific ioctl operations. The madvise interface allows for more
> efficient memory management by providing hints to the driver about the
> expected memory usage and pte update policy for gpuvma.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 97 insertions(+)
>
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 9c08738c3b91..e0d75226a724 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -81,6 +81,7 @@ extern "C" {
> * - &DRM_IOCTL_XE_EXEC
> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> * - &DRM_IOCTL_XE_OBSERVATION
> + * - &DRM_IOCTL_XE_MADVISE
> */
>
> /*
> @@ -102,6 +103,7 @@ extern "C" {
> #define DRM_XE_EXEC 0x09
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> #define DRM_XE_OBSERVATION 0x0b
> +#define DRM_XE_MADVISE 0x0c
>
> /* Must be kept compact -- no holes */
>
> @@ -117,6 +119,7 @@ extern "C" {
> #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> +#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
Missed this in prior review - DRM_IOW as I don't think madvise returns
any values to user space, does it?
Matt
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
> __u64 sampling_rates[];
> };
>
> +struct drm_xe_madvise_ops {
> + /** @start: start of the virtual address range */
> + __u64 start;
> +
> + /** @size: size of the virtual address range */
> + __u64 range;
> +
> +#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
> +#define DRM_XE_VMA_ATTR_ATOMIC 1
> +#define DRM_XE_VMA_ATTR_PAT 2
> +#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
> + /** @type: type of attribute */
> + __u32 type;
> +
> + /** @pad: MBZ */
> + __u32 pad;
> +
> + union {
> + struct {
> +#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
> +#define DRM_XE_VMA_ATOMIC_DEVICE 1
> +#define DRM_XE_VMA_ATOMIC_GLOBAL 2
> +#define DRM_XE_VMA_ATOMIC_CPU 3
> + /** @val: value of atomic operation*/
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } atomic;
> +
> + struct {
> +#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
> +#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
> +#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } purge_state_val;
> +
> + struct {
> + /** @pat_index */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } pat_index;
> +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
> + /** @preferred_mem_loc: preferred memory location */
> + struct {
> + __u32 devmem_fd;
> +
> +#define MIGRATE_ALL_PAGES 0
> +#define MIGRATE_ONLY_SYSTEM_PAGES 1
> + __u32 migration_policy;
> + } preferred_mem_loc;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +};
> +
> +/**
> + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
> + *
> + * Set memory attributes to a virtual address range
> + */
> +struct drm_xe_madvise {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @vm_id: vm_id of the virtual range */
> + __u32 vm_id;
> +
> + /** @num_ops: number of madvises in ioctl */
> + __u32 num_ops;
> +
> + union {
> + /** @ops: used if num_ops == 1 */
> + struct drm_xe_madvise_ops ops;
> +
> + /**
> + * @vector_of_ops: userptr to array of struct
> + * drm_xe_vm_madvise_op if num_ops > 1
> + */
> + __u64 vector_of_ops;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +
> +};
> +
> #if defined(__cplusplus)
> }
> #endif
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 02/19] drm/xe/uapi: Add madvise interface
2025-05-29 18:00 ` Matthew Brost
@ 2025-06-10 4:32 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-06-10 4:32 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 23:30, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:46PM +0530, Himal Prasad Ghimiray wrote:
>> This commit introduces a new madvise interface to support
>> driver-specific ioctl operations. The madvise interface allows for more
>> efficient memory management by providing hints to the driver about the
>> expected memory usage and pte update policy for gpuvma.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> include/uapi/drm/xe_drm.h | 97 +++++++++++++++++++++++++++++++++++++++
>> 1 file changed, 97 insertions(+)
>>
>> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
>> index 9c08738c3b91..e0d75226a724 100644
>> --- a/include/uapi/drm/xe_drm.h
>> +++ b/include/uapi/drm/xe_drm.h
>> @@ -81,6 +81,7 @@ extern "C" {
>> * - &DRM_IOCTL_XE_EXEC
>> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
>> * - &DRM_IOCTL_XE_OBSERVATION
>> + * - &DRM_IOCTL_XE_MADVISE
>> */
>>
>> /*
>> @@ -102,6 +103,7 @@ extern "C" {
>> #define DRM_XE_EXEC 0x09
>> #define DRM_XE_WAIT_USER_FENCE 0x0a
>> #define DRM_XE_OBSERVATION 0x0b
>> +#define DRM_XE_MADVISE 0x0c
>>
>> /* Must be kept compact -- no holes */
>>
>> @@ -117,6 +119,7 @@ extern "C" {
>> #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
>> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
>> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
>> +#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
>
> Missed this in prior review - DRM_IOW as I don't think madvise returns
> any values to user space, does it?
true. DRM_IOW is sufficient.
>
> Matt
>
>>
>> /**
>> * DOC: Xe IOCTL Extensions
>> @@ -1965,6 +1968,100 @@ struct drm_xe_query_eu_stall {
>> __u64 sampling_rates[];
>> };
>>
>> +struct drm_xe_madvise_ops {
>> + /** @start: start of the virtual address range */
>> + __u64 start;
>> +
>> + /** @size: size of the virtual address range */
>> + __u64 range;
>> +
>> +#define DRM_XE_VMA_ATTR_PREFERRED_LOC 0
>> +#define DRM_XE_VMA_ATTR_ATOMIC 1
>> +#define DRM_XE_VMA_ATTR_PAT 2
>> +#define DRM_XE_VMA_ATTR_PURGEABLE_STATE 3
>> + /** @type: type of attribute */
>> + __u32 type;
>> +
>> + /** @pad: MBZ */
>> + __u32 pad;
>> +
>> + union {
>> + struct {
>> +#define DRM_XE_VMA_ATOMIC_UNDEFINED 0
>> +#define DRM_XE_VMA_ATOMIC_DEVICE 1
>> +#define DRM_XE_VMA_ATOMIC_GLOBAL 2
>> +#define DRM_XE_VMA_ATOMIC_CPU 3
>> + /** @val: value of atomic operation*/
>> + __u32 val;
>> +
>> + /** @reserved: Reserved */
>> + __u32 reserved;
>> + } atomic;
>> +
>> + struct {
>> +#define DRM_XE_VMA_PURGEABLE_STATE_WILLNEED 0
>> +#define DRM_XE_VMA_PURGEABLE_STATE_DONTNEED 1
>> +#define DRM_XE_VMA_PURGEABLE_STATE_PURGED 2
>> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
>> + __u32 val;
>> +
>> + /** @reserved: Reserved */
>> + __u32 reserved;
>> + } purge_state_val;
>> +
>> + struct {
>> + /** @pat_index */
>> + __u32 val;
>> +
>> + /** @reserved: Reserved */
>> + __u32 reserved;
>> + } pat_index;
>> +#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD 0
>> + /** @preferred_mem_loc: preferred memory location */
>> + struct {
>> + __u32 devmem_fd;
>> +
>> +#define MIGRATE_ALL_PAGES 0
>> +#define MIGRATE_ONLY_SYSTEM_PAGES 1
>> + __u32 migration_policy;
>> + } preferred_mem_loc;
>> + };
>> +
>> + /** @reserved: Reserved */
>> + __u64 reserved[2];
>> +};
>> +
>> +/**
>> + * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
>> + *
>> + * Set memory attributes to a virtual address range
>> + */
>> +struct drm_xe_madvise {
>> + /** @extensions: Pointer to the first extension struct, if any */
>> + __u64 extensions;
>> +
>> + /** @vm_id: vm_id of the virtual range */
>> + __u32 vm_id;
>> +
>> + /** @num_ops: number of madvises in ioctl */
>> + __u32 num_ops;
>> +
>> + union {
>> + /** @ops: used if num_ops == 1 */
>> + struct drm_xe_madvise_ops ops;
>> +
>> + /**
>> + * @vector_of_ops: userptr to array of struct
>> + * drm_xe_vm_madvise_op if num_ops > 1
>> + */
>> + __u64 vector_of_ops;
>> + };
>> +
>> + /** @reserved: Reserved */
>> + __u64 reserved[2];
>> +
>> +};
>> +
>> #if defined(__cplusplus)
>> }
>> #endif
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 03/19] drm/xe/vm: Add attributes struct as member of vma
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
2025-05-27 16:39 ` [PATCH v3 01/19] Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops Himal Prasad Ghimiray
2025-05-27 16:39 ` [PATCH v3 02/19] drm/xe/uapi: Add madvise interface Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 16:46 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 04/19] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
` (23 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
The attribute of xe_vma will determine the migration policy and the
encoding of the page table entries (PTEs) for that vma.
This attribute helps manage how memory pages are moved and how their
addresses are translated. It will be used by madvise to set the
behavior of the vma.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_vm_types.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index bfc145baad49..5d4bbe547d0d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -77,6 +77,19 @@ struct xe_userptr {
#endif
};
+/**
+ * struct xe_vma_mem_attr - memory attributes associated with vma
+ */
+struct xe_vma_mem_attr {
+ /** @preferred_loc: perferred memory_location*/
+ struct {
+ u32 migration_policy; /* represents migration policies */
+ u32 devmem_fd; /* devmem_fd used for determining pagemap_fd requested by user */
+ } preferred_loc;
+ /** @atomic_access: The atomic access type for the vma */
+ u32 atomic_access;
+};
+
struct xe_vma {
/** @gpuva: Base GPUVA object */
struct drm_gpuva gpuva;
@@ -128,6 +141,13 @@ struct xe_vma {
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
+
+ /**
+ * @attr: The attributes of vma which determines the migration policy
+ * and encoding of the PTEs for this vma.
+ */
+ struct xe_vma_mem_attr attr;
+
};
/**
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 03/19] drm/xe/vm: Add attributes struct as member of vma
2025-05-27 16:39 ` [PATCH v3 03/19] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
@ 2025-05-28 16:46 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 16:46 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:47PM +0530, Himal Prasad Ghimiray wrote:
> The attribute of xe_vma will determine the migration policy and the
> encoding of the page table entries (PTEs) for that vma.
> This attribute helps manage how memory pages are moved and how their
> addresses are translated. It will be used by madvise to set the
> behavior of the vma.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm_types.h | 20 ++++++++++++++++++++
> 1 file changed, 20 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index bfc145baad49..5d4bbe547d0d 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -77,6 +77,19 @@ struct xe_userptr {
> #endif
> };
>
> +/**
> + * struct xe_vma_mem_attr - memory attributes associated with vma
> + */
> +struct xe_vma_mem_attr {
> + /** @preferred_loc: perferred memory_location*/
> + struct {
/** @preferred_loc.migration_policy: ... */
> + u32 migration_policy; /* represents migration policies */
/** @preferred_loc.devmem_fd: ... */
I'd also mention that -1, 0 mean system memory or closest device memory */
Matt
> + u32 devmem_fd; /* devmem_fd used for determining pagemap_fd requested by user */
> + } preferred_loc;
> + /** @atomic_access: The atomic access type for the vma */
> + u32 atomic_access;
> +};
> +
> struct xe_vma {
> /** @gpuva: Base GPUVA object */
> struct drm_gpuva gpuva;
> @@ -128,6 +141,13 @@ struct xe_vma {
> * Needs to be signalled before UNMAP can be processed.
> */
> struct xe_user_fence *ufence;
> +
> + /**
> + * @attr: The attributes of vma which determines the migration policy
> + * and encoding of the PTEs for this vma.
> + */
> + struct xe_vma_mem_attr attr;
> +
> };
>
> /**
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 04/19] drm/xe/vma: Move pat_index to vma attributes
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (2 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 03/19] drm/xe/vm: Add attributes struct as member of vma Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 22:51 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
` (22 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
The PAT index determines how PTEs are encoded and can be modified by
madvise. Therefore, it is now part of the vma attributes.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_pt.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 6 +++---
drivers/gpu/drm/xe/xe_vm_types.h | 10 ++++------
3 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index c9c41fbe125c..39bc1964089e 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -518,7 +518,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
{
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
- u16 pat_index = xe_walk->vma->pat_index;
+ u16 pat_index = xe_walk->vma->attr.pat_index;
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 7dd8742f7cd9..a48e1bc8b76a 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1221,7 +1221,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->pat_index = pat_index;
+ vma->attr.pat_index = pat_index;
if (bo) {
struct drm_gpuvm_bo *vm_bo;
@@ -2666,7 +2666,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
- old->pat_index, flags);
+ old->attr.pat_index, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2696,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.next) {
vma = new_vma(vm, op->base.remap.next,
- old->pat_index, flags);
+ old->attr.pat_index, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5d4bbe547d0d..2f58c7bb0a85 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -88,6 +88,10 @@ struct xe_vma_mem_attr {
} preferred_loc;
/** @atomic_access: The atomic access type for the vma */
u32 atomic_access;
+ /**
+ * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ */
+ u16 pat_index;
};
struct xe_vma {
@@ -131,11 +135,6 @@ struct xe_vma {
/** @tile_staged: bind is staged for this VMA */
u8 tile_staged;
- /**
- * @pat_index: The pat index to use when encoding the PTEs for this vma.
- */
- u16 pat_index;
-
/**
* @ufence: The user fence that was provided with MAP.
* Needs to be signalled before UNMAP can be processed.
@@ -147,7 +146,6 @@ struct xe_vma {
* and encoding of the PTEs for this vma.
*/
struct xe_vma_mem_attr attr;
-
};
/**
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 04/19] drm/xe/vma: Move pat_index to vma attributes
2025-05-27 16:39 ` [PATCH v3 04/19] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
@ 2025-05-28 22:51 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 22:51 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:48PM +0530, Himal Prasad Ghimiray wrote:
> The PAT index determines how PTEs are encoded and can be modified by
> madvise. Therefore, it is now part of the vma attributes.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_pt.c | 2 +-
> drivers/gpu/drm/xe/xe_vm.c | 6 +++---
> drivers/gpu/drm/xe/xe_vm_types.h | 10 ++++------
> 3 files changed, 8 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index c9c41fbe125c..39bc1964089e 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -518,7 +518,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
> {
> struct xe_pt_stage_bind_walk *xe_walk =
> container_of(walk, typeof(*xe_walk), base);
> - u16 pat_index = xe_walk->vma->pat_index;
> + u16 pat_index = xe_walk->vma->attr.pat_index;
> struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
> struct xe_vm *vm = xe_walk->vm;
> struct xe_pt *xe_child;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 7dd8742f7cd9..a48e1bc8b76a 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1221,7 +1221,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
> if (vm->xe->info.has_atomic_enable_pte_bit)
> vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
>
> - vma->pat_index = pat_index;
> + vma->attr.pat_index = pat_index;
>
> if (bo) {
> struct drm_gpuvm_bo *vm_bo;
> @@ -2666,7 +2666,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>
> if (op->base.remap.prev) {
> vma = new_vma(vm, op->base.remap.prev,
> - old->pat_index, flags);
> + old->attr.pat_index, flags);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> @@ -2696,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>
> if (op->base.remap.next) {
> vma = new_vma(vm, op->base.remap.next,
> - old->pat_index, flags);
> + old->attr.pat_index, flags);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 5d4bbe547d0d..2f58c7bb0a85 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -88,6 +88,10 @@ struct xe_vma_mem_attr {
> } preferred_loc;
> /** @atomic_access: The atomic access type for the vma */
> u32 atomic_access;
> + /**
> + * @pat_index: The pat index to use when encoding the PTEs for this vma.
> + */
> + u16 pat_index;
> };
>
> struct xe_vma {
> @@ -131,11 +135,6 @@ struct xe_vma {
> /** @tile_staged: bind is staged for this VMA */
> u8 tile_staged;
>
> - /**
> - * @pat_index: The pat index to use when encoding the PTEs for this vma.
> - */
> - u16 pat_index;
> -
> /**
> * @ufence: The user fence that was provided with MAP.
> * Needs to be signalled before UNMAP can be processed.
> @@ -147,7 +146,6 @@ struct xe_vma {
> * and encoding of the PTEs for this vma.
> */
> struct xe_vma_mem_attr attr;
> -
> };
>
> /**
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (3 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 04/19] drm/xe/vma: Move pat_index to vma attributes Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 22:58 ` Matthew Brost
2025-06-02 6:19 ` Dan Carpenter
2025-05-27 16:39 ` [PATCH v3 06/19] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
` (21 subsequent siblings)
26 siblings, 2 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
This change simplifies the logic by ensuring that remapped previous or
next VMAs are created with the same memory attributes as the original VMA.
By passing struct xe_vma_mem_attr as a parameter, we maintain consistency
in memory attributes.
-v2
*dst = *src (Matthew Brost)
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index a48e1bc8b76a..de6ecff237a6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2436,8 +2436,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
+static void cp_vma_mem_attr(struct xe_vma_mem_attr *dst, struct xe_vma_mem_attr *src)
+{
+ *dst = *src;
+}
+
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr attr, unsigned int flags)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
struct drm_exec exec;
@@ -2466,7 +2471,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
}
vma = xe_vma_create(vm, bo, op->gem.offset,
op->va.addr, op->va.addr +
- op->va.range - 1, pat_index, flags);
+ op->va.range - 1, attr.pat_index, flags);
if (IS_ERR(vma))
goto err_unlock;
@@ -2483,6 +2488,8 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
vma = ERR_PTR(err);
+ } else {
+ cp_vma_mem_attr(&vma->attr, &attr);
}
return vma;
@@ -2609,6 +2616,14 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD,
+ },
+ .atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
+ .pat_index = op->map.pat_index
+ };
+
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
@@ -2618,7 +2633,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
flags |= op->map.is_cpu_addr_mirror ?
VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
- vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ vma = new_vma(vm, &op->base.map, default_attr,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2666,7 +2681,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
- old->attr.pat_index, flags);
+ old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2696,7 +2711,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.next) {
vma = new_vma(vm, op->base.remap.next,
- old->attr.pat_index, flags);
+ old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
2025-05-27 16:39 ` [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
@ 2025-05-28 22:58 ` Matthew Brost
2025-06-02 6:19 ` Dan Carpenter
1 sibling, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 22:58 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:49PM +0530, Himal Prasad Ghimiray wrote:
> This change simplifies the logic by ensuring that remapped previous or
> next VMAs are created with the same memory attributes as the original VMA.
> By passing struct xe_vma_mem_attr as a parameter, we maintain consistency
> in memory attributes.
>
> -v2
> *dst = *src (Matthew Brost)
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm.c | 25 ++++++++++++++++++++-----
> 1 file changed, 20 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index a48e1bc8b76a..de6ecff237a6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2436,8 +2436,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>
> ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
>
> +static void cp_vma_mem_attr(struct xe_vma_mem_attr *dst, struct xe_vma_mem_attr *src)
> +{
> + *dst = *src;
> +}
I'm not sure if this worth a helper.
> +
> static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
> - u16 pat_index, unsigned int flags)
> + struct xe_vma_mem_attr attr, unsigned int flags)
I'd make attr a pointer.
> {
> struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
> struct drm_exec exec;
> @@ -2466,7 +2471,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
> }
> vma = xe_vma_create(vm, bo, op->gem.offset,
> op->va.addr, op->va.addr +
> - op->va.range - 1, pat_index, flags);
> + op->va.range - 1, attr.pat_index, flags);
I'd pass attr in here and set vma->attr in that function.
> if (IS_ERR(vma))
> goto err_unlock;
>
> @@ -2483,6 +2488,8 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
> prep_vma_destroy(vm, vma, false);
> xe_vma_destroy_unlocked(vma);
> vma = ERR_PTR(err);
> + } else {
> + cp_vma_mem_attr(&vma->attr, &attr);
> }
>
> return vma;
> @@ -2609,6 +2616,14 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
> switch (op->base.op) {
> case DRM_GPUVA_OP_MAP:
> {
> + struct xe_vma_mem_attr default_attr = {
> + .preferred_loc = {
> + .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD,
I'd indent .devmem_fd here.
Also, should we initialize the migration policy?
> + },
> + .atomic_access = DRM_XE_VMA_ATOMIC_UNDEFINED,
> + .pat_index = op->map.pat_index
s/op->map.pat_index/op->map.pat_index,/
Is typically the style.
Matt
> + };
> +
> flags |= op->map.read_only ?
> VMA_CREATE_FLAG_READ_ONLY : 0;
> flags |= op->map.is_null ?
> @@ -2618,7 +2633,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
> flags |= op->map.is_cpu_addr_mirror ?
> VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
>
> - vma = new_vma(vm, &op->base.map, op->map.pat_index,
> + vma = new_vma(vm, &op->base.map, default_attr,
> flags);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
> @@ -2666,7 +2681,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>
> if (op->base.remap.prev) {
> vma = new_vma(vm, op->base.remap.prev,
> - old->attr.pat_index, flags);
> + old->attr, flags);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> @@ -2696,7 +2711,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>
> if (op->base.remap.next) {
> vma = new_vma(vm, op->base.remap.next,
> - old->attr.pat_index, flags);
> + old->attr, flags);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
2025-05-27 16:39 ` [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
2025-05-28 22:58 ` Matthew Brost
@ 2025-06-02 6:19 ` Dan Carpenter
1 sibling, 0 replies; 72+ messages in thread
From: Dan Carpenter @ 2025-06-02 6:19 UTC (permalink / raw)
To: oe-kbuild, Himal Prasad Ghimiray, intel-xe
Cc: lkp, oe-kbuild-all, Himal Prasad Ghimiray
Hi Himal,
kernel test robot noticed the following build warnings:
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Himal-Prasad-Ghimiray/Introduce-drm_gpuvm_sm_map_ops_flags-enums-for-sm_map_ops/20250528-041919
base: https://gitlab.freedesktop.org/drm/xe/kernel.git drm-xe-next
patch link: https://lore.kernel.org/r/20250527164003.1068118-6-himal.prasad.ghimiray%40intel.com
patch subject: [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
config: loongarch-randconfig-r073-20250529 (https://download.01.org/0day-ci/archive/20250530/202505300251.vkfxetWu-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 15.1.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202505300251.vkfxetWu-lkp@intel.com/
New smatch warnings:
drivers/gpu/drm/xe/xe_vm.c:2492 new_vma() error: 'vma' dereferencing possible ERR_PTR()
vim +/vma +2492 drivers/gpu/drm/xe/xe_vm.c
b06d47be7c83165 Matthew Brost 2023-07-07 2444 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
341ac2942dd2087 Himal Prasad Ghimiray 2025-05-27 2445 struct xe_vma_mem_attr attr, unsigned int flags)
b06d47be7c83165 Matthew Brost 2023-07-07 2446 {
b06d47be7c83165 Matthew Brost 2023-07-07 2447 struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
24f947d58fe554c Thomas Hellström 2023-12-12 2448 struct drm_exec exec;
b06d47be7c83165 Matthew Brost 2023-07-07 2449 struct xe_vma *vma;
33991ae8f40a824 Matthew Brost 2024-06-17 2450 int err = 0;
b06d47be7c83165 Matthew Brost 2023-07-07 2451
b06d47be7c83165 Matthew Brost 2023-07-07 2452 lockdep_assert_held_write(&vm->lock);
b06d47be7c83165 Matthew Brost 2023-07-07 2453
b06d47be7c83165 Matthew Brost 2023-07-07 2454 if (bo) {
d2197029026021e Dave Airlie 2023-12-22 2455 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
24f947d58fe554c Thomas Hellström 2023-12-12 2456 drm_exec_until_all_locked(&exec) {
24f947d58fe554c Thomas Hellström 2023-12-12 2457 err = 0;
24f947d58fe554c Thomas Hellström 2023-12-12 2458 if (!bo->vm) {
24f947d58fe554c Thomas Hellström 2023-12-12 2459 err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
24f947d58fe554c Thomas Hellström 2023-12-12 2460 drm_exec_retry_on_contention(&exec);
24f947d58fe554c Thomas Hellström 2023-12-12 2461 }
24f947d58fe554c Thomas Hellström 2023-12-12 2462 if (!err) {
24f947d58fe554c Thomas Hellström 2023-12-12 2463 err = drm_exec_lock_obj(&exec, &bo->ttm.base);
24f947d58fe554c Thomas Hellström 2023-12-12 2464 drm_exec_retry_on_contention(&exec);
24f947d58fe554c Thomas Hellström 2023-12-12 2465 }
24f947d58fe554c Thomas Hellström 2023-12-12 2466 if (err) {
24f947d58fe554c Thomas Hellström 2023-12-12 2467 drm_exec_fini(&exec);
b06d47be7c83165 Matthew Brost 2023-07-07 2468 return ERR_PTR(err);
dd08ebf6c3525a7 Matthew Brost 2023-03-30 2469 }
24f947d58fe554c Thomas Hellström 2023-12-12 2470 }
24f947d58fe554c Thomas Hellström 2023-12-12 2471 }
b06d47be7c83165 Matthew Brost 2023-07-07 2472 vma = xe_vma_create(vm, bo, op->gem.offset,
b06d47be7c83165 Matthew Brost 2023-07-07 2473 op->va.addr, op->va.addr +
341ac2942dd2087 Himal Prasad Ghimiray 2025-05-27 2474 op->va.range - 1, attr.pat_index, flags);
33991ae8f40a824 Matthew Brost 2024-06-17 2475 if (IS_ERR(vma))
33991ae8f40a824 Matthew Brost 2024-06-17 2476 goto err_unlock;
Missing "err = PTR_ERR(vma);"
dd08ebf6c3525a7 Matthew Brost 2023-03-30 2477
33991ae8f40a824 Matthew Brost 2024-06-17 2478 if (xe_vma_is_userptr(vma))
5bd24e78829ad56 Thomas Hellström 2024-01-31 2479 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
33991ae8f40a824 Matthew Brost 2024-06-17 2480 else if (!xe_vma_has_no_bo(vma) && !bo->vm)
b06d47be7c83165 Matthew Brost 2023-07-07 2481 err = add_preempt_fences(vm, bo);
33991ae8f40a824 Matthew Brost 2024-06-17 2482
33991ae8f40a824 Matthew Brost 2024-06-17 2483 err_unlock:
33991ae8f40a824 Matthew Brost 2024-06-17 2484 if (bo)
33991ae8f40a824 Matthew Brost 2024-06-17 2485 drm_exec_fini(&exec);
33991ae8f40a824 Matthew Brost 2024-06-17 2486
b06d47be7c83165 Matthew Brost 2023-07-07 2487 if (err) {
b06d47be7c83165 Matthew Brost 2023-07-07 2488 prep_vma_destroy(vm, vma, false);
b06d47be7c83165 Matthew Brost 2023-07-07 2489 xe_vma_destroy_unlocked(vma);
33991ae8f40a824 Matthew Brost 2024-06-17 2490 vma = ERR_PTR(err);
341ac2942dd2087 Himal Prasad Ghimiray 2025-05-27 2491 } else {
341ac2942dd2087 Himal Prasad Ghimiray 2025-05-27 @2492 cp_vma_mem_attr(&vma->attr, &attr);
^^^^
leads to error pointer dereference.
dd08ebf6c3525a7 Matthew Brost 2023-03-30 2493 }
dd08ebf6c3525a7 Matthew Brost 2023-03-30 2494
b06d47be7c83165 Matthew Brost 2023-07-07 2495 return vma;
I was wondering why Smatch doesn't catch the missing error code but
actually we return the error code... #ABitConfusing
b06d47be7c83165 Matthew Brost 2023-07-07 2496 }
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 06/19] drm/gpusvm: Make drm_gpusvm_for_each_* macros public
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (4 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 05/19] drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 23:01 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation() Himal Prasad Ghimiray
` (20 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
The drm_gpusvm_for_each_notifier, drm_gpusvm_for_each_notifier_safe and
drm_gpusvm_for_each_range_safe macros are useful for locating notifiers
and ranges within a user-specified range. By making these macros public,
we enable broader access and utility for developers who need to leverage
them in their implementations.
v2 (Matthew Brost)
- drop inline __drm_gpusvm_range_find
- /s/notifier_iter_first/drm_gpusvm_notifier_find
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/drm_gpusvm.c | 122 +++++++----------------------------
include/drm/drm_gpusvm.h | 70 ++++++++++++++++++++
2 files changed, 95 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 7bb9eb71c9aa..e50a25fe1079 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -380,107 +380,50 @@ static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
}
/**
- * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
- * @notifier: Pointer to the GPU SVM notifier structure.
- * @start: Start address of the range
- * @end: End address of the range
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address of the notifier
+ * @end: End address of the notifier
*
- * Return: A pointer to the drm_gpusvm_range if found or NULL
+ * Return: A pointer to the drm_gpusvm_notifier if found or NULL
*/
-struct drm_gpusvm_range *
-drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
- unsigned long end)
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+ itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_range, itree);
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
/**
- * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
- * @range__: Iterator variable for the ranges
- * @next__: Iterator variable for the ranges temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the range
- * @end__: End address of the range
- *
- * This macro is used to iterate over GPU SVM ranges in a notifier while
- * removing ranges from it.
- */
-#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
- for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
- (next__) = __drm_gpusvm_range_next(range__); \
- (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
- (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-
-/**
- * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
- * @notifier: a pointer to the current drm_gpusvm_notifier
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
*
- * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
- * the current notifier is the last one or if the input notifier is
- * NULL.
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
*/
-static struct drm_gpusvm_notifier *
-__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
-{
- if (notifier && !list_is_last(&notifier->entry,
- &notifier->gpusvm->notifier_list))
- return list_next_entry(notifier, entry);
-
- return NULL;
-}
-
-static struct drm_gpusvm_notifier *
-notifier_iter_first(struct rb_root_cached *root, unsigned long start,
- unsigned long last)
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(root, start, last);
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_notifier, itree);
+ return container_of(itree, struct drm_gpusvm_range, itree);
else
return NULL;
}
-
-/**
- * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
- */
-#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-
-/**
- * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @next__: Iterator variable for the notifiers temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
- * removing notifiers from it.
- */
-#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
- (next__) = __drm_gpusvm_notifier_next(notifier__); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
/**
* drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
@@ -581,22 +524,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
}
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
-/**
- * drm_gpusvm_notifier_find() - Find GPU SVM notifier
- * @gpusvm: Pointer to the GPU SVM structure
- * @fault_addr: Fault address
- *
- * This function finds the GPU SVM notifier associated with the fault address.
- *
- * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
- */
-static struct drm_gpusvm_notifier *
-drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
- unsigned long fault_addr)
-{
- return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
-}
-
/**
* to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
* @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
@@ -1052,7 +979,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
if (!mmget_not_zero(mm))
return ERR_PTR(-EFAULT);
- notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
if (!notifier) {
notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
if (IS_ERR(notifier)) {
@@ -1216,7 +1143,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
drm_gpusvm_driver_lock_held(gpusvm);
notifier = drm_gpusvm_notifier_find(gpusvm,
- drm_gpusvm_range_start(range));
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_start(range) + 1);
if (WARN_ON_ONCE(!notifier))
return;
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 6a5156476bf4..cdd89e5af4f8 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -373,6 +373,10 @@ const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
unsigned long end);
@@ -530,4 +534,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range)
(range__) && (drm_gpusvm_range_start(range__) < (end__)); \
(range__) = __drm_gpusvm_range_next(range__))
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for the ranges temporay storage
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static inline struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for the notifiers temporay storage
+ * @gpusvm__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
#endif /* __DRM_GPUSVM_H__ */
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 06/19] drm/gpusvm: Make drm_gpusvm_for_each_* macros public
2025-05-27 16:39 ` [PATCH v3 06/19] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
@ 2025-05-28 23:01 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 23:01 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:50PM +0530, Himal Prasad Ghimiray wrote:
> The drm_gpusvm_for_each_notifier, drm_gpusvm_for_each_notifier_safe and
> drm_gpusvm_for_each_range_safe macros are useful for locating notifiers
> and ranges within a user-specified range. By making these macros public,
> we enable broader access and utility for developers who need to leverage
> them in their implementations.
>
> v2 (Matthew Brost)
> - drop inline __drm_gpusvm_range_find
> - /s/notifier_iter_first/drm_gpusvm_notifier_find
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/drm_gpusvm.c | 122 +++++++----------------------------
> include/drm/drm_gpusvm.h | 70 ++++++++++++++++++++
> 2 files changed, 95 insertions(+), 97 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
> index 7bb9eb71c9aa..e50a25fe1079 100644
> --- a/drivers/gpu/drm/drm_gpusvm.c
> +++ b/drivers/gpu/drm/drm_gpusvm.c
> @@ -380,107 +380,50 @@ static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
> }
>
> /**
> - * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
> - * @notifier: Pointer to the GPU SVM notifier structure.
> - * @start: Start address of the range
> - * @end: End address of the range
> + * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
> + * @gpusvm: Pointer to the GPU SVM structure.
> + * @start: Start address of the notifier
> + * @end: End address of the notifier
> *
> - * Return: A pointer to the drm_gpusvm_range if found or NULL
> + * Return: A pointer to the drm_gpusvm_notifier if found or NULL
> */
> -struct drm_gpusvm_range *
> -drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
> - unsigned long end)
> +struct drm_gpusvm_notifier *
> +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
> + unsigned long end)
> {
> struct interval_tree_node *itree;
>
> - itree = interval_tree_iter_first(&notifier->root, start, end - 1);
> + itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
>
> if (itree)
> - return container_of(itree, struct drm_gpusvm_range, itree);
> + return container_of(itree, struct drm_gpusvm_notifier, itree);
> else
> return NULL;
> }
> -EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
> +EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
>
> /**
> - * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
> - * @range__: Iterator variable for the ranges
> - * @next__: Iterator variable for the ranges temporay storage
> - * @notifier__: Pointer to the GPU SVM notifier
> - * @start__: Start address of the range
> - * @end__: End address of the range
> - *
> - * This macro is used to iterate over GPU SVM ranges in a notifier while
> - * removing ranges from it.
> - */
> -#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
> - for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
> - (next__) = __drm_gpusvm_range_next(range__); \
> - (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
> - (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
> -
> -/**
> - * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
> - * @notifier: a pointer to the current drm_gpusvm_notifier
> + * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
> + * @notifier: Pointer to the GPU SVM notifier structure.
> + * @start: Start address of the range
> + * @end: End address of the range
> *
> - * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
> - * the current notifier is the last one or if the input notifier is
> - * NULL.
> + * Return: A pointer to the drm_gpusvm_range if found or NULL
> */
> -static struct drm_gpusvm_notifier *
> -__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
> -{
> - if (notifier && !list_is_last(&notifier->entry,
> - &notifier->gpusvm->notifier_list))
> - return list_next_entry(notifier, entry);
> -
> - return NULL;
> -}
> -
> -static struct drm_gpusvm_notifier *
> -notifier_iter_first(struct rb_root_cached *root, unsigned long start,
> - unsigned long last)
> +struct drm_gpusvm_range *
> +drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
> + unsigned long end)
> {
> struct interval_tree_node *itree;
>
> - itree = interval_tree_iter_first(root, start, last);
> + itree = interval_tree_iter_first(&notifier->root, start, end - 1);
>
> if (itree)
> - return container_of(itree, struct drm_gpusvm_notifier, itree);
> + return container_of(itree, struct drm_gpusvm_range, itree);
> else
> return NULL;
> }
> -
> -/**
> - * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
> - * @notifier__: Iterator variable for the notifiers
> - * @notifier__: Pointer to the GPU SVM notifier
> - * @start__: Start address of the notifier
> - * @end__: End address of the notifier
> - *
> - * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
> - */
> -#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
> - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
> - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
> - (notifier__) = __drm_gpusvm_notifier_next(notifier__))
> -
> -/**
> - * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
> - * @notifier__: Iterator variable for the notifiers
> - * @next__: Iterator variable for the notifiers temporay storage
> - * @notifier__: Pointer to the GPU SVM notifier
> - * @start__: Start address of the notifier
> - * @end__: End address of the notifier
> - *
> - * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
> - * removing notifiers from it.
> - */
> -#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
> - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
> - (next__) = __drm_gpusvm_notifier_next(notifier__); \
> - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
> - (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
> +EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
>
> /**
> * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
> @@ -581,22 +524,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
> }
> EXPORT_SYMBOL_GPL(drm_gpusvm_init);
>
> -/**
> - * drm_gpusvm_notifier_find() - Find GPU SVM notifier
> - * @gpusvm: Pointer to the GPU SVM structure
> - * @fault_addr: Fault address
> - *
> - * This function finds the GPU SVM notifier associated with the fault address.
> - *
> - * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
> - */
> -static struct drm_gpusvm_notifier *
> -drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
> - unsigned long fault_addr)
> -{
> - return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
> -}
> -
> /**
> * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
> * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
> @@ -1052,7 +979,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
> if (!mmget_not_zero(mm))
> return ERR_PTR(-EFAULT);
>
> - notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
> + notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
> if (!notifier) {
> notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
> if (IS_ERR(notifier)) {
> @@ -1216,7 +1143,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
> drm_gpusvm_driver_lock_held(gpusvm);
>
> notifier = drm_gpusvm_notifier_find(gpusvm,
> - drm_gpusvm_range_start(range));
> + drm_gpusvm_range_start(range),
> + drm_gpusvm_range_start(range) + 1);
> if (WARN_ON_ONCE(!notifier))
> return;
>
> diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
> index 6a5156476bf4..cdd89e5af4f8 100644
> --- a/include/drm/drm_gpusvm.h
> +++ b/include/drm/drm_gpusvm.h
> @@ -373,6 +373,10 @@ const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
> bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
> unsigned long end);
>
> +struct drm_gpusvm_notifier *
> +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
> + unsigned long end);
> +
> struct drm_gpusvm_range *
> drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
> unsigned long end);
> @@ -530,4 +534,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range)
> (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
> (range__) = __drm_gpusvm_range_next(range__))
>
> +/**
> + * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
> + * @range__: Iterator variable for the ranges
> + * @next__: Iterator variable for the ranges temporay storage
> + * @notifier__: Pointer to the GPU SVM notifier
> + * @start__: Start address of the range
> + * @end__: End address of the range
> + *
> + * This macro is used to iterate over GPU SVM ranges in a notifier while
> + * removing ranges from it.
> + */
> +#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
> + for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
> + (next__) = __drm_gpusvm_range_next(range__); \
> + (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
> + (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
> +
> +/**
> + * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
> + * @notifier: a pointer to the current drm_gpusvm_notifier
> + *
> + * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
> + * the current notifier is the last one or if the input notifier is
> + * NULL.
> + */
> +static inline struct drm_gpusvm_notifier *
> +__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
> +{
> + if (notifier && !list_is_last(&notifier->entry,
> + &notifier->gpusvm->notifier_list))
> + return list_next_entry(notifier, entry);
> +
> + return NULL;
> +}
> +
> +/**
> + * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
> + * @notifier__: Iterator variable for the notifiers
> + * @gpusvm__: Pointer to the GPU SVM notifier
> + * @start__: Start address of the notifier
> + * @end__: End address of the notifier
> + *
> + * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
> + */
> +#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
> + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
> + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
> + (notifier__) = __drm_gpusvm_notifier_next(notifier__))
> +
> +/**
> + * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
> + * @notifier__: Iterator variable for the notifiers
> + * @next__: Iterator variable for the notifiers temporay storage
> + * @gpusvm__: Pointer to the GPU SVM notifier
> + * @start__: Start address of the notifier
> + * @end__: End address of the notifier
> + *
> + * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
> + * removing notifiers from it.
> + */
> +#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
> + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
> + (next__) = __drm_gpusvm_notifier_next(notifier__); \
> + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
> + (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
> +
> #endif /* __DRM_GPUSVM_H__ */
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (5 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 06/19] drm/gpusvm: Make drm_gpusvm_for_each_* macros public Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 23:12 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
` (19 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray, Matthew Brost
Introduce xe_vm_range_tilemask_tlb_invalidation(), which issues a TLB
invalidation for a specified address range across GTs indicated by a
tilemask.
Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 43 +--------------
drivers/gpu/drm/xe/xe_vm.c | 103 ++++++++++++++++++++++++------------
drivers/gpu/drm/xe/xe_vm.h | 3 ++
3 files changed, 75 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 871ac81bb04a..59e73187114d 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -167,14 +167,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
{
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
- struct xe_tile *tile;
struct drm_gpusvm_range *r, *first;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
- u8 id;
- u32 fence_id = 0;
long err;
xe_svm_assert_in_notifier(vm);
@@ -220,42 +215,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- for_each_tile(tile, xe, id) {
- if (tile_mask & BIT(id)) {
- int err;
-
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ XE_WARN_ON(err);
range_notifier_event_end:
r = first;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index de6ecff237a6..d60b711e97e9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3851,6 +3851,68 @@ void xe_vm_unlock(struct xe_vm *vm)
dma_resv_unlock(xe_vm_resv(vm));
}
+/**
+ * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * address range
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ * @tile_mask: mask for which gt's issue tlb invalidation
+ *
+ * Issue a range based TLB invalidation for gt's in tilemask
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
+{
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tile *tile;
+ u32 fence_id = 0;
+ u8 id;
+ int err;
+
+ if (!tile_mask)
+ return 0;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+ return err;
+}
+
/**
* xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
* @vma: VMA to invalidate
@@ -3865,11 +3927,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
- u8 id;
- u32 fence_id = 0;
+ u8 tile_mask = 0;
int ret = 0;
+ u8 id;
xe_assert(xe, !xe_vma_is_null(vma));
xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
@@ -3893,37 +3953,14 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- for_each_tile(tile, xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes(tile, vma))
+ tile_mask |= BIT(id);
- if (!tile->media_gt)
- continue;
+ xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
vma->tile_invalidated = vma->tile_mask;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 99e164852f63..1ef98113fa5b 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -228,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
+
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
2025-05-27 16:39 ` [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation() Himal Prasad Ghimiray
@ 2025-05-28 23:12 ` Matthew Brost
2025-05-29 3:21 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 23:12 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:51PM +0530, Himal Prasad Ghimiray wrote:
> Introduce xe_vm_range_tilemask_tlb_invalidation(), which issues a TLB
> invalidation for a specified address range across GTs indicated by a
> tilemask.
>
> Suggested-by: Matthew Brost <matthew.brost@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
A couple nits, but feel free to post a follow up as independent patch to
merge ahead of madvise.
> ---
> drivers/gpu/drm/xe/xe_svm.c | 43 +--------------
> drivers/gpu/drm/xe/xe_vm.c | 103 ++++++++++++++++++++++++------------
> drivers/gpu/drm/xe/xe_vm.h | 3 ++
> 3 files changed, 75 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 871ac81bb04a..59e73187114d 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -167,14 +167,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
> {
> struct xe_vm *vm = gpusvm_to_vm(gpusvm);
> struct xe_device *xe = vm->xe;
> - struct xe_tile *tile;
> struct drm_gpusvm_range *r, *first;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
> u8 tile_mask = 0;
> - u8 id;
> - u32 fence_id = 0;
> long err;
>
> xe_svm_assert_in_notifier(vm);
> @@ -220,42 +215,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>
> xe_device_wmb(xe);
>
> - for_each_tile(tile, xe, id) {
> - if (tile_mask & BIT(id)) {
> - int err;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> -
> - if (!tile->media_gt)
> - continue;
> -
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id], true);
> -
> - err = xe_gt_tlb_invalidation_range(tile->media_gt,
> - &fence[fence_id],
> - adj_start,
> - adj_end,
> - vm->usm.asid);
> - if (WARN_ON_ONCE(err < 0))
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
> + XE_WARN_ON(err);
WARN_ON_ONCE
>
> range_notifier_event_end:
> r = first;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index de6ecff237a6..d60b711e97e9 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -3851,6 +3851,68 @@ void xe_vm_unlock(struct xe_vm *vm)
> dma_resv_unlock(xe_vm_resv(vm));
> }
>
> +/**
> + * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
> + * address range
> + * @vm: The VM
> + * @start: start address
> + * @end: end address
> + * @tile_mask: mask for which gt's issue tlb invalidation
> + *
> + * Issue a range based TLB invalidation for gt's in tilemask
> + *
> + * Returns 0 for success, negative error code otherwise.
> + */
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask)
> +{
> + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> + struct xe_tile *tile;
> + u32 fence_id = 0;
> + u8 id;
> + int err;
> +
> + if (!tile_mask)
> + return 0;
> +
> + for_each_tile(tile, vm->xe, id) {
> + if (tile_mask & BIT(id)) {
> + xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->primary_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
Let's just have the WARN_ON_ONCE in the SVM code at the caller - that is
the place where we can't really fail and warrents the warn.
> + ++fence_id;
> +
> + if (!tile->media_gt)
> + continue;
> +
> + xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> + &fence[fence_id], true);
> +
> + err = xe_gt_tlb_invalidation_range(tile->media_gt,
> + &fence[fence_id],
> + start,
> + end,
> + vm->usm.asid);
> + if (WARN_ON_ONCE(err < 0))
> + goto wait;
> + ++fence_id;
> + }
> + }
> +
> +wait:
> + for (id = 0; id < fence_id; ++id)
> + xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> +
> + return err;
> +}
> +
> /**
> * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
> * @vma: VMA to invalidate
> @@ -3865,11 +3927,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> {
> struct xe_device *xe = xe_vma_vm(vma)->xe;
> struct xe_tile *tile;
> - struct xe_gt_tlb_invalidation_fence
> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
> - u8 id;
> - u32 fence_id = 0;
> + u8 tile_mask = 0;
> int ret = 0;
> + u8 id;
>
> xe_assert(xe, !xe_vma_is_null(vma));
> xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
> @@ -3893,37 +3953,14 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> }
> }
>
> - for_each_tile(tile, xe, id) {
> - if (xe_pt_zap_ptes(tile, vma)) {
> - xe_device_wmb(xe);
> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
> - &fence[fence_id], vma);
You can delete xe_gt_tlb_invalidation_vma now as this was the only
caller.
Matt
> - if (ret)
> - goto wait;
> - ++fence_id;
> + for_each_tile(tile, xe, id)
> + if (xe_pt_zap_ptes(tile, vma))
> + tile_mask |= BIT(id);
>
> - if (!tile->media_gt)
> - continue;
> + xe_device_wmb(xe);
>
> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
> - &fence[fence_id],
> - true);
> -
> - ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
> - &fence[fence_id], vma);
> - if (ret)
> - goto wait;
> - ++fence_id;
> - }
> - }
> -
> -wait:
> - for (id = 0; id < fence_id; ++id)
> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
> + ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
> + xe_vma_end(vma), tile_mask);
>
> vma->tile_invalidated = vma->tile_mask;
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 99e164852f63..1ef98113fa5b 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -228,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
> struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
> struct xe_svm_range *range);
>
> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
> + u64 end, u8 tile_mask);
> +
> int xe_vm_invalidate_vma(struct xe_vma *vma);
>
> int xe_vm_validate_protected(struct xe_vm *vm);
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
2025-05-28 23:12 ` Matthew Brost
@ 2025-05-29 3:21 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-05-29 3:21 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 04:42, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:51PM +0530, Himal Prasad Ghimiray wrote:
>> Introduce xe_vm_range_tilemask_tlb_invalidation(), which issues a TLB
>> invalidation for a specified address range across GTs indicated by a
>> tilemask.
>>
>> Suggested-by: Matthew Brost <matthew.brost@intel.com>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>
> A couple nits, but feel free to post a follow up as independent patch to
> merge ahead of madvise.
Sure
>
>> ---
>> drivers/gpu/drm/xe/xe_svm.c | 43 +--------------
>> drivers/gpu/drm/xe/xe_vm.c | 103 ++++++++++++++++++++++++------------
>> drivers/gpu/drm/xe/xe_vm.h | 3 ++
>> 3 files changed, 75 insertions(+), 74 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index 871ac81bb04a..59e73187114d 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -167,14 +167,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>> {
>> struct xe_vm *vm = gpusvm_to_vm(gpusvm);
>> struct xe_device *xe = vm->xe;
>> - struct xe_tile *tile;
>> struct drm_gpusvm_range *r, *first;
>> - struct xe_gt_tlb_invalidation_fence
>> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
>> u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
>> u8 tile_mask = 0;
>> - u8 id;
>> - u32 fence_id = 0;
>> long err;
>>
>> xe_svm_assert_in_notifier(vm);
>> @@ -220,42 +215,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
>>
>> xe_device_wmb(xe);
>>
>> - for_each_tile(tile, xe, id) {
>> - if (tile_mask & BIT(id)) {
>> - int err;
>> -
>> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
>> - &fence[fence_id], true);
>> -
>> - err = xe_gt_tlb_invalidation_range(tile->primary_gt,
>> - &fence[fence_id],
>> - adj_start,
>> - adj_end,
>> - vm->usm.asid);
>> - if (WARN_ON_ONCE(err < 0))
>> - goto wait;
>> - ++fence_id;
>> -
>> - if (!tile->media_gt)
>> - continue;
>> -
>> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
>> - &fence[fence_id], true);
>> -
>> - err = xe_gt_tlb_invalidation_range(tile->media_gt,
>> - &fence[fence_id],
>> - adj_start,
>> - adj_end,
>> - vm->usm.asid);
>> - if (WARN_ON_ONCE(err < 0))
>> - goto wait;
>> - ++fence_id;
>> - }
>> - }
>> -
>> -wait:
>> - for (id = 0; id < fence_id; ++id)
>> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
>> + err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
>> + XE_WARN_ON(err);
>
> WARN_ON_ONCE
ok.
>
>>
>> range_notifier_event_end:
>> r = first;
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index de6ecff237a6..d60b711e97e9 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -3851,6 +3851,68 @@ void xe_vm_unlock(struct xe_vm *vm)
>> dma_resv_unlock(xe_vm_resv(vm));
>> }
>>
>> +/**
>> + * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
>> + * address range
>> + * @vm: The VM
>> + * @start: start address
>> + * @end: end address
>> + * @tile_mask: mask for which gt's issue tlb invalidation
>> + *
>> + * Issue a range based TLB invalidation for gt's in tilemask
>> + *
>> + * Returns 0 for success, negative error code otherwise.
>> + */
>> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
>> + u64 end, u8 tile_mask)
>> +{
>> + struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
>> + struct xe_tile *tile;
>> + u32 fence_id = 0;
>> + u8 id;
>> + int err;
>> +
>> + if (!tile_mask)
>> + return 0;
>> +
>> + for_each_tile(tile, vm->xe, id) {
>> + if (tile_mask & BIT(id)) {
>> + xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
>> + &fence[fence_id], true);
>> +
>> + err = xe_gt_tlb_invalidation_range(tile->primary_gt,
>> + &fence[fence_id],
>> + start,
>> + end,
>> + vm->usm.asid);
>> + if (WARN_ON_ONCE(err < 0))
>> + goto wait;
>
> Let's just have the WARN_ON_ONCE in the SVM code at the caller - that is
> the place where we can't really fail and warrants the warn.
ok
>
>> + ++fence_id;
>> +
>> + if (!tile->media_gt)
>> + continue;
>> +
>> + xe_gt_tlb_invalidation_fence_init(tile->media_gt,
>> + &fence[fence_id], true);
>> +
>> + err = xe_gt_tlb_invalidation_range(tile->media_gt,
>> + &fence[fence_id],
>> + start,
>> + end,
>> + vm->usm.asid);
>> + if (WARN_ON_ONCE(err < 0))
>> + goto wait;
>> + ++fence_id;
>> + }
>> + }
>> +
>> +wait:
>> + for (id = 0; id < fence_id; ++id)
>> + xe_gt_tlb_invalidation_fence_wait(&fence[id]);
>> +
>> + return err;
>> +}
>> +
>> /**
>> * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
>> * @vma: VMA to invalidate
>> @@ -3865,11 +3927,9 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
>> {
>> struct xe_device *xe = xe_vma_vm(vma)->xe;
>> struct xe_tile *tile;
>> - struct xe_gt_tlb_invalidation_fence
>> - fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
>> - u8 id;
>> - u32 fence_id = 0;
>> + u8 tile_mask = 0;
>> int ret = 0;
>> + u8 id;
>>
>> xe_assert(xe, !xe_vma_is_null(vma));
>> xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
>> @@ -3893,37 +3953,14 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
>> }
>> }
>>
>> - for_each_tile(tile, xe, id) {
>> - if (xe_pt_zap_ptes(tile, vma)) {
>> - xe_device_wmb(xe);
>> - xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
>> - &fence[fence_id],
>> - true);
>> -
>> - ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
>> - &fence[fence_id], vma);
>
> You can delete xe_gt_tlb_invalidation_vma now as this was the only
> caller.
Makes sense.
>
> Matt
>
>> - if (ret)
>> - goto wait;
>> - ++fence_id;
>> + for_each_tile(tile, xe, id)
>> + if (xe_pt_zap_ptes(tile, vma))
>> + tile_mask |= BIT(id);
>>
>> - if (!tile->media_gt)
>> - continue;
>> + xe_device_wmb(xe);
>>
>> - xe_gt_tlb_invalidation_fence_init(tile->media_gt,
>> - &fence[fence_id],
>> - true);
>> -
>> - ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
>> - &fence[fence_id], vma);
>> - if (ret)
>> - goto wait;
>> - ++fence_id;
>> - }
>> - }
>> -
>> -wait:
>> - for (id = 0; id < fence_id; ++id)
>> - xe_gt_tlb_invalidation_fence_wait(&fence[id]);
>> + ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
>> + xe_vma_end(vma), tile_mask);
>>
>> vma->tile_invalidated = vma->tile_mask;
>>
>> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
>> index 99e164852f63..1ef98113fa5b 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.h
>> +++ b/drivers/gpu/drm/xe/xe_vm.h
>> @@ -228,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
>> struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
>> struct xe_svm_range *range);
>>
>> +int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
>> + u64 end, u8 tile_mask);
>> +
>> int xe_vm_invalidate_vma(struct xe_vma *vma);
>>
>> int xe_vm_validate_protected(struct xe_vm *vm);
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (6 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 07/19] drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation() Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 23:15 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
` (18 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
entries (PTEs) for all SVM ranges within a user-specified address range.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
2 files changed, 50 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 59e73187114d..a4d53c24fcbc 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return err;
}
+/**
+ * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range
+ *
+ * This function removes the page table entries (PTEs) associated
+ * with the svm ranges within the given input start amnd end
+ *
+ * Return: tile_mask for which gt's need to be tlb invalidated.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct xe_svm_range *range;
+ u64 adj_start, adj_end;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ down_write(&vm->svm.gpusvm.notifier_lock);
+
+ drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *r = NULL;
+
+ adj_start = max(start, notifier->itree.start);
+ adj_end = min(end, notifier->itree.last + 1);
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+ range = to_xe_range(r);
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ range->tile_invalidated |= BIT(id);
+ }
+ }
+ }
+ }
+
+ up_write(&vm->svm.gpusvm.notifier_lock);
+
+ return tile_mask;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct drm_pagemap_device_addr
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 19ce4f2754a7..af8f285b6caa 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
return ULONG_MAX;
}
+static inline
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ return 0;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-27 16:39 ` [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
@ 2025-05-28 23:15 ` Matthew Brost
2025-05-29 3:06 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 23:15 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:52PM +0530, Himal Prasad Ghimiray wrote:
> Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
> entries (PTEs) for all SVM ranges within a user-specified address range.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
> 2 files changed, 50 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 59e73187114d..a4d53c24fcbc 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> return err;
> }
>
> +/**
> + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
> + * @vm: Pointer to the xe_vm structure
> + * @start: Start of the input range
> + * @end: End of the input range
> + *
> + * This function removes the page table entries (PTEs) associated
>> + * with the svm ranges within the given input start and end
> + *
> + * Return: tile_mask for which gt's need to be tlb invalidated.
> + */
> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> +{
> + struct drm_gpusvm_notifier *notifier;
> + struct xe_svm_range *range;
> + u64 adj_start, adj_end;
> + struct xe_tile *tile;
> + u8 tile_mask = 0;
> + u8 id;
> +
> + down_write(&vm->svm.gpusvm.notifier_lock);
xe_svm_notifier_lock
> +
> + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
> + struct drm_gpusvm_range *r = NULL;
> +
> + adj_start = max(start, notifier->itree.start);
> + adj_end = min(end, notifier->itree.last + 1);
> + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
> + range = to_xe_range(r);
> + for_each_tile(tile, vm->xe, id) {
> + if (xe_pt_zap_ptes_range(tile, vm, range)) {
> + tile_mask |= BIT(id);
> + range->tile_invalidated |= BIT(id);
> + }
> + }
> + }
> + }
> +
> + up_write(&vm->svm.gpusvm.notifier_lock);
> +
xe_svm_notifier_unlock
Matt
> + return tile_mask;
> +}
> +
> #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>
> static struct drm_pagemap_device_addr
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 19ce4f2754a7..af8f285b6caa 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
>
> u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
>
> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
> return ULONG_MAX;
> }
>
> +static inline
> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> +{
> + return 0;
> +}
> +
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-28 23:15 ` Matthew Brost
@ 2025-05-29 3:06 ` Ghimiray, Himal Prasad
2025-05-29 4:00 ` Matthew Brost
0 siblings, 1 reply; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-05-29 3:06 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 04:45, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:52PM +0530, Himal Prasad Ghimiray wrote:
>> Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
>> entries (PTEs) for all SVM ranges within a user-specified address range.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
>> 2 files changed, 50 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index 59e73187114d..a4d53c24fcbc 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>> return err;
>> }
>>
>> +/**
>> + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
>> + * @vm: Pointer to the xe_vm structure
>> + * @start: Start of the input range
>> + * @end: End of the input range
>> + *
>> + * This function removes the page table entries (PTEs) associated
>> + * with the svm ranges within the given input start and end
>> + *
>> + * Return: tile_mask for which gt's need to be tlb invalidated.
>> + */
>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> + struct drm_gpusvm_notifier *notifier;
>> + struct xe_svm_range *range;
>> + u64 adj_start, adj_end;
>> + struct xe_tile *tile;
>> + u8 tile_mask = 0;
>> + u8 id;
>> +
>> + down_write(&vm->svm.gpusvm.notifier_lock);
>
> xe_svm_notifier_lock
xe_pt_zap_ptes_range needs write_lock, whereas
xe_svm_notifier_lock/unlock provides read lock.
>
>> +
>> + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
>> + struct drm_gpusvm_range *r = NULL;
>> +
>> + adj_start = max(start, notifier->itree.start);
>> + adj_end = min(end, notifier->itree.last + 1);
>> + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
>> + range = to_xe_range(r);
>> + for_each_tile(tile, vm->xe, id) {
>> + if (xe_pt_zap_ptes_range(tile, vm, range)) {
>> + tile_mask |= BIT(id);
>> + range->tile_invalidated |= BIT(id);
>> + }
>> + }
>> + }
>> + }
>> +
>> + up_write(&vm->svm.gpusvm.notifier_lock);
>> +
>
> xe_svm_notifier_unlock
>
> Matt
>
>> + return tile_mask;
>> +}
>> +
>> #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>>
>> static struct drm_pagemap_device_addr
>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>> index 19ce4f2754a7..af8f285b6caa 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.h
>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>> @@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
>>
>> u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
>>
>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>> /**
>> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>> * @range: SVM range
>> @@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
>> return ULONG_MAX;
>> }
>>
>> +static inline
>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> + return 0;
>> +}
>> +
>> #define xe_svm_assert_in_notifier(...) do {} while (0)
>> #define xe_svm_range_has_dma_mapping(...) false
>>
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-29 3:06 ` Ghimiray, Himal Prasad
@ 2025-05-29 4:00 ` Matthew Brost
2025-05-30 6:29 ` Matthew Brost
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 4:00 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
On Thu, May 29, 2025 at 08:36:28AM +0530, Ghimiray, Himal Prasad wrote:
>
>
> On 29-05-2025 04:45, Matthew Brost wrote:
> > On Tue, May 27, 2025 at 10:09:52PM +0530, Himal Prasad Ghimiray wrote:
> > > Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
> > > entries (PTEs) for all SVM ranges within a user-specified address range.
> > >
> > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > > ---
> > > drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
> > > drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
> > > 2 files changed, 50 insertions(+)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > > index 59e73187114d..a4d53c24fcbc 100644
> > > --- a/drivers/gpu/drm/xe/xe_svm.c
> > > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > > @@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> > > return err;
> > > }
> > > +/**
> > > + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
> > > + * @vm: Pointer to the xe_vm structure
> > > + * @start: Start of the input range
> > > + * @end: End of the input range
> > > + *
> > > + * This function removes the page table entries (PTEs) associated
> > > + * with the svm ranges within the given input start amnd end
> > > + *
> > > + * Return: tile_mask for which gt's need to be tlb invalidated.
> > > + */
> > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> > > +{
> > > + struct drm_gpusvm_notifier *notifier;
> > > + struct xe_svm_range *range;
> > > + u64 adj_start, adj_end;
> > > + struct xe_tile *tile;
> > > + u8 tile_mask = 0;
> > > + u8 id;
> > > +
> > > + down_write(&vm->svm.gpusvm.notifier_lock);
> >
> > xe_svm_notifier_lock
>
> xe_pt_zap_ptes_range needs write_lock, whereas xe_svm_notifier_lock/unlock
> provides read lock.
Hmm, I think the assert in xe_pt_zap_ptes_range is actually wrong. I
likely just added the in notifier assertion because that was the only
user of it. We want to guarantee that only 1 KMD thread is issuing a zap
or modifying the PTEs at a time.
- The notifier lock in read mode guarantees that an invalidation
from MMU notifier doesn't race here.
- The VM lock in write mode guarantees no one is modifying the page
tables.
- The notifier lock in write mode guarantees no one is modifying the
page tables and invalidation from madvise doesn't race.
I think this complex condition can expressed in lockdep by:
lockdep_assert(lockdep_is_held_type(notifier_lock, 0) ||
(lockdep_is_held_type(notifier_lock, 1) &&
lockdep_is_held_type(vm_lock, 0)));
If this works, a comment explaining above is probably warranted.
If the above doesn't work or we deemed this to complex, maybe it fine to
just take the notifier lock in write mode...
I suggest we get another opinion here, perhaps from Thomas.
Matt
> >
> > > +
> > > + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
> > > + struct drm_gpusvm_range *r = NULL;
> > > +
> > > + adj_start = max(start, notifier->itree.start);
> > > + adj_end = min(end, notifier->itree.last + 1);
> > > + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
> > > + range = to_xe_range(r);
> > > + for_each_tile(tile, vm->xe, id) {
> > > + if (xe_pt_zap_ptes_range(tile, vm, range)) {
> > > + tile_mask |= BIT(id);
> > > + range->tile_invalidated |= BIT(id);
> > > + }
> > > + }
> > > + }
> > > + }
> > > +
> > > + up_write(&vm->svm.gpusvm.notifier_lock);
> > > +
> >
> > xe_svm_notifier_unlock
> >
> > Matt
> >
> > > + return tile_mask;
> > > +}
> > > +
> > > #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
> > > static struct drm_pagemap_device_addr
> > > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > > index 19ce4f2754a7..af8f285b6caa 100644
> > > --- a/drivers/gpu/drm/xe/xe_svm.h
> > > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > > @@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
> > > u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
> > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
> > > /**
> > > * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> > > * @range: SVM range
> > > @@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
> > > return ULONG_MAX;
> > > }
> > > +static inline
> > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> > > +{
> > > + return 0;
> > > +}
> > > +
> > > #define xe_svm_assert_in_notifier(...) do {} while (0)
> > > #define xe_svm_range_has_dma_mapping(...) false
> > > --
> > > 2.34.1
> > >
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-29 4:00 ` Matthew Brost
@ 2025-05-30 6:29 ` Matthew Brost
2025-06-10 4:31 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 6:29 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
On Wed, May 28, 2025 at 09:00:27PM -0700, Matthew Brost wrote:
> On Thu, May 29, 2025 at 08:36:28AM +0530, Ghimiray, Himal Prasad wrote:
> >
> >
> > On 29-05-2025 04:45, Matthew Brost wrote:
> > > On Tue, May 27, 2025 at 10:09:52PM +0530, Himal Prasad Ghimiray wrote:
> > > > Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
> > > > entries (PTEs) for all SVM ranges within a user-specified address range.
> > > >
> > > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > > > ---
> > > > drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
> > > > drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
> > > > 2 files changed, 50 insertions(+)
> > > >
> > > > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > > > index 59e73187114d..a4d53c24fcbc 100644
> > > > --- a/drivers/gpu/drm/xe/xe_svm.c
> > > > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > > > @@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> > > > return err;
> > > > }
> > > > +/**
> > > > + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
> > > > + * @vm: Pointer to the xe_vm structure
> > > > + * @start: Start of the input range
> > > > + * @end: End of the input range
> > > > + *
> > > > + * This function removes the page table entries (PTEs) associated
> > > + * with the svm ranges within the given input start and end
> > > > + *
> > > > + * Return: tile_mask for which gt's need to be tlb invalidated.
> > > > + */
> > > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> > > > +{
> > > > + struct drm_gpusvm_notifier *notifier;
> > > > + struct xe_svm_range *range;
> > > > + u64 adj_start, adj_end;
> > > > + struct xe_tile *tile;
> > > > + u8 tile_mask = 0;
> > > > + u8 id;
> > > > +
> > > > + down_write(&vm->svm.gpusvm.notifier_lock);
> > >
> > > xe_svm_notifier_lock
> >
> > xe_pt_zap_ptes_range needs write_lock, whereas xe_svm_notifier_lock/unlock
> > provides read lock.
>
> Hmm, I think the assert in xe_pt_zap_ptes_range is actually wrong. I
> likely just added the in notifier assertion because that was the only
> user of it. We want to guarantee that only 1 KMD thread is issuing a zap
> or modifying the PTEs at a time.
>
> - The notifier lock in read mode guarantees that an invalidation
> from MMU notifier doesn't race here.
>
> - The VM lock in write mode guarantees no one is modifying the page
> tables.
>
> - The notifier lock in write mode guarantees no one is modifying the
> page tables and invalidation from madvise doesn't race.
>
> I think this complex condition can expressed in lockdep by:
>
> lockdep_assert(lockdep_is_held_type(notifier_lock, 0) ||
> (lockdep_is_held_type(notifier_lock, 1) &&
> lockdep_is_held_type(vm_lock, 0)));
>
> If this works, a comment explaining above is probably warranted.
>
> If the above doesn't work or we deemed this to complex, maybe it fine to
> just take the notifier lock in write mode...
>
> I suggest we get another opinion here, perhaps from Thomas.
>
> Matt
>
Actually, this locking is incorrect for another reason as well — the SVM
notifier lock needs to be held from the start of the zap until the TLB
invalidation completes. The reason is that an MMU notifier could race by
seeing tile_invalidated set, skipping the invalidation, returning, and
moving the CPU pages before the GPU has actually stopped accessing them.
Similarly, the same race condition exists for userptr and BOs being
moved. So, for each invalidation, we need to lock all dma-resv of the
BOs being invalidated, as well as the notifiers.
Therefore, I think invalidations need to be moved directly after calling
the vfunc that sets the property, using a DRM exec loop to lock all
dma-resv of the BOs in the VMA list while we have it, then take the
notifier locks, and finally issue the zap and invalidation.
All my previous replies to this patch stand too.
Matt
> > >
> > > > +
> > > > + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
> > > > + struct drm_gpusvm_range *r = NULL;
> > > > +
> > > > + adj_start = max(start, notifier->itree.start);
> > > > + adj_end = min(end, notifier->itree.last + 1);
> > > > + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
> > > > + range = to_xe_range(r);
> > > > + for_each_tile(tile, vm->xe, id) {
> > > > + if (xe_pt_zap_ptes_range(tile, vm, range)) {
> > > > + tile_mask |= BIT(id);
> > > > + range->tile_invalidated |= BIT(id);
> > > > + }
> > > > + }
> > > > + }
> > > > + }
> > > > +
> > > > + up_write(&vm->svm.gpusvm.notifier_lock);
> > > > +
> > >
> > > xe_svm_notifier_unlock
> > >
> > > Matt
> > >
> > > > + return tile_mask;
> > > > +}
> > > > +
> > > > #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
> > > > static struct drm_pagemap_device_addr
> > > > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > > > index 19ce4f2754a7..af8f285b6caa 100644
> > > > --- a/drivers/gpu/drm/xe/xe_svm.h
> > > > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > > > @@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
> > > > u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
> > > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
> > > > /**
> > > > * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> > > > * @range: SVM range
> > > > @@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
> > > > return ULONG_MAX;
> > > > }
> > > > +static inline
> > > > +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> > > > +{
> > > > + return 0;
> > > > +}
> > > > +
> > > > #define xe_svm_assert_in_notifier(...) do {} while (0)
> > > > #define xe_svm_range_has_dma_mapping(...) false
> > > > --
> > > > 2.34.1
> > > >
> >
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
2025-05-30 6:29 ` Matthew Brost
@ 2025-06-10 4:31 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-06-10 4:31 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 30-05-2025 11:59, Matthew Brost wrote:
> On Wed, May 28, 2025 at 09:00:27PM -0700, Matthew Brost wrote:
>> On Thu, May 29, 2025 at 08:36:28AM +0530, Ghimiray, Himal Prasad wrote:
>>>
>>>
>>> On 29-05-2025 04:45, Matthew Brost wrote:
>>>> On Tue, May 27, 2025 at 10:09:52PM +0530, Himal Prasad Ghimiray wrote:
>>>>> Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table
>>>>> entries (PTEs) for all SVM ranges within a user-specified address range.
>>>>>
>>>>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>>>>> ---
>>>>> drivers/gpu/drm/xe/xe_svm.c | 43 +++++++++++++++++++++++++++++++++++++
>>>>> drivers/gpu/drm/xe/xe_svm.h | 7 ++++++
>>>>> 2 files changed, 50 insertions(+)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>>>>> index 59e73187114d..a4d53c24fcbc 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_svm.c
>>>>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>>>>> @@ -1006,6 +1006,49 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
>>>>> return err;
>>>>> }
>>>>> +/**
>>>>> + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
>>>>> + * @vm: Pointer to the xe_vm structure
>>>>> + * @start: Start of the input range
>>>>> + * @end: End of the input range
>>>>> + *
>>>>> + * This function removes the page table entries (PTEs) associated
>>>>> + * with the svm ranges within the given input start and end
>>>>> + *
>>>>> + * Return: tile_mask for which gt's need to be tlb invalidated.
>>>>> + */
>>>>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>>>>> +{
>>>>> + struct drm_gpusvm_notifier *notifier;
>>>>> + struct xe_svm_range *range;
>>>>> + u64 adj_start, adj_end;
>>>>> + struct xe_tile *tile;
>>>>> + u8 tile_mask = 0;
>>>>> + u8 id;
>>>>> +
>>>>> + down_write(&vm->svm.gpusvm.notifier_lock);
>>>>
>>>> xe_svm_notifier_lock
>>>
>>> xe_pt_zap_ptes_range needs write_lock, whereas xe_svm_notifier_lock/unlock
>>> provides read lock.
>>
>> Hmm, I think the assert in xe_pt_zap_ptes_range is actually wrong. I
>> likely just added the in notifier assertion because that was the only
>> user of it. We want to guarantee that only 1 KMD thread is issuing a zap
>> or modifying the PTEs at a time.
>>
>> - The notifier lock in read mode guarantees that an invalidation
>> from MMU notifier doesn't race here.
>>
>> - The VM lock in write mode guarantees no one is modifying the page
>> tables.
>>
>> - The notifier lock in write mode guarantees no one is modifying the
>> page tables and invalidation from madvise doesn't race.
>>
>> I think this complex condition can expressed in lockdep by:
>>
>> lockdep_assert(lockdep_is_held_type(notifier_lock, 0) ||
>> (lockdep_is_held_type(notifier_lock, 1) &&
>> lockdep_is_held_type(vm_lock, 0)));
>>
>> If this works, a comment explaining above is probably warranted.
>>
>> If the above doesn't work or we deemed this to complex, maybe it fine to
>> just take the notifier lock in write mode...
>>
>> I suggest we get another opinion here, perhaps from Thomas.
>>
>> Matt
>>
>
> Actually, this locking is incorrect for another reason as well — the SVM
> notifier lock needs to be held from the start of the zap until the TLB
> invalidation completes. The reason is that an MMU notifier could race by
> seeing tile_invalidated set, skipping the invalidation, returning, and
> moving the CPU pages before the GPU has actually stopped accessing them.
Agreed, a miss on my end.
>
> Similarly, the same race condition exists for userptr and BOs being
> moved. So, for each invalidation, we need to lock all dma-resv of the
> BOs being invalidated, as well as the notifiers.
true.
>
> Therefore, I think invalidations need to be moved directly after calling
> the vfunc that sets the property, using a DRM exec loop to lock all
> dma-resv of the BOs in the VMA list while we have it, then take the
> notifier locks, and finally issue the zap and invalidation.
Makes sense.
>
> All my previous replies to this patch stand too.
>
> Matt
>
>>> >
>>>>> +
>>>>> + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
>>>>> + struct drm_gpusvm_range *r = NULL;
>>>>> +
>>>>> + adj_start = max(start, notifier->itree.start);
>>>>> + adj_end = min(end, notifier->itree.last + 1);
>>>>> + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
>>>>> + range = to_xe_range(r);
>>>>> + for_each_tile(tile, vm->xe, id) {
>>>>> + if (xe_pt_zap_ptes_range(tile, vm, range)) {
>>>>> + tile_mask |= BIT(id);
>>>>> + range->tile_invalidated |= BIT(id);
>>>>> + }
>>>>> + }
>>>>> + }
>>>>> + }
>>>>> +
>>>>> + up_write(&vm->svm.gpusvm.notifier_lock);
>>>>> +
>>>>
>>>> xe_svm_notifier_unlock
>>>>
>>>> Matt
>>>>
>>>>> + return tile_mask;
>>>>> +}
>>>>> +
>>>>> #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
>>>>> static struct drm_pagemap_device_addr
>>>>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>>>>> index 19ce4f2754a7..af8f285b6caa 100644
>>>>> --- a/drivers/gpu/drm/xe/xe_svm.h
>>>>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>>>>> @@ -91,6 +91,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
>>>>> u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
>>>>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>>>>> /**
>>>>> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>>>>> * @range: SVM range
>>>>> @@ -305,6 +306,12 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
>>>>> return ULONG_MAX;
>>>>> }
>>>>> +static inline
>>>>> +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>>>>> +{
>>>>> + return 0;
>>>>> +}
>>>>> +
>>>>> #define xe_svm_assert_in_notifier(...) do {} while (0)
>>>>> #define xe_svm_range_has_dma_mapping(...) false
>>>>> --
>>>>> 2.34.1
>>>>>
>>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (7 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 08/19] drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-29 2:49 ` Matthew Brost
2025-06-02 6:31 ` Dan Carpenter
2025-05-27 16:39 ` [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
` (17 subsequent siblings)
26 siblings, 2 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
If the start or end of the input address range lies within a system allocator
vma, split the vma to create new VMAs as per the input range.
v2 (Matthew Brost)
- Add lockdep_assert_write for vm->lock
- Remove unnecessary page aligned checks
- Add kernel-doc and comments
- Remove unnecessary unwind_ops and return
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_vm.c | 95 ++++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 2 +
2 files changed, 97 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d60b711e97e9..c220bf904ee0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -4161,3 +4161,98 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
}
kvfree(snap);
}
+
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits existing vma to create new vma for user provided input range
+ *
+ * Return: 0 if success
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct drm_gpuva_op *__op;
+ bool is_cpu_addr_mirror = false;
+ int err;
+
+ vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
+ DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
+ NULL, start);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (list_empty(&ops->list)) {
+ err = 0;
+ goto free_ops;
+ }
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va)))
+ is_cpu_addr_mirror = true;
+ else
+ is_cpu_addr_mirror = false;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP)
+ /* In case of madvise ops DRM_GPUVA_OP_MAP is always preceded by
+ * DRM_GPUVA_OP_REMAP, so ensure we assign op->map.is_cpu_addr_mirror true
+ * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+ */
+ op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+
+ print_op(vm->xe, __op);
+ }
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto unwind_ops;
+
+ xe_vm_lock(vm, false);
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+ struct xe_vma_mem_attr temp_attr;
+
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ /* There should be no unmap */
+ XE_WARN_ON("UNEXPECTED UNMAP");
+ xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ /* Store attributes for REMAP UNMAPPED VMA, so they can be assigned
+ * to newly MAPPED vma.
+ */
+ cp_vma_mem_attr(&temp_attr, &vma->attr);
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_MAP) {
+ vma = op->map.vma;
+ cp_vma_mem_attr(&vma->attr, &temp_attr);
+ }
+ }
+
+ xe_vm_unlock(vm);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return 0;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+ if (ops)
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 1ef98113fa5b..8151b1b01a13 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call
2025-05-27 16:39 ` [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
@ 2025-05-29 2:49 ` Matthew Brost
2025-05-29 3:14 ` Ghimiray, Himal Prasad
2025-06-02 6:31 ` Dan Carpenter
1 sibling, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 2:49 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:53PM +0530, Himal Prasad Ghimiray wrote:
> If the start or end of the input address range lies within a system allocator
> vma, split the vma to create new VMAs as per the input range.
>
> v2 (Matthew Brost)
> - Add lockdep_assert_write for vm->lock
> - Remove unnecessary page aligned checks
> - Add kernel-doc and comments
> - Remove unnecessary unwind_ops and return
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm.c | 95 ++++++++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm.h | 2 +
> 2 files changed, 97 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index d60b711e97e9..c220bf904ee0 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -4161,3 +4161,98 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> }
> kvfree(snap);
> }
> +
> +/**
> + * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
> + * @vm: Pointer to the xe_vm structure
> + * @start: Starting input address
> + * @range: Size of the input range
> + *
> + * This function splits existing vma to create new vma for user provided input range
> + *
> + * Return: 0 if success
> + */
> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
> +{
> + struct xe_vma_ops vops;
> + struct drm_gpuva_ops *ops = NULL;
> + struct drm_gpuva_op *__op;
> + bool is_cpu_addr_mirror = false;
> + int err;
> +
> + vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
> +
> + lockdep_assert_held_write(&vm->lock);
> +
> + vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
> + DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
> + NULL, start);
> + if (IS_ERR(ops))
> + return PTR_ERR(ops);
> +
> + if (list_empty(&ops->list)) {
> + err = 0;
> + goto free_ops;
> + }
> +
> + drm_gpuva_for_each_op(__op, ops) {
> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> +
> + if (__op->op == DRM_GPUVA_OP_REMAP) {
> + if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va)))
> + is_cpu_addr_mirror = true;
> + else
> + is_cpu_addr_mirror = false;
Maybe let's add a sanity check here to make sure we know what we are
doing.
xe_assert(xe, !remap_op);
remap_op = true;
> + }
> +
> + if (__op->op == DRM_GPUVA_OP_MAP)
> + /* In case of madvise ops DRM_GPUVA_OP_MAP is always preceded by
> + * DRM_GPUVA_OP_REMAP, so ensure we assign op->map.is_cpu_addr_mirror true
> + * if REMAP is for xe_vma_is_cpu_addr_mirror vma
> + */
> + op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
xe_assert(xe, remap_op);
> +
> + print_op(vm->xe, __op);
> + }
> +
> + xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
> + err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
> + if (err)
> + goto unwind_ops;
> +
> + xe_vm_lock(vm, false);
> +
> + drm_gpuva_for_each_op(__op, ops) {
> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
> + struct xe_vma *vma;
> + struct xe_vma_mem_attr temp_attr;
> +
> + if (__op->op == DRM_GPUVA_OP_UNMAP) {
> + /* There should be no unmap */
> + XE_WARN_ON("UNEXPECTED UNMAP");
> + xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
> + } else if (__op->op == DRM_GPUVA_OP_REMAP) {
> + vma = gpuva_to_vma(op->base.remap.unmap->va);
> + /* Store attributes for REMAP UNMAPPED VMA, so they can be assigned
> + * to newly MAPPED vma.
> + */
> + cp_vma_mem_attr(&temp_attr, &vma->attr);
Again maybe drop the helper here.
> + xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
> + } else if (__op->op == DRM_GPUVA_OP_MAP) {
> + vma = op->map.vma;
> + cp_vma_mem_attr(&vma->attr, &temp_attr);
And here.
With the nits fixed:
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> + }
> + }
> +
> + xe_vm_unlock(vm);
> + drm_gpuva_ops_free(&vm->gpuvm, ops);
> + return 0;
> +
> +unwind_ops:
> + vm_bind_ioctl_ops_unwind(vm, &ops, 1);
> +free_ops:
> + if (ops)
> + drm_gpuva_ops_free(&vm->gpuvm, ops);
> + return err;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 1ef98113fa5b..8151b1b01a13 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
>
> struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
>
> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
> +
> /**
> * to_userptr_vma() - Return a pointer to an embedding userptr vma
> * @vma: Pointer to the embedded struct xe_vma
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call
2025-05-29 2:49 ` Matthew Brost
@ 2025-05-29 3:14 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-05-29 3:14 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 08:19, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:53PM +0530, Himal Prasad Ghimiray wrote:
>> If the start or end of the input address range lies within a system allocator
>> vma, split the vma to create new VMAs as per the input range.
>>
>> v2 (Matthew Brost)
>> - Add lockdep_assert_write for vm->lock
>> - Remove unnecessary page aligned checks
>> - Add kernel-doc and comments
>> - Remove unnecessary unwind_ops and return
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_vm.c | 95 ++++++++++++++++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_vm.h | 2 +
>> 2 files changed, 97 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index d60b711e97e9..c220bf904ee0 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -4161,3 +4161,98 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
>> }
>> kvfree(snap);
>> }
>> +
>> +/**
>> + * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
>> + * @vm: Pointer to the xe_vm structure
>> + * @start: Starting input address
>> + * @range: Size of the input range
>> + *
>> + * This function splits existing vma to create new vma for user provided input range
>> + *
>> + * Return: 0 if success
>> + */
>> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
>> +{
>> + struct xe_vma_ops vops;
>> + struct drm_gpuva_ops *ops = NULL;
>> + struct drm_gpuva_op *__op;
>> + bool is_cpu_addr_mirror = false;
>> + int err;
>> +
>> + vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
>> +
>> + lockdep_assert_held_write(&vm->lock);
>> +
>> + vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
>> + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
>> + DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
>> + NULL, start);
>> + if (IS_ERR(ops))
>> + return PTR_ERR(ops);
>> +
>> + if (list_empty(&ops->list)) {
>> + err = 0;
>> + goto free_ops;
>> + }
>> +
>> + drm_gpuva_for_each_op(__op, ops) {
>> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
>> +
>> + if (__op->op == DRM_GPUVA_OP_REMAP) {
>> + if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va)))
>> + is_cpu_addr_mirror = true;
>> + else
>> + is_cpu_addr_mirror = false;
>
> Maybe let's add a sanity check here to make sure we know what we are
> doing.
Sounds good.>
> xe_assert(xe, !remap_op);
> remap_op = true;
>> + }
>> +
>> + if (__op->op == DRM_GPUVA_OP_MAP)
>> + /* In case of madvise ops DRM_GPUVA_OP_MAP is always preceded by
>> + * DRM_GPUVA_OP_REMAP, so ensure we assign op->map.is_cpu_addr_mirror true
>> + * if REMAP is for xe_vma_is_cpu_addr_mirror vma
>> + */
>> + op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
>
> xe_assert(xe, remap_op);
>
>> +
>> + print_op(vm->xe, __op);
>> + }
>> +
>> + xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
>> + err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
>> + if (err)
>> + goto unwind_ops;
>> +
>> + xe_vm_lock(vm, false);
>> +
>> + drm_gpuva_for_each_op(__op, ops) {
>> + struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
>> + struct xe_vma *vma;
>> + struct xe_vma_mem_attr temp_attr;
>> +
>> + if (__op->op == DRM_GPUVA_OP_UNMAP) {
>> + /* There should be no unmap */
>> + XE_WARN_ON("UNEXPECTED UNMAP");
>> + xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
>> + } else if (__op->op == DRM_GPUVA_OP_REMAP) {
>> + vma = gpuva_to_vma(op->base.remap.unmap->va);
>> + /* Store attributes for REMAP UNMAPPED VMA, so they can be assigned
>> + * to newly MAPPED vma.
>> + */
>> + cp_vma_mem_attr(&temp_attr, &vma->attr);
>
> Again maybe drop the helper here.
Sure.
>
>> + xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
>> + } else if (__op->op == DRM_GPUVA_OP_MAP) {
>> + vma = op->map.vma;
>> + cp_vma_mem_attr(&vma->attr, &temp_attr);
>
> And here.
>
> With the nits fixed:
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Thanks
>
>> + }
>> + }
>> +
>> + xe_vm_unlock(vm);
>> + drm_gpuva_ops_free(&vm->gpuvm, ops);
>> + return 0;
>> +
>> +unwind_ops:
>> + vm_bind_ioctl_ops_unwind(vm, &ops, 1);
>> +free_ops:
>> + if (ops)
>> + drm_gpuva_ops_free(&vm->gpuvm, ops);
>> + return err;
>> +}
>> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
>> index 1ef98113fa5b..8151b1b01a13 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.h
>> +++ b/drivers/gpu/drm/xe/xe_vm.h
>> @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
>>
>> struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
>>
>> +int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
>> +
>> /**
>> * to_userptr_vma() - Return a pointer to an embedding userptr vma
>> * @vma: Pointer to the embedded struct xe_vma
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* Re: [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call
2025-05-27 16:39 ` [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
2025-05-29 2:49 ` Matthew Brost
@ 2025-06-02 6:31 ` Dan Carpenter
1 sibling, 0 replies; 72+ messages in thread
From: Dan Carpenter @ 2025-06-02 6:31 UTC (permalink / raw)
To: oe-kbuild, Himal Prasad Ghimiray, intel-xe
Cc: lkp, oe-kbuild-all, Himal Prasad Ghimiray
Hi Himal,
kernel test robot noticed the following build warnings:
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Himal-Prasad-Ghimiray/Introduce-drm_gpuvm_sm_map_ops_flags-enums-for-sm_map_ops/20250528-041919
base: https://gitlab.freedesktop.org/drm/xe/kernel.git drm-xe-next
patch link: https://lore.kernel.org/r/20250527164003.1068118-10-himal.prasad.ghimiray%40intel.com
patch subject: [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call
config: loongarch-randconfig-r073-20250529 (https://download.01.org/0day-ci/archive/20250530/202505300720.uhAJlLnM-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 15.1.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202505300720.uhAJlLnM-lkp@intel.com/
New smatch warnings:
drivers/gpu/drm/xe/xe_vm.c:4255 xe_vm_alloc_madvise_vma() warn: variable dereferenced before check 'ops' (see line 4194)
vim +/ops +4255 drivers/gpu/drm/xe/xe_vm.c
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4175 int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4176 {
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4177 struct xe_vma_ops vops;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4178 struct drm_gpuva_ops *ops = NULL;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4179 struct drm_gpuva_op *__op;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4180 bool is_cpu_addr_mirror = false;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4181 int err;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4182
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4183 vm_dbg(&vm->xe->drm, "MADVISE IN: addr=0x%016llx, size=0x%016llx", start, range);
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4184
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4185 lockdep_assert_held_write(&vm->lock);
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4186
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4187 vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4188 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, start, range,
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4189 DRM_GPUVM_SKIP_GEM_OBJ_VA_SPLIT_MADVISE,
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4190 NULL, start);
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4191 if (IS_ERR(ops))
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4192 return PTR_ERR(ops);
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4193
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 @4194 if (list_empty(&ops->list)) {
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4195 err = 0;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4196 goto free_ops;
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4197 }
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4198
[ snip ]
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4252 unwind_ops:
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4253 vm_bind_ioctl_ops_unwind(vm, &ops, 1);
^^^^
It might be nicer to change this... See below?
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 4254 free_ops:
524911c937a5a5 Himal Prasad Ghimiray 2025-05-27 @4255 if (ops)
^^^^^^^^
This check is unnecessary.
If we applied this diff then it wouldn't look like maybe
vm_bind_ioctl_ops_unwind() frees ops and sets it to NULL?
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5a978da411b0..48723754b0f9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2822,13 +2822,13 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
}
static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
- struct drm_gpuva_ops **ops,
+ struct drm_gpuva_ops *ops,
int num_ops_list)
{
int i;
for (i = num_ops_list - 1; i >= 0; --i) {
- struct drm_gpuva_ops *__ops = ops[i];
+ struct drm_gpuva_ops *__ops = &ops[i];
struct drm_gpuva_op *__op;
if (!__ops)
@@ -3703,7 +3703,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
unwind_ops:
if (err && err != -ENODATA)
- vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ vm_bind_ioctl_ops_unwind(vm, *ops, args->num_binds);
xe_vma_ops_fini(&vops);
for (i = args->num_binds - 1; i >= 0; --i)
if (ops[i])
@@ -3788,7 +3788,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
unwind_ops:
if (err && err != -ENODATA)
- vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+ vm_bind_ioctl_ops_unwind(vm, ops, 1);
xe_vma_ops_fini(&vops);
drm_gpuva_ops_free(&vm->gpuvm, ops);
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply related [flat|nested] 72+ messages in thread
* [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (8 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 09/19] drm/xe/svm: Split system allocator vma incase of madvise call Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-29 22:43 ` Matthew Brost
2025-05-30 21:34 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
` (16 subsequent siblings)
26 siblings, 2 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
This driver-specific ioctl enables UMDs to control the memory attributes
for GPU VMAs within a specified input range. If the start or end
addresses fall within an existing VMA, the VMA is split accordingly. The
attributes of the VMA are modified as provided by the users. The old
mappings of the VMAs are invalidated, and TLB invalidation is performed
if necessary.
v2 (Matthew Brost)
- xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
fault mode too
- fix tlb invalidation skip for same ranges in multiple op
- use helper for tlb invalidation
- use xe_svm_notifier_lock/unlock helper
- s/lockdep_assert_held/lockdep_assert_held_write
- Add kernel-doc
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/Makefile | 1 +
drivers/gpu/drm/xe/xe_device.c | 2 +
drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
4 files changed, 282 insertions(+)
create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index c5d6681645ed..dc64bdcddfdc 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -117,6 +117,7 @@ xe-y += xe_bb.o \
xe_uc.o \
xe_uc_fw.o \
xe_vm.o \
+ xe_vm_madvise.o \
xe_vram.o \
xe_vram_freq.o \
xe_vsec.o \
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index d4b6e623aa48..b9791c614749 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -61,6 +61,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
+#include "xe_vm_madvise.h"
#include "xe_vram.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
@@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
new file mode 100644
index 000000000000..f7edefe5f6cf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_vm_madvise.h"
+
+#include <linux/nospec.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+
+static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
+ u64 addr, u64 range)
+{
+ struct xe_vma **vmas, **__vmas;
+ struct drm_gpuva *gpuva;
+ int max_vmas = 8;
+
+ lockdep_assert_held(&vm->lock);
+
+ *num_vmas = 0;
+ vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
+ if (!vmas)
+ return NULL;
+
+ vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (*num_vmas == max_vmas) {
+ max_vmas <<= 1;
+ __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
+ if (!__vmas) {
+ kfree(vmas);
+ return NULL;
+ }
+ vmas = __vmas;
+ }
+
+ vmas[*num_vmas] = vma;
+ (*num_vmas)++;
+ }
+
+ vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
+
+ if (!*num_vmas) {
+ kfree(vmas);
+ return NULL;
+ }
+
+ return vmas;
+}
+
+static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise_ops ops)
+{
+ /* Implementation pending */
+ return 0;
+}
+
+static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise_ops ops)
+{
+ /* Implementation pending */
+ return 0;
+}
+
+static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise_ops ops)
+{
+ /* Implementation pending */
+ return 0;
+}
+
+static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise_ops ops)
+{
+ /* Implementation pending */
+ return 0;
+}
+
+typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
+
+static const madvise_func madvise_funcs[] = {
+ [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
+ [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
+ [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
+ [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
+};
+
+static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
+{
+ struct drm_gpuva *gpuva;
+ struct xe_tile *tile;
+ u8 id;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT) <= 0)
+ XE_WARN_ON(1);
+
+ *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ continue;
+
+ if (xe_vma_is_userptr(vma)) {
+ WARN_ON_ONCE(!mmu_interval_check_retry
+ (&to_userptr_vma(vma)->userptr.notifier,
+ to_userptr_vma(vma)->userptr.notifier_seq));
+
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ DMA_RESV_USAGE_BOOKKEEP));
+ }
+
+ if (xe_vma_bo(vma))
+ xe_bo_lock(xe_vma_bo(vma), false);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes(tile, vma))
+ *tile_mask |= BIT(id);
+ }
+
+ if (xe_vma_bo(vma))
+ xe_bo_unlock(xe_vma_bo(vma));
+ }
+}
+
+static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ u8 tile_mask = 0;
+
+ xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
+ if (!tile_mask)
+ return 0;
+
+ xe_device_wmb(vm->xe);
+
+ return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
+}
+
+static int input_ranges_same(struct drm_xe_madvise_ops *old,
+ struct drm_xe_madvise_ops *new)
+{
+ return (new->start == old->start && new->range == old->range);
+}
+
+/**
+ * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
+ * @dev: DRM device pointer
+ * @data: Pointer to ioctl data (drm_xe_madvise*)
+ * @file: DRM file pointer
+ *
+ * Handles the MADVISE ioctl to provide memory advice for vma's within
+ * input range.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_madvise_ops *advs_ops;
+ struct drm_xe_madvise *args = data;
+ struct xe_vm *vm;
+ struct xe_vma **vmas = NULL;
+ int num_vmas, err = 0;
+ int i, j, attr_type;
+ bool needs_invalidation;
+
+ if (XE_IOCTL_DBG(xe, args->num_ops < 1))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ down_write(&vm->lock);
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
+ if (args->num_ops > 1) {
+ u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
+
+ advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!advs_ops) {
+ err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ err = __copy_from_user(advs_ops, madvise_user,
+ sizeof(struct drm_xe_madvise_ops) *
+ args->num_ops);
+ if (XE_IOCTL_DBG(xe, err)) {
+ err = -EFAULT;
+ goto free_advs_ops;
+ }
+ } else {
+ advs_ops = &args->ops;
+ }
+
+ for (i = 0; i < args->num_ops; i++) {
+ xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
+
+ vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
+ if (!vmas) {
+ err = -ENOMEM;
+ goto free_advs_ops;
+ }
+
+ attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
+ err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
+
+ kfree(vmas);
+ vmas = NULL;
+
+ if (err)
+ goto free_advs_ops;
+ }
+
+ for (i = 0; i < args->num_ops; i++) {
+ needs_invalidation = true;
+ for (j = i + 1; j < args->num_ops; ++j) {
+ if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
+ needs_invalidation = false;
+ break;
+ }
+ }
+ if (needs_invalidation) {
+ err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
+ advs_ops[i].start + advs_ops[i].range);
+ if (err)
+ goto free_advs_ops;
+ }
+ }
+
+free_advs_ops:
+ if (args->num_ops > 1)
+ kvfree(advs_ops);
+unlock_vm:
+ up_write(&vm->lock);
+ xe_vm_put(vm);
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
new file mode 100644
index 000000000000..c5cdd058c322
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_VM_MADVISE_H_
+#define _XE_VM_MADVISE_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-05-27 16:39 ` [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
@ 2025-05-29 22:43 ` Matthew Brost
2025-05-30 6:36 ` Matthew Brost
2025-05-30 21:34 ` Matthew Brost
1 sibling, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 22:43 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:54PM +0530, Himal Prasad Ghimiray wrote:
> This driver-specific ioctl enables UMDs to control the memory attributes
> for GPU VMAs within a specified input range. If the start or end
> addresses fall within an existing VMA, the VMA is split accordingly. The
> attributes of the VMA are modified as provided by the users. The old
> mappings of the VMAs are invalidated, and TLB invalidation is performed
> if necessary.
>
> v2(Matthew brost)
> - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
> fault mode too
> - fix tlb invalidation skip for same ranges in multiple op
> - use helper for tlb invalidation
> - use xe_svm_notifier_lock/unlock helper
> - s/lockdep_assert_held/lockdep_assert_held_write
> - Add kernel-doc
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/Makefile | 1 +
> drivers/gpu/drm/xe/xe_device.c | 2 +
> drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
> 4 files changed, 282 insertions(+)
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
>
> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> index c5d6681645ed..dc64bdcddfdc 100644
> --- a/drivers/gpu/drm/xe/Makefile
> +++ b/drivers/gpu/drm/xe/Makefile
> @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
> xe_uc.o \
> xe_uc_fw.o \
> xe_vm.o \
> + xe_vm_madvise.o \
> xe_vram.o \
> xe_vram_freq.o \
> xe_vsec.o \
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index d4b6e623aa48..b9791c614749 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -61,6 +61,7 @@
> #include "xe_ttm_stolen_mgr.h"
> #include "xe_ttm_sys_mgr.h"
> #include "xe_vm.h"
> +#include "xe_vm_madvise.h"
> #include "xe_vram.h"
> #include "xe_vsec.h"
> #include "xe_wait_user_fence.h"
> @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> };
>
> static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> new file mode 100644
> index 000000000000..f7edefe5f6cf
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -0,0 +1,264 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#include "xe_vm_madvise.h"
> +
> +#include <linux/nospec.h>
> +#include <drm/ttm/ttm_tt.h>
> +#include <drm/xe_drm.h>
> +
> +#include "xe_bo.h"
> +#include "xe_gt_tlb_invalidation.h"
> +#include "xe_pt.h"
> +#include "xe_svm.h"
> +
> +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
> + u64 addr, u64 range)
> +{
> + struct xe_vma **vmas, **__vmas;
> + struct drm_gpuva *gpuva;
> + int max_vmas = 8;
> +
> + lockdep_assert_held(&vm->lock);
> +
> + *num_vmas = 0;
> + vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
> + if (!vmas)
> + return NULL;
> +
> + vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (*num_vmas == max_vmas) {
> + max_vmas <<= 1;
> + __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
> + if (!__vmas) {
> + kfree(vmas);
> + return NULL;
> + }
> + vmas = __vmas;
> + }
> +
> + vmas[*num_vmas] = vma;
> + (*num_vmas)++;
> + }
> +
> + vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
> +
> + if (!*num_vmas) {
> + kfree(vmas);
> + return NULL;
> + }
> +
> + return vmas;
> +}
> +
> +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
> +
See my latest replies in patch #19, if possible, making these functions
so they can't fail would be best.
> +static const madvise_func madvise_funcs[] = {
> + [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> + [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> + [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> + [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
> +};
> +
> +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> +{
> + struct drm_gpuva *gpuva;
> + struct xe_tile *tile;
> + u8 id;
> +
> + lockdep_assert_held_write(&vm->lock);
> +
> + if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> + false, MAX_SCHEDULE_TIMEOUT) <= 0)
> + XE_WARN_ON(1);
> +
> + *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (xe_vma_is_cpu_addr_mirror(vma))
> + continue;
> +
> + if (xe_vma_is_userptr(vma)) {
> + WARN_ON_ONCE(!mmu_interval_check_retry
> + (&to_userptr_vma(vma)->userptr.notifier,
> + to_userptr_vma(vma)->userptr.notifier_seq));
> +
> + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> + DMA_RESV_USAGE_BOOKKEEP));
> + }
> +
I think the similar code in xe_vm_invalidate_vma is a bit stale and
contains remnants from when we issued the userptr TLB invalidation
outside of the notifier lock in the MMU notifier. There, it should be:
if (xe_vma_is_userptr())
lockdep_assert(userptr notifier lock held);
extra sanity checks
So with that, you need the userptr notifier lock in read mode here.
Also, I think the first WARN_ON is likely to always trigger here, as the
CPU pages are likely valid, so that one should be removed.
In xe_vm_invalidate_vma, the extra tests are protected by the
PROVE_LOCKING Kconfig option. So if you keep the dma-resv check, I’d
recommend hiding it behind PROVE_LOCKING.
Note: speaking of the userptr notifier lock, Matt Auld has a patch
series [1] to unify this with the GPU SVM notifier lock. If that lands
before your changes, then you should use the SVM notifier lock in read
mode for userptr here.
[1] https://patchwork.freedesktop.org/series/146553/
> + if (xe_vma_bo(vma))
> + xe_bo_lock(xe_vma_bo(vma), false);
> +
Ah yes, you do need the BO lock here to prevent races from BO moves
issuing a zap. Maybe we can update the xe_pt_zap_ptes lockdep assertions
for userptr/BOs, along with some comments? We could do that
independently of this series. I suggest this because I had forgotten how
this worked in the previous revision. Maybe I can post this
independently.
> + for_each_tile(tile, vm->xe, id) {
> + if (xe_pt_zap_ptes(tile, vma))
> + *tile_mask |= BIT(id);
> + }
> +
> + if (xe_vma_bo(vma))
> + xe_bo_unlock(xe_vma_bo(vma));
> + }
> +}
> +
> +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> +{
> + u8 tile_mask = 0;
> +
> + xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> + if (!tile_mask)
> + return 0;
> +
> + xe_device_wmb(vm->xe);
> +
> + return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
> +}
> +
> +static int input_ranges_same(struct drm_xe_madvise_ops *old,
> + struct drm_xe_madvise_ops *new)
> +{
> + return (new->start == old->start && new->range == old->range);
> +}
> +
> +/**
> + * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
> + * @dev: DRM device pointer
> + * @data: Pointer to ioctl data (drm_xe_madvise*)
> + * @file: DRM file pointer
> + *
> + * Handles the MADVISE ioctl to provide memory advice for vma's within
> + * input range.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> + struct xe_device *xe = to_xe_device(dev);
> + struct xe_file *xef = to_xe_file(file);
> + struct drm_xe_madvise_ops *advs_ops;
> + struct drm_xe_madvise *args = data;
> + struct xe_vm *vm;
> + struct xe_vma **vmas = NULL;
> + int num_vmas, err = 0;
> + int i, j, attr_type;
> + bool needs_invalidation;
> +
> + if (XE_IOCTL_DBG(xe, args->num_ops < 1))
> + return -EINVAL;
> +
> + vm = xe_vm_lookup(xef, args->vm_id);
> + if (XE_IOCTL_DBG(xe, !vm))
> + return -EINVAL;
> +
> + down_write(&vm->lock);
> +
> + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> + err = -ENOENT;
> + goto unlock_vm;
> + }
> +
> + if (args->num_ops > 1) {
> + u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
> +
> + advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
> + GFP_KERNEL | __GFP_ACCOUNT |
> + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + if (!advs_ops) {
> + err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
> + goto unlock_vm;
> + }
> +
> + err = __copy_from_user(advs_ops, madvise_user,
> + sizeof(struct drm_xe_madvise_ops) *
> + args->num_ops);
> + if (XE_IOCTL_DBG(xe, err)) {
> + err = -EFAULT;
> + goto free_advs_ops;
> + }
> + } else {
> + advs_ops = &args->ops;
> + }
> +
See my reply in patch #19, I think we should validate user input ahead
of the below loop rather than failing mid-loop on bad user input.
> + for (i = 0; i < args->num_ops; i++) {
> + xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
> +
> + vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> + if (!vmas) {
> + err = -ENOMEM;
> + goto free_advs_ops;
> + }
> +
> + attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> + err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
> +
> + kfree(vmas);
> + vmas = NULL;
> +
> + if (err)
> + goto free_advs_ops;
> + }
> +
> + for (i = 0; i < args->num_ops; i++) {
> + needs_invalidation = true;
> + for (j = i + 1; j < args->num_ops; ++j) {
> + if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
> + needs_invalidation = false;
> + break;
> + }
> + }
I'd drop this extra check. The invalidation code already short circuits
on the tile_present / tile_invalidated bits in the range or VMA so I
don't think an extra short circuit here buys us a ton.
Matt
> + if (needs_invalidation) {
> + err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
> + advs_ops[i].start + advs_ops[i].range);
> + if (err)
> + goto free_advs_ops;
> + }
> + }
> +
> +free_advs_ops:
> + if (args->num_ops > 1)
> + kvfree(advs_ops);
> +unlock_vm:
> + up_write(&vm->lock);
> + xe_vm_put(vm);
> + return err;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> new file mode 100644
> index 000000000000..c5cdd058c322
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#ifndef _XE_VM_MADVISE_H_
> +#define _XE_VM_MADVISE_H_
> +
> +struct drm_device;
> +struct drm_file;
> +
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *file);
> +
> +#endif
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-05-29 22:43 ` Matthew Brost
@ 2025-05-30 6:36 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 6:36 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Thu, May 29, 2025 at 03:43:40PM -0700, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:54PM +0530, Himal Prasad Ghimiray wrote:
> > This driver-specific ioctl enables UMDs to control the memory attributes
> > for GPU VMAs within a specified input range. If the start or end
> > addresses fall within an existing VMA, the VMA is split accordingly. The
> > attributes of the VMA are modified as provided by the users. The old
> > mappings of the VMAs are invalidated, and TLB invalidation is performed
> > if necessary.
> >
> > v2(Matthew brost)
> > - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
> > fault mode too
> > - fix tlb invalidation skip for same ranges in multiple op
> > - use helper for tlb invalidation
> > - use xe_svm_notifier_lock/unlock helper
> > - s/lockdep_assert_held/lockdep_assert_held_write
> > - Add kernel-doc
> >
> > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > ---
> > drivers/gpu/drm/xe/Makefile | 1 +
> > drivers/gpu/drm/xe/xe_device.c | 2 +
> > drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
> > drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
> > 4 files changed, 282 insertions(+)
> > create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> > create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
> >
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index c5d6681645ed..dc64bdcddfdc 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
> > xe_uc.o \
> > xe_uc_fw.o \
> > xe_vm.o \
> > + xe_vm_madvise.o \
> > xe_vram.o \
> > xe_vram_freq.o \
> > xe_vsec.o \
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index d4b6e623aa48..b9791c614749 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -61,6 +61,7 @@
> > #include "xe_ttm_stolen_mgr.h"
> > #include "xe_ttm_sys_mgr.h"
> > #include "xe_vm.h"
> > +#include "xe_vm_madvise.h"
> > #include "xe_vram.h"
> > #include "xe_vsec.h"
> > #include "xe_wait_user_fence.h"
> > @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> > DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> > DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> > + DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> > };
> >
> > static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > new file mode 100644
> > index 000000000000..f7edefe5f6cf
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > @@ -0,0 +1,264 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2024 Intel Corporation
> > + */
> > +
> > +#include "xe_vm_madvise.h"
> > +
> > +#include <linux/nospec.h>
> > +#include <drm/ttm/ttm_tt.h>
> > +#include <drm/xe_drm.h>
> > +
> > +#include "xe_bo.h"
> > +#include "xe_gt_tlb_invalidation.h"
> > +#include "xe_pt.h"
> > +#include "xe_svm.h"
> > +
> > +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
> > + u64 addr, u64 range)
> > +{
> > + struct xe_vma **vmas, **__vmas;
> > + struct drm_gpuva *gpuva;
> > + int max_vmas = 8;
> > +
> > + lockdep_assert_held(&vm->lock);
> > +
> > + *num_vmas = 0;
> > + vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
> > + if (!vmas)
> > + return NULL;
> > +
> > + vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> > +
> > + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> > + struct xe_vma *vma = gpuva_to_vma(gpuva);
> > +
> > + if (*num_vmas == max_vmas) {
> > + max_vmas <<= 1;
> > + __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
> > + if (!__vmas) {
> > + kfree(vmas);
> > + return NULL;
> > + }
> > + vmas = __vmas;
> > + }
> > +
> > + vmas[*num_vmas] = vma;
> > + (*num_vmas)++;
> > + }
> > +
> > + vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
> > +
> > + if (!*num_vmas) {
> > + kfree(vmas);
> > + return NULL;
> > + }
> > +
> > + return vmas;
> > +}
> > +
> > +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> > + struct xe_vma **vmas, int num_vmas,
> > + struct drm_xe_madvise_ops ops)
> > +{
> > + /* Implementation pending */
> > + return 0;
> > +}
> > +
> > +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > + struct xe_vma **vmas, int num_vmas,
> > + struct drm_xe_madvise_ops ops)
> > +{
> > + /* Implementation pending */
> > + return 0;
> > +}
> > +
> > +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> > + struct xe_vma **vmas, int num_vmas,
> > + struct drm_xe_madvise_ops ops)
> > +{
> > + /* Implementation pending */
> > + return 0;
> > +}
> > +
> > +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
> > + struct xe_vma **vmas, int num_vmas,
> > + struct drm_xe_madvise_ops ops)
> > +{
> > + /* Implementation pending */
> > + return 0;
> > +}
> > +
> > +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> > + struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
> > +
>
> See my latest replies in patch #19, if possible, making these functions
> so they can't fail would be best.
>
> > +static const madvise_func madvise_funcs[] = {
> > + [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> > + [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> > + [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> > + [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
> > +};
> > +
> > +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> > +{
> > + struct drm_gpuva *gpuva;
> > + struct xe_tile *tile;
> > + u8 id;
> > +
> > + lockdep_assert_held_write(&vm->lock);
> > +
> > + if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> > + false, MAX_SCHEDULE_TIMEOUT) <= 0)
> > + XE_WARN_ON(1);
> > +
> > + *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> > +
> > + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> > + struct xe_vma *vma = gpuva_to_vma(gpuva);
> > +
> > + if (xe_vma_is_cpu_addr_mirror(vma))
> > + continue;
> > +
> > + if (xe_vma_is_userptr(vma)) {
> > + WARN_ON_ONCE(!mmu_interval_check_retry
> > + (&to_userptr_vma(vma)->userptr.notifier,
> > + to_userptr_vma(vma)->userptr.notifier_seq));
> > +
> > + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> > + DMA_RESV_USAGE_BOOKKEEP));
> > + }
> > +
>
> I think the similar code in xe_vm_invalidate_vma is a bit stale and
> contains remnants from when we issued the userptr TLB invalidation
> outside of the notifier lock in the MMU notifier. There, it should be:
>
> if (xe_vma_is_userptr())
> lockdep_assert(userptr notifier lock held);
> extra sanity checks
>
> So with that, you need the userptr notifier lock in read mode here.
>
> Also, I think the first WARN_ON is likely to always trigger here, as the
> CPU pages are likely valid, so that one should be removed.
>
> In xe_vm_invalidate_vma, the extra tests are protected by the
> PROVE_LOCKING Kconfig option. So if you keep the dma-resv check, I’d
> recommend hiding it behind PROVE_LOCKING.
>
> Note: speaking of the userptr notifier lock, Matt Auld has a patch
> series [1] to unify this with the GPU SVM notifier lock. If that lands
> before your changes, then you should use the SVM notifier lock in read
> mode for userptr here.
>
> [1] https://patchwork.freedesktop.org/series/146553/
>
> > + if (xe_vma_bo(vma))
> > + xe_bo_lock(xe_vma_bo(vma), false);
> > +
>
> Ah yes, you do need the BO lock here to prevent races from BO moves
> issuing a zap. Maybe we can update the xe_pt_zap_ptes lockdep assertions
> for userptr/BOs, along with some comments? We could do that
> independently of this series. I suggest this because I had forgotten how
> this worked in the previous revision. Maybe I can post this
> independently.
>
I posted a patch for this [2] and also realized a few more things. These
locks need to be held for an extended period, as detailed here [3].
Additionally, you don’t set vma->tile_invalidated here — you’ll need to
do that. Make sure to use WRITE_ONCE when doing so, and include a
'pairs with' comment like in [2] and in the existing SVM code for the
tile_invalidated / tile_present ranges.
Matt
[2] https://patchwork.freedesktop.org/series/149676/
[3] https://patchwork.freedesktop.org/patch/655898/?series=149550&rev=1#comment_1201162
> > + for_each_tile(tile, vm->xe, id) {
> > + if (xe_pt_zap_ptes(tile, vma))
> > + *tile_mask |= BIT(id);
> > + }
> > +
> > + if (xe_vma_bo(vma))
> > + xe_bo_unlock(xe_vma_bo(vma));
> > + }
> > +}
> > +
> > +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> > +{
> > + u8 tile_mask = 0;
> > +
> > + xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> > + if (!tile_mask)
> > + return 0;
> > +
> > + xe_device_wmb(vm->xe);
> > +
> > + return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
> > +}
> > +
> > +static int input_ranges_same(struct drm_xe_madvise_ops *old,
> > + struct drm_xe_madvise_ops *new)
> > +{
> > + return (new->start == old->start && new->range == old->range);
> > +}
> > +
> > +/**
> > + * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
> > + * @dev: DRM device pointer
> > + * @data: Pointer to ioctl data (drm_xe_madvise*)
> > + * @file: DRM file pointer
> > + *
> > + * Handles the MADVISE ioctl to provide memory advice for vma's within
> > + * input range.
> > + *
> > + * Return: 0 on success or a negative error code on failure.
> > + */
> > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > +{
> > + struct xe_device *xe = to_xe_device(dev);
> > + struct xe_file *xef = to_xe_file(file);
> > + struct drm_xe_madvise_ops *advs_ops;
> > + struct drm_xe_madvise *args = data;
> > + struct xe_vm *vm;
> > + struct xe_vma **vmas = NULL;
> > + int num_vmas, err = 0;
> > + int i, j, attr_type;
> > + bool needs_invalidation;
> > +
> > + if (XE_IOCTL_DBG(xe, args->num_ops < 1))
> > + return -EINVAL;
> > +
> > + vm = xe_vm_lookup(xef, args->vm_id);
> > + if (XE_IOCTL_DBG(xe, !vm))
> > + return -EINVAL;
> > +
> > + down_write(&vm->lock);
> > +
> > + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> > + err = -ENOENT;
> > + goto unlock_vm;
> > + }
> > +
> > + if (args->num_ops > 1) {
> > + u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
> > +
> > + advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
> > + GFP_KERNEL | __GFP_ACCOUNT |
> > + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> > + if (!advs_ops) {
> > + err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
> > + goto unlock_vm;
> > + }
> > +
> > + err = __copy_from_user(advs_ops, madvise_user,
> > + sizeof(struct drm_xe_madvise_ops) *
> > + args->num_ops);
> > + if (XE_IOCTL_DBG(xe, err)) {
> > + err = -EFAULT;
> > + goto free_advs_ops;
> > + }
> > + } else {
> > + advs_ops = &args->ops;
> > + }
> > +
>
> See my reply in patch #19, I think we should validate user input ahead
> of the below loop rather than failing mid-loop on bad user input.
>
> > + for (i = 0; i < args->num_ops; i++) {
> > + xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
> > +
> > + vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> > + if (!vmas) {
> > + err = -ENOMEM;
> > + goto free_advs_ops;
> > + }
> > +
> > + attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> > + err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
> > +
> > + kfree(vmas);
> > + vmas = NULL;
> > +
> > + if (err)
> > + goto free_advs_ops;
> > + }
> > +
> > + for (i = 0; i < args->num_ops; i++) {
> > + needs_invalidation = true;
> > + for (j = i + 1; j < args->num_ops; ++j) {
> > + if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
> > + needs_invalidation = false;
> > + break;
> > + }
> > + }
>
> I'd drop this extra check. The invalidation code already short ciruits
> on the tile_present / tile_invalidated bits in the range or VMA so I
> don't think an extra short circuit here buys us a ton.
>
> Matt
>
> > + if (needs_invalidation) {
> > + err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
> > + advs_ops[i].start + advs_ops[i].range);
> > + if (err)
> > + goto free_advs_ops;
> > + }
> > + }
> > +
> > +free_advs_ops:
> > + if (args->num_ops > 1)
> > + kvfree(advs_ops);
> > +unlock_vm:
> > + up_write(&vm->lock);
> > + xe_vm_put(vm);
> > + return err;
> > +}
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > new file mode 100644
> > index 000000000000..c5cdd058c322
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > @@ -0,0 +1,15 @@
> > +/* SPDX-License-Identifier: MIT */
> > +/*
> > + * Copyright © 2024 Intel Corporation
> > + */
> > +
> > +#ifndef _XE_VM_MADVISE_H_
> > +#define _XE_VM_MADVISE_H_
> > +
> > +struct drm_device;
> > +struct drm_file;
> > +
> > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file);
> > +
> > +#endif
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 72+ messages in thread
* Re: [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-05-27 16:39 ` [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
2025-05-29 22:43 ` Matthew Brost
@ 2025-05-30 21:34 ` Matthew Brost
2025-06-10 4:52 ` Ghimiray, Himal Prasad
1 sibling, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 21:34 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:54PM +0530, Himal Prasad Ghimiray wrote:
> This driver-specific ioctl enables UMDs to control the memory attributes
> for GPU VMAs within a specified input range. If the start or end
> addresses fall within an existing VMA, the VMA is split accordingly. The
> attributes of the VMA are modified as provided by the users. The old
> mappings of the VMAs are invalidated, and TLB invalidation is performed
> if necessary.
>
> v2(Matthew brost)
> - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
> fault mode too
> - fix tlb invalidation skip for same ranges in multiple op
> - use helper for tlb invalidation
> - use xe_svm_notifier_lock/unlock helper
> - s/lockdep_assert_held/lockdep_assert_held_write
> - Add kernel-doc
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/Makefile | 1 +
> drivers/gpu/drm/xe/xe_device.c | 2 +
> drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
> 4 files changed, 282 insertions(+)
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
>
> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> index c5d6681645ed..dc64bdcddfdc 100644
> --- a/drivers/gpu/drm/xe/Makefile
> +++ b/drivers/gpu/drm/xe/Makefile
> @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
> xe_uc.o \
> xe_uc_fw.o \
> xe_vm.o \
> + xe_vm_madvise.o \
> xe_vram.o \
> xe_vram_freq.o \
> xe_vsec.o \
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index d4b6e623aa48..b9791c614749 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -61,6 +61,7 @@
> #include "xe_ttm_stolen_mgr.h"
> #include "xe_ttm_sys_mgr.h"
> #include "xe_vm.h"
> +#include "xe_vm_madvise.h"
> #include "xe_vram.h"
> #include "xe_vsec.h"
> #include "xe_wait_user_fence.h"
> @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> };
>
> static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> new file mode 100644
> index 000000000000..f7edefe5f6cf
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -0,0 +1,264 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#include "xe_vm_madvise.h"
> +
> +#include <linux/nospec.h>
> +#include <drm/ttm/ttm_tt.h>
> +#include <drm/xe_drm.h>
> +
> +#include "xe_bo.h"
> +#include "xe_gt_tlb_invalidation.h"
> +#include "xe_pt.h"
> +#include "xe_svm.h"
> +
> +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
> + u64 addr, u64 range)
> +{
> + struct xe_vma **vmas, **__vmas;
> + struct drm_gpuva *gpuva;
> + int max_vmas = 8;
> +
> + lockdep_assert_held(&vm->lock);
> +
> + *num_vmas = 0;
> + vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
> + if (!vmas)
> + return NULL;
> +
> + vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (*num_vmas == max_vmas) {
> + max_vmas <<= 1;
> + __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
> + if (!__vmas) {
> + kfree(vmas);
> + return NULL;
> + }
> + vmas = __vmas;
> + }
> +
> + vmas[*num_vmas] = vma;
> + (*num_vmas)++;
> + }
> +
> + vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
> +
> + if (!*num_vmas) {
> + kfree(vmas);
> + return NULL;
> + }
> +
> + return vmas;
> +}
> +
> +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas,
> + struct drm_xe_madvise_ops ops)
> +{
> + /* Implementation pending */
> + return 0;
> +}
> +
> +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> + struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
> +
> +static const madvise_func madvise_funcs[] = {
> + [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> + [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> + [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> + [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
> +};
> +
> +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> +{
> + struct drm_gpuva *gpuva;
> + struct xe_tile *tile;
> + u8 id;
> +
> + lockdep_assert_held_write(&vm->lock);
> +
> + if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> + false, MAX_SCHEDULE_TIMEOUT) <= 0)
> + XE_WARN_ON(1);
> +
> + *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (xe_vma_is_cpu_addr_mirror(vma))
> + continue;
> +
> + if (xe_vma_is_userptr(vma)) {
> + WARN_ON_ONCE(!mmu_interval_check_retry
> + (&to_userptr_vma(vma)->userptr.notifier,
> + to_userptr_vma(vma)->userptr.notifier_seq));
> +
> + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> + DMA_RESV_USAGE_BOOKKEEP));
> + }
> +
> + if (xe_vma_bo(vma))
> + xe_bo_lock(xe_vma_bo(vma), false);
> +
> + for_each_tile(tile, vm->xe, id) {
> + if (xe_pt_zap_ptes(tile, vma))
> + *tile_mask |= BIT(id);
> + }
> +
> + if (xe_vma_bo(vma))
> + xe_bo_unlock(xe_vma_bo(vma));
> + }
> +}
> +
> +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> +{
> + u8 tile_mask = 0;
> +
> + xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> + if (!tile_mask)
> + return 0;
> +
> + xe_device_wmb(vm->xe);
> +
> + return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
> +}
> +
> +static int input_ranges_same(struct drm_xe_madvise_ops *old,
> + struct drm_xe_madvise_ops *new)
> +{
> + return (new->start == old->start && new->range == old->range);
> +}
> +
> +/**
> + * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
> + * @dev: DRM device pointer
> + * @data: Pointer to ioctl data (drm_xe_madvise*)
> + * @file: DRM file pointer
> + *
> + * Handles the MADVISE ioctl to provide memory advice for vma's within
> + * input range.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> + struct xe_device *xe = to_xe_device(dev);
> + struct xe_file *xef = to_xe_file(file);
> + struct drm_xe_madvise_ops *advs_ops;
> + struct drm_xe_madvise *args = data;
> + struct xe_vm *vm;
> + struct xe_vma **vmas = NULL;
> + int num_vmas, err = 0;
> + int i, j, attr_type;
> + bool needs_invalidation;
> +
> + if (XE_IOCTL_DBG(xe, args->num_ops < 1))
> + return -EINVAL;
> +
> + vm = xe_vm_lookup(xef, args->vm_id);
> + if (XE_IOCTL_DBG(xe, !vm))
> + return -EINVAL;
> +
> + down_write(&vm->lock);
> +
> + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> + err = -ENOENT;
> + goto unlock_vm;
> + }
> +
> + if (args->num_ops > 1) {
> + u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
> +
> + advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
> + GFP_KERNEL | __GFP_ACCOUNT |
> + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + if (!advs_ops) {
> + err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
> + goto unlock_vm;
> + }
> +
> + err = __copy_from_user(advs_ops, madvise_user,
> + sizeof(struct drm_xe_madvise_ops) *
> + args->num_ops);
> + if (XE_IOCTL_DBG(xe, err)) {
> + err = -EFAULT;
> + goto free_advs_ops;
> + }
> + } else {
> + advs_ops = &args->ops;
> + }
> +
> + for (i = 0; i < args->num_ops; i++) {
> + xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
> +
> + vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> + if (!vmas) {
> + err = -ENOMEM;
> + goto free_advs_ops;
> + }
> +
> + attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> + err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
> +
> + kfree(vmas);
> + vmas = NULL;
> +
> + if (err)
> + goto free_advs_ops;
> + }
> +
> + for (i = 0; i < args->num_ops; i++) {
> + needs_invalidation = true;
> + for (j = i + 1; j < args->num_ops; ++j) {
> + if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
> + needs_invalidation = false;
> + break;
> + }
> + }
> + if (needs_invalidation) {
> + err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
> + advs_ops[i].start + advs_ops[i].range);
> + if (err)
> + goto free_advs_ops;
In addition to all the other comments around invalidations - you don't
always need to issue TLB invalidations.
- For pat_index, only if the VMAs pat_index changed + valid page tables
- For atomic, only if the VMAs atomic mode changed + valid page tables +
current placement would cause issues
- Purgeable - never
- Preferred placement - valid page tables + current placement != desired
placement
We likely can set a temp bit in the vfuncs in either the VMA (BO,
userptr based) or the SVM range(s) which the invalidation func can parse
/ clear indicating an invalidation is required.
Matt
> + }
> + }
> +
> +free_advs_ops:
> + if (args->num_ops > 1)
> + kvfree(advs_ops);
> +unlock_vm:
> + up_write(&vm->lock);
> + xe_vm_put(vm);
> + return err;
> +}
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> new file mode 100644
> index 000000000000..c5cdd058c322
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2024 Intel Corporation
> + */
> +
> +#ifndef _XE_VM_MADVISE_H_
> +#define _XE_VM_MADVISE_H_
> +
> +struct drm_device;
> +struct drm_file;
> +
> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> + struct drm_file *file);
> +
> +#endif
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-05-30 21:34 ` Matthew Brost
@ 2025-06-10 4:52 ` Ghimiray, Himal Prasad
2025-06-10 5:13 ` Matthew Brost
0 siblings, 1 reply; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-06-10 4:52 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 31-05-2025 03:04, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:54PM +0530, Himal Prasad Ghimiray wrote:
>> This driver-specific ioctl enables UMDs to control the memory attributes
>> for GPU VMAs within a specified input range. If the start or end
>> addresses fall within an existing VMA, the VMA is split accordingly. The
>> attributes of the VMA are modified as provided by the users. The old
>> mappings of the VMAs are invalidated, and TLB invalidation is performed
>> if necessary.
>>
>> v2(Matthew brost)
>> - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
>> fault mode too
>> - fix tlb invalidation skip for same ranges in multiple op
>> - use helper for tlb invalidation
>> - use xe_svm_notifier_lock/unlock helper
>> - s/lockdep_assert_held/lockdep_assert_held_write
>> - Add kernel-doc
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/xe/Makefile | 1 +
>> drivers/gpu/drm/xe/xe_device.c | 2 +
>> drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
>> 4 files changed, 282 insertions(+)
>> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
>> create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
>>
>> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
>> index c5d6681645ed..dc64bdcddfdc 100644
>> --- a/drivers/gpu/drm/xe/Makefile
>> +++ b/drivers/gpu/drm/xe/Makefile
>> @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
>> xe_uc.o \
>> xe_uc_fw.o \
>> xe_vm.o \
>> + xe_vm_madvise.o \
>> xe_vram.o \
>> xe_vram_freq.o \
>> xe_vsec.o \
>> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
>> index d4b6e623aa48..b9791c614749 100644
>> --- a/drivers/gpu/drm/xe/xe_device.c
>> +++ b/drivers/gpu/drm/xe/xe_device.c
>> @@ -61,6 +61,7 @@
>> #include "xe_ttm_stolen_mgr.h"
>> #include "xe_ttm_sys_mgr.h"
>> #include "xe_vm.h"
>> +#include "xe_vm_madvise.h"
>> #include "xe_vram.h"
>> #include "xe_vsec.h"
>> #include "xe_wait_user_fence.h"
>> @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
>> DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
>> DRM_RENDER_ALLOW),
>> DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
>> + DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
>> };
>>
>> static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
>> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> new file mode 100644
>> index 000000000000..f7edefe5f6cf
>> --- /dev/null
>> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> @@ -0,0 +1,264 @@
>> +// SPDX-License-Identifier: MIT
>> +/*
>> + * Copyright © 2024 Intel Corporation
>> + */
>> +
>> +#include "xe_vm_madvise.h"
>> +
>> +#include <linux/nospec.h>
>> +#include <drm/ttm/ttm_tt.h>
>> +#include <drm/xe_drm.h>
>> +
>> +#include "xe_bo.h"
>> +#include "xe_gt_tlb_invalidation.h"
>> +#include "xe_pt.h"
>> +#include "xe_svm.h"
>> +
>> +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
>> + u64 addr, u64 range)
>> +{
>> + struct xe_vma **vmas, **__vmas;
>> + struct drm_gpuva *gpuva;
>> + int max_vmas = 8;
>> +
>> + lockdep_assert_held(&vm->lock);
>> +
>> + *num_vmas = 0;
>> + vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
>> + if (!vmas)
>> + return NULL;
>> +
>> + vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
>> +
>> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
>> + struct xe_vma *vma = gpuva_to_vma(gpuva);
>> +
>> + if (*num_vmas == max_vmas) {
>> + max_vmas <<= 1;
>> + __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
>> + if (!__vmas) {
>> + kfree(vmas);
>> + return NULL;
>> + }
>> + vmas = __vmas;
>> + }
>> +
>> + vmas[*num_vmas] = vma;
>> + (*num_vmas)++;
>> + }
>> +
>> + vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
>> +
>> + if (!*num_vmas) {
>> + kfree(vmas);
>> + return NULL;
>> + }
>> +
>> + return vmas;
>> +}
>> +
>> +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
>> + struct xe_vma **vmas, int num_vmas,
>> + struct drm_xe_madvise_ops ops)
>> +{
>> + /* Implementation pending */
>> + return 0;
>> +}
>> +
>> +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
>> + struct xe_vma **vmas, int num_vmas,
>> + struct drm_xe_madvise_ops ops)
>> +{
>> + /* Implementation pending */
>> + return 0;
>> +}
>> +
>> +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
>> + struct xe_vma **vmas, int num_vmas,
>> + struct drm_xe_madvise_ops ops)
>> +{
>> + /* Implementation pending */
>> + return 0;
>> +}
>> +
>> +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
>> + struct xe_vma **vmas, int num_vmas,
>> + struct drm_xe_madvise_ops ops)
>> +{
>> + /* Implementation pending */
>> + return 0;
>> +}
>> +
>> +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
>> + struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
>> +
>> +static const madvise_func madvise_funcs[] = {
>> + [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
>> + [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
>> + [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
>> + [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
>> +};
>> +
>> +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
>> +{
>> + struct drm_gpuva *gpuva;
>> + struct xe_tile *tile;
>> + u8 id;
>> +
>> + lockdep_assert_held_write(&vm->lock);
>> +
>> + if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
>> + false, MAX_SCHEDULE_TIMEOUT) <= 0)
>> + XE_WARN_ON(1);
>> +
>> + *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
>> +
>> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
>> + struct xe_vma *vma = gpuva_to_vma(gpuva);
>> +
>> + if (xe_vma_is_cpu_addr_mirror(vma))
>> + continue;
>> +
>> + if (xe_vma_is_userptr(vma)) {
>> + WARN_ON_ONCE(!mmu_interval_check_retry
>> + (&to_userptr_vma(vma)->userptr.notifier,
>> + to_userptr_vma(vma)->userptr.notifier_seq));
>> +
>> + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
>> + DMA_RESV_USAGE_BOOKKEEP));
>> + }
>> +
>> + if (xe_vma_bo(vma))
>> + xe_bo_lock(xe_vma_bo(vma), false);
>> +
>> + for_each_tile(tile, vm->xe, id) {
>> + if (xe_pt_zap_ptes(tile, vma))
>> + *tile_mask |= BIT(id);
>> + }
>> +
>> + if (xe_vma_bo(vma))
>> + xe_bo_unlock(xe_vma_bo(vma));
>> + }
>> +}
>> +
>> +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> + u8 tile_mask = 0;
>> +
>> + xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
>> + if (!tile_mask)
>> + return 0;
>> +
>> + xe_device_wmb(vm->xe);
>> +
>> + return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
>> +}
>> +
>> +static int input_ranges_same(struct drm_xe_madvise_ops *old,
>> + struct drm_xe_madvise_ops *new)
>> +{
>> + return (new->start == old->start && new->range == old->range);
>> +}
>> +
>> +/**
>> + * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
>> + * @dev: DRM device pointer
>> + * @data: Pointer to ioctl data (drm_xe_madvise*)
>> + * @file: DRM file pointer
>> + *
>> + * Handles the MADVISE ioctl to provide memory advice for vma's within
>> + * input range.
>> + *
>> + * Return: 0 on success or a negative error code on failure.
>> + */
>> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>> +{
>> + struct xe_device *xe = to_xe_device(dev);
>> + struct xe_file *xef = to_xe_file(file);
>> + struct drm_xe_madvise_ops *advs_ops;
>> + struct drm_xe_madvise *args = data;
>> + struct xe_vm *vm;
>> + struct xe_vma **vmas = NULL;
>> + int num_vmas, err = 0;
>> + int i, j, attr_type;
>> + bool needs_invalidation;
>> +
>> + if (XE_IOCTL_DBG(xe, args->num_ops < 1))
>> + return -EINVAL;
>> +
>> + vm = xe_vm_lookup(xef, args->vm_id);
>> + if (XE_IOCTL_DBG(xe, !vm))
>> + return -EINVAL;
>> +
>> + down_write(&vm->lock);
>> +
>> + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
>> + err = -ENOENT;
>> + goto unlock_vm;
>> + }
>> +
>> + if (args->num_ops > 1) {
>> + u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
>> +
>> + advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
>> + GFP_KERNEL | __GFP_ACCOUNT |
>> + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
>> + if (!advs_ops) {
>> + err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
>> + goto unlock_vm;
>> + }
>> +
>> + err = __copy_from_user(advs_ops, madvise_user,
>> + sizeof(struct drm_xe_madvise_ops) *
>> + args->num_ops);
>> + if (XE_IOCTL_DBG(xe, err)) {
>> + err = -EFAULT;
>> + goto free_advs_ops;
>> + }
>> + } else {
>> + advs_ops = &args->ops;
>> + }
>> +
>> + for (i = 0; i < args->num_ops; i++) {
>> + xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
>> +
>> + vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
>> + if (!vmas) {
>> + err = -ENOMEM;
>> + goto free_advs_ops;
>> + }
>> +
>> + attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
>> + err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
>> +
>> + kfree(vmas);
>> + vmas = NULL;
>> +
>> + if (err)
>> + goto free_advs_ops;
>> + }
>> +
>> + for (i = 0; i < args->num_ops; i++) {
>> + needs_invalidation = true;
>> + for (j = i + 1; j < args->num_ops; ++j) {
>> + if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
>> + needs_invalidation = false;
>> + break;
>> + }
>> + }
>> + if (needs_invalidation) {
>> + err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
>> + advs_ops[i].start + advs_ops[i].range);
>> + if (err)
>> + goto free_advs_ops;
>
> In addition to all the other comments around invalidations - you don't
> always need to issue TLB invalidations.
>
> - For pat_index, only if the VMAs pat_index changed + valid page tables
> - For atomic, only if the VMAs atomic mode changed + valid page tables +
> current placement would cause issues
> - Purgeable - never
> - Preferred placement - valid page tables + current placement != desired
> placement
>
> We likely can set a temp bit in the vfuncs in either the VMA (BO,
> userptr based) or the SVM range(s) which the invalidation func can parse
> / clear indicating an invalidation is required.
In the current implementation, I’m zapping PTEs for SVM ranges and
BO/Userptr-based VMAs within the madvise range, and issuing a TLB
invalidation for the entire madvise range. Should we instead issue TLB
invalidations at the granularity of the SVM range or the individual VMA
(BO or Userptr)? I don’t see how we can avoid TLB invalidation if some
VMAs require it and others don’t, given that we’re currently
invalidating the entire range.
>
> Matt
>
>> + }
>> + }
>> +
>> +free_advs_ops:
>> + if (args->num_ops > 1)
>> + kvfree(advs_ops);
>> +unlock_vm:
>> + up_write(&vm->lock);
>> + xe_vm_put(vm);
>> + return err;
>> +}
>> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
>> new file mode 100644
>> index 000000000000..c5cdd058c322
>> --- /dev/null
>> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
>> @@ -0,0 +1,15 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright © 2024 Intel Corporation
>> + */
>> +
>> +#ifndef _XE_VM_MADVISE_H_
>> +#define _XE_VM_MADVISE_H_
>> +
>> +struct drm_device;
>> +struct drm_file;
>> +
>> +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
>> + struct drm_file *file);
>> +
>> +#endif
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe
2025-06-10 4:52 ` Ghimiray, Himal Prasad
@ 2025-06-10 5:13 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-06-10 5:13 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
On Tue, Jun 10, 2025 at 10:22:42AM +0530, Ghimiray, Himal Prasad wrote:
>
>
> On 31-05-2025 03:04, Matthew Brost wrote:
> > On Tue, May 27, 2025 at 10:09:54PM +0530, Himal Prasad Ghimiray wrote:
> > > This driver-specific ioctl enables UMDs to control the memory attributes
> > > for GPU VMAs within a specified input range. If the start or end
> > > addresses fall within an existing VMA, the VMA is split accordingly. The
> > > attributes of the VMA are modified as provided by the users. The old
> > > mappings of the VMAs are invalidated, and TLB invalidation is performed
> > > if necessary.
> > >
> > > v2(Matthew brost)
> > > - xe_vm_in_fault_mode can't be enabled by Mesa, hence allow ioctl in non
> > > fault mode too
> > > - fix tlb invalidation skip for same ranges in multiple op
> > > - use helper for tlb invalidation
> > > - use xe_svm_notifier_lock/unlock helper
> > > - s/lockdep_assert_held/lockdep_assert_held_write
> > > - Add kernel-doc
> > >
> > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > > ---
> > > drivers/gpu/drm/xe/Makefile | 1 +
> > > drivers/gpu/drm/xe/xe_device.c | 2 +
> > > drivers/gpu/drm/xe/xe_vm_madvise.c | 264 +++++++++++++++++++++++++++++
> > > drivers/gpu/drm/xe/xe_vm_madvise.h | 15 ++
> > > 4 files changed, 282 insertions(+)
> > > create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.c
> > > create mode 100644 drivers/gpu/drm/xe/xe_vm_madvise.h
> > >
> > > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > > index c5d6681645ed..dc64bdcddfdc 100644
> > > --- a/drivers/gpu/drm/xe/Makefile
> > > +++ b/drivers/gpu/drm/xe/Makefile
> > > @@ -117,6 +117,7 @@ xe-y += xe_bb.o \
> > > xe_uc.o \
> > > xe_uc_fw.o \
> > > xe_vm.o \
> > > + xe_vm_madvise.o \
> > > xe_vram.o \
> > > xe_vram_freq.o \
> > > xe_vsec.o \
> > > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > > index d4b6e623aa48..b9791c614749 100644
> > > --- a/drivers/gpu/drm/xe/xe_device.c
> > > +++ b/drivers/gpu/drm/xe/xe_device.c
> > > @@ -61,6 +61,7 @@
> > > #include "xe_ttm_stolen_mgr.h"
> > > #include "xe_ttm_sys_mgr.h"
> > > #include "xe_vm.h"
> > > +#include "xe_vm_madvise.h"
> > > #include "xe_vram.h"
> > > #include "xe_vsec.h"
> > > #include "xe_wait_user_fence.h"
> > > @@ -197,6 +198,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> > > DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> > > DRM_RENDER_ALLOW),
> > > DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> > > + DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> > > };
> > > static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> > > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > new file mode 100644
> > > index 000000000000..f7edefe5f6cf
> > > --- /dev/null
> > > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > @@ -0,0 +1,264 @@
> > > +// SPDX-License-Identifier: MIT
> > > +/*
> > > + * Copyright © 2024 Intel Corporation
> > > + */
> > > +
> > > +#include "xe_vm_madvise.h"
> > > +
> > > +#include <linux/nospec.h>
> > > +#include <drm/ttm/ttm_tt.h>
> > > +#include <drm/xe_drm.h>
> > > +
> > > +#include "xe_bo.h"
> > > +#include "xe_gt_tlb_invalidation.h"
> > > +#include "xe_pt.h"
> > > +#include "xe_svm.h"
> > > +
> > > +static struct xe_vma **get_vmas(struct xe_vm *vm, int *num_vmas,
> > > + u64 addr, u64 range)
> > > +{
> > > + struct xe_vma **vmas, **__vmas;
> > > + struct drm_gpuva *gpuva;
> > > + int max_vmas = 8;
> > > +
> > > + lockdep_assert_held(&vm->lock);
> > > +
> > > + *num_vmas = 0;
> > > + vmas = kmalloc_array(max_vmas, sizeof(*vmas), GFP_KERNEL);
> > > + if (!vmas)
> > > + return NULL;
> > > +
> > > + vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
> > > +
> > > + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
> > > + struct xe_vma *vma = gpuva_to_vma(gpuva);
> > > +
> > > + if (*num_vmas == max_vmas) {
> > > + max_vmas <<= 1;
> > > + __vmas = krealloc(vmas, max_vmas * sizeof(*vmas), GFP_KERNEL);
> > > + if (!__vmas) {
> > > + kfree(vmas);
> > > + return NULL;
> > > + }
> > > + vmas = __vmas;
> > > + }
> > > +
> > > + vmas[*num_vmas] = vma;
> > > + (*num_vmas)++;
> > > + }
> > > +
> > > + vm_dbg(&vm->xe->drm, "*num_vmas = %d\n", *num_vmas);
> > > +
> > > + if (!*num_vmas) {
> > > + kfree(vmas);
> > > + return NULL;
> > > + }
> > > +
> > > + return vmas;
> > > +}
> > > +
> > > +static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> > > + struct xe_vma **vmas, int num_vmas,
> > > + struct drm_xe_madvise_ops ops)
> > > +{
> > > + /* Implementation pending */
> > > + return 0;
> > > +}
> > > +
> > > +static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > > + struct xe_vma **vmas, int num_vmas,
> > > + struct drm_xe_madvise_ops ops)
> > > +{
> > > + /* Implementation pending */
> > > + return 0;
> > > +}
> > > +
> > > +static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> > > + struct xe_vma **vmas, int num_vmas,
> > > + struct drm_xe_madvise_ops ops)
> > > +{
> > > + /* Implementation pending */
> > > + return 0;
> > > +}
> > > +
> > > +static int madvise_purgeable_state(struct xe_device *xe, struct xe_vm *vm,
> > > + struct xe_vma **vmas, int num_vmas,
> > > + struct drm_xe_madvise_ops ops)
> > > +{
> > > + /* Implementation pending */
> > > + return 0;
> > > +}
> > > +
> > > +typedef int (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
> > > + struct xe_vma **vmas, int num_vmas, struct drm_xe_madvise_ops ops);
> > > +
> > > +static const madvise_func madvise_funcs[] = {
> > > + [DRM_XE_VMA_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
> > > + [DRM_XE_VMA_ATTR_ATOMIC] = madvise_atomic,
> > > + [DRM_XE_VMA_ATTR_PAT] = madvise_pat_index,
> > > + [DRM_XE_VMA_ATTR_PURGEABLE_STATE] = madvise_purgeable_state,
> > > +};
> > > +
> > > +static void xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end, u8 *tile_mask)
> > > +{
> > > + struct drm_gpuva *gpuva;
> > > + struct xe_tile *tile;
> > > + u8 id;
> > > +
> > > + lockdep_assert_held_write(&vm->lock);
> > > +
> > > + if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
> > > + false, MAX_SCHEDULE_TIMEOUT) <= 0)
> > > + XE_WARN_ON(1);
> > > +
> > > + *tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
> > > +
> > > + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> > > + struct xe_vma *vma = gpuva_to_vma(gpuva);
> > > +
> > > + if (xe_vma_is_cpu_addr_mirror(vma))
> > > + continue;
> > > +
> > > + if (xe_vma_is_userptr(vma)) {
> > > + WARN_ON_ONCE(!mmu_interval_check_retry
> > > + (&to_userptr_vma(vma)->userptr.notifier,
> > > + to_userptr_vma(vma)->userptr.notifier_seq));
> > > +
> > > + WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
> > > + DMA_RESV_USAGE_BOOKKEEP));
> > > + }
> > > +
> > > + if (xe_vma_bo(vma))
> > > + xe_bo_lock(xe_vma_bo(vma), false);
> > > +
> > > + for_each_tile(tile, vm->xe, id) {
> > > + if (xe_pt_zap_ptes(tile, vma))
> > > + *tile_mask |= BIT(id);
> > > + }
> > > +
> > > + if (xe_vma_bo(vma))
> > > + xe_bo_unlock(xe_vma_bo(vma));
> > > + }
> > > +}
> > > +
> > > +static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
> > > +{
> > > + u8 tile_mask = 0;
> > > +
> > > + xe_zap_ptes_in_madvise_range(vm, start, end, &tile_mask);
> > > + if (!tile_mask)
> > > + return 0;
> > > +
> > > + xe_device_wmb(vm->xe);
> > > +
> > > + return xe_vm_range_tilemask_tlb_invalidation(vm, start, end, tile_mask);
> > > +}
> > > +
> > > +static int input_ranges_same(struct drm_xe_madvise_ops *old,
> > > + struct drm_xe_madvise_ops *new)
> > > +{
> > > + return (new->start == old->start && new->range == old->range);
> > > +}
> > > +
> > > +/**
> > > + * xe_vm_madvise_ioctl - Handle MADVise ioctl for a VM
> > > + * @dev: DRM device pointer
> > > + * @data: Pointer to ioctl data (drm_xe_madvise*)
> > > + * @file: DRM file pointer
> > > + *
> > > + * Handles the MADVISE ioctl to provide memory advice for vma's within
> > > + * input range.
> > > + *
> > > + * Return: 0 on success or a negative error code on failure.
> > > + */
> > > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > > +{
> > > + struct xe_device *xe = to_xe_device(dev);
> > > + struct xe_file *xef = to_xe_file(file);
> > > + struct drm_xe_madvise_ops *advs_ops;
> > > + struct drm_xe_madvise *args = data;
> > > + struct xe_vm *vm;
> > > + struct xe_vma **vmas = NULL;
> > > + int num_vmas, err = 0;
> > > + int i, j, attr_type;
> > > + bool needs_invalidation;
> > > +
> > > + if (XE_IOCTL_DBG(xe, args->num_ops < 1))
> > > + return -EINVAL;
> > > +
> > > + vm = xe_vm_lookup(xef, args->vm_id);
> > > + if (XE_IOCTL_DBG(xe, !vm))
> > > + return -EINVAL;
> > > +
> > > + down_write(&vm->lock);
> > > +
> > > + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> > > + err = -ENOENT;
> > > + goto unlock_vm;
> > > + }
> > > +
> > > + if (args->num_ops > 1) {
> > > + u64 __user *madvise_user = u64_to_user_ptr(args->vector_of_ops);
> > > +
> > > + advs_ops = kvmalloc_array(args->num_ops, sizeof(struct drm_xe_madvise_ops),
> > > + GFP_KERNEL | __GFP_ACCOUNT |
> > > + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> > > + if (!advs_ops) {
> > > + err = args->num_ops > 1 ? -ENOBUFS : -ENOMEM;
> > > + goto unlock_vm;
> > > + }
> > > +
> > > + err = __copy_from_user(advs_ops, madvise_user,
> > > + sizeof(struct drm_xe_madvise_ops) *
> > > + args->num_ops);
> > > + if (XE_IOCTL_DBG(xe, err)) {
> > > + err = -EFAULT;
> > > + goto free_advs_ops;
> > > + }
> > > + } else {
> > > + advs_ops = &args->ops;
> > > + }
> > > +
> > > + for (i = 0; i < args->num_ops; i++) {
> > > + xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
> > > +
> > > + vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> > > + if (!vmas) {
> > > + err = -ENOMEM;
> > > + goto free_advs_ops;
> > > + }
> > > +
> > > + attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> > > + err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
> > > +
> > > + kfree(vmas);
> > > + vmas = NULL;
> > > +
> > > + if (err)
> > > + goto free_advs_ops;
> > > + }
> > > +
> > > + for (i = 0; i < args->num_ops; i++) {
> > > + needs_invalidation = true;
> > > + for (j = i + 1; j < args->num_ops; ++j) {
> > > + if (input_ranges_same(&advs_ops[j], &advs_ops[i])) {
> > > + needs_invalidation = false;
> > > + break;
> > > + }
> > > + }
> > > + if (needs_invalidation) {
> > > + err = xe_vm_invalidate_madvise_range(vm, advs_ops[i].start,
> > > + advs_ops[i].start + advs_ops[i].range);
> > > + if (err)
> > > + goto free_advs_ops;
> >
> > In additional to all the other comments around invalidations - you don't
> > always need to issue TLB invalidations.
> >
> > - For pat_index, only if the VMAs pat_index changed + valid page tables
> > - For atomic, only if the VMAs atomic mode changed + valid page tables +
> > current placement would cause issues
> > - Purgeable - never
> > - Preferred placement - valid page tables + current placement != desired
> > placement
> >
> > We likley can set a temp bit in the vfuncs in either the VMA (BO,
> > userptr based) or the SVM range(s) which the invalidation func can parse
> > / clear indicating an invalidation is required.
>
> In the current implementation, I’m zapping PTEs for SVM ranges and
> BO/Userptr-based VMAs within the madvise range, and issuing a TLB
> invalidation for the entire madvise range. Should we instead issue TLB
> invalidations at the granularity of the SVM range or the individual VMA (BO
> or Userptr)? I don’t see how we can avoid TLB invalidation if some VMAs
> require it and others don’t, given that we’re currently invalidating the
> entire range.
>
I guess we could skip the zap on a per range / VMA basis and then still
issue a single TLB invalidation - I don't think we want to issue
multiple TLB invalidations as those have been profiled to be quite slow
- at least 100us. If we skipped the zap, we wouldn't get a fault but
rather just a TLB miss. I think that is a fair tradeoff.
Matt
> >
> > Matt
> >
> > > + }
> > > + }
> > > +
> > > +free_advs_ops:
> > > + if (args->num_ops > 1)
> > > + kvfree(advs_ops);
> > > +unlock_vm:
> > > + up_write(&vm->lock);
> > > + xe_vm_put(vm);
> > > + return err;
> > > +}
> > > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > > new file mode 100644
> > > index 000000000000..c5cdd058c322
> > > --- /dev/null
> > > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
> > > @@ -0,0 +1,15 @@
> > > +/* SPDX-License-Identifier: MIT */
> > > +/*
> > > + * Copyright © 2024 Intel Corporation
> > > + */
> > > +
> > > +#ifndef _XE_VM_MADVISE_H_
> > > +#define _XE_VM_MADVISE_H_
> > > +
> > > +struct drm_device;
> > > +struct drm_file;
> > > +
> > > +int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
> > > + struct drm_file *file);
> > > +
> > > +#endif
> > > --
> > > 2.34.1
> > >
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (9 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 10/19] drm/xe: Implement madvise ioctl for xe Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-29 22:54 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
` (15 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
In the case of the MADVISE ioctl, if the start or end addresses fall
within a VMA and existing SVM ranges are present, remove the existing
SVM mappings. Then, continue with ops_parse to create new VMAs by REMAP
unmapping of old one.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 25 +++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_svm.h | 8 ++++++++
drivers/gpu/drm/xe/xe_vm.c | 18 +++++++++++++++++-
3 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a4d53c24fcbc..5691bb9dbf26 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -942,6 +942,31 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}
+/**
+ * xe_svm_range_clean_if_addr_within - Clean SVM mappings and ranges
+ * @start: start addr
+ * @end: end addr
+ *
+ * This function cleans up svm ranges if start or end address are inside them.
+ */
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ }
+ }
+ }
+}
+
/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index af8f285b6caa..b36f70ab3d03 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -92,6 +92,9 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -312,6 +315,11 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
return 0;
}
+static inline
+void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c220bf904ee0..8208409485f6 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2359,6 +2359,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+ u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+ if (op->base.remap.prev)
+ start = op->base.remap.prev->va.addr +
+ op->base.remap.prev->va.range;
+ if (op->base.remap.next)
+ end = op->base.remap.next->va.addr;
+
+ if (xe_vma_is_cpu_addr_mirror(old) &&
+ xe_svm_has_mapping(vm, start, end)) {
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(-EBUSY);
+ }
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
struct xe_svm_range *svm_range;
@@ -2662,7 +2678,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (xe_vma_is_cpu_addr_mirror(old) &&
xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_range_clean_if_addr_within(vm, start, end);
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
2025-05-27 16:39 ` [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
@ 2025-05-29 22:54 ` Matthew Brost
2025-06-12 9:02 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 22:54 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:55PM +0530, Himal Prasad Ghimiray wrote:
> In the case of the MADVISE ioctl, if the start or end addresses fall
> within a VMA and existing SVM ranges are present, remove the existing
> SVM mappings. Then, continue with ops_parse to create new VMAs by REMAP
> unmapping of old one.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 25 +++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 8 ++++++++
> drivers/gpu/drm/xe/xe_vm.c | 18 +++++++++++++++++-
> 3 files changed, 50 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index a4d53c24fcbc..5691bb9dbf26 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -942,6 +942,31 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
> return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
> }
>
> +/**
> + * xe_svm_range_clean_if_addr_within - Clean SVM mappings and ranges
> + * @start: start addr
> + * @end: end addr
> + *
> + * This function cleans up svm ranges if start or end address are inside them.
> + */
> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
> +{
> + struct drm_gpusvm_notifier *notifier, *next;
> +
lockdep_assert(vm lock in write mode);
> + drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
> + struct drm_gpusvm_range *range, *__next;
> +
> + drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
> + if (start > drm_gpusvm_range_start(range) ||
> + end < drm_gpusvm_range_end(range)) {
> + if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
> + drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
Why evict here? I don't think that is required.
> + __xe_svm_garbage_collector(vm, to_xe_range(range));
> + }
> + }
> + }
> +}
> +
> /**
> * xe_svm_bo_evict() - SVM evict BO to system memory
> * @bo: BO to evict
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index af8f285b6caa..b36f70ab3d03 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -92,6 +92,9 @@ bool xe_svm_range_validate(struct xe_vm *vm,
> u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
>
> u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
> +
> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -312,6 +315,11 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
> return 0;
> }
>
> +static inline
> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
> +{
> +}
Maybe...
s/xe_svm_range_clean_if_addr_within/xe_svm_unmap_address_range/
Or if you can think of something better but don't really like
xe_svm_range_clean_if_addr_within.
> +
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index c220bf904ee0..8208409485f6 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2359,6 +2359,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
> op->map.pat_index = pat_index;
> op->map.invalidate_on_bind =
> __xe_vm_needs_clear_scratch_pages(vm, flags);
> + } else if (__op->op == DRM_GPUVA_OP_REMAP) {
> + struct xe_vma *old =
> + gpuva_to_vma(op->base.remap.unmap->va);
> + u64 start = xe_vma_start(old), end = xe_vma_end(old);
> +
> + if (op->base.remap.prev)
> + start = op->base.remap.prev->va.addr +
> + op->base.remap.prev->va.range;
> + if (op->base.remap.next)
> + end = op->base.remap.next->va.addr;
> +
> + if (xe_vma_is_cpu_addr_mirror(old) &&
> + xe_svm_has_mapping(vm, start, end)) {
> + drm_gpuva_ops_free(&vm->gpuvm, ops);
> + return ERR_PTR(-EBUSY);
> + }
How about dropping this.
> } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
> struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
> struct xe_svm_range *svm_range;
> @@ -2662,7 +2678,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>
> if (xe_vma_is_cpu_addr_mirror(old) &&
> xe_svm_has_mapping(vm, start, end))
> - return -EBUSY;
> + xe_svm_range_clean_if_addr_within(vm, start, end);
>
And here add a flag to xe_vma_ops which says we are in madvise.
e.g. XE_VMA_OPS_FLAG_MADVISE
Then...
if (xe_vma_is_cpu_addr_mirror(old) &&
xe_svm_has_mapping(vm, start, end)) {
if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
xe_svm_range_clean_if_addr_within(vm, start, end);
else
return -EBUSY;
}
Matt
> op->remap.start = xe_vma_start(old);
> op->remap.range = xe_vma_size(old);
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
2025-05-29 22:54 ` Matthew Brost
@ 2025-06-12 9:02 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-06-12 9:02 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 30-05-2025 04:24, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:55PM +0530, Himal Prasad Ghimiray wrote:
>> In the case of the MADVISE ioctl, if the start or end addresses fall
>> within a VMA and existing SVM ranges are present, remove the existing
>> SVM mappings. Then, continue with ops_parse to create new VMAs by REMAP
>> unmapping of old one.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_svm.c | 25 +++++++++++++++++++++++++
>> drivers/gpu/drm/xe/xe_svm.h | 8 ++++++++
>> drivers/gpu/drm/xe/xe_vm.c | 18 +++++++++++++++++-
>> 3 files changed, 50 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
>> index a4d53c24fcbc..5691bb9dbf26 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.c
>> +++ b/drivers/gpu/drm/xe/xe_svm.c
>> @@ -942,6 +942,31 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
>> return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
>> }
>>
>> +/**
>> + * xe_svm_range_clean_if_addr_within - Clean SVM mappings and ranges
>> + * @start: start addr
>> + * @end: end addr
>> + *
>> + * This function cleans up svm ranges if start or end address are inside them.
>> + */
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> + struct drm_gpusvm_notifier *notifier, *next;
>> +
>
> lockdep_assert(vm lock in write mode);
>
>> + drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
>> + struct drm_gpusvm_range *range, *__next;
>> +
>> + drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
>> + if (start > drm_gpusvm_range_start(range) ||
>> + end < drm_gpusvm_range_end(range)) {
>> + if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
>> + drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
>
> Why evict here? I don't think that is required.
On a subsequent fault, the vram allocation for the smaller ranges fails the
first time with EFAULT in drm_gpusvm_migrate_to_devmem, and with a retry
it succeeds. But prefetch for drm_gpusvm_migrate_to_devmem bails out,
saying retry from userspace. Hence the evict ensures that prefetch after
this also works.
>
>> + __xe_svm_garbage_collector(vm, to_xe_range(range));
>> + }
>> + }
>> + }
>> +}
>> +
>> /**
>> * xe_svm_bo_evict() - SVM evict BO to system memory
>> * @bo: BO to evict
>> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
>> index af8f285b6caa..b36f70ab3d03 100644
>> --- a/drivers/gpu/drm/xe/xe_svm.h
>> +++ b/drivers/gpu/drm/xe/xe_svm.h
>> @@ -92,6 +92,9 @@ bool xe_svm_range_validate(struct xe_vm *vm,
>> u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
>>
>> u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>> +
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>> +
>> /**
>> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
>> * @range: SVM range
>> @@ -312,6 +315,11 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
>> return 0;
>> }
>>
>> +static inline
>> +void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
>> +{
>> +}
>
> Maybe...
>
> s/xe_svm_range_clean_if_addr_within/s/xe_svm_unmap_address_range
Makes sense. Will change.
> Or if you can think of something better but don't really like
> xe_svm_range_clean_if_addr_within.
>
>> +
>> #define xe_svm_assert_in_notifier(...) do {} while (0)
>> #define xe_svm_range_has_dma_mapping(...) false
>>
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index c220bf904ee0..8208409485f6 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -2359,6 +2359,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
>> op->map.pat_index = pat_index;
>> op->map.invalidate_on_bind =
>> __xe_vm_needs_clear_scratch_pages(vm, flags);
>> + } else if (__op->op == DRM_GPUVA_OP_REMAP) {
>> + struct xe_vma *old =
>> + gpuva_to_vma(op->base.remap.unmap->va);
>> + u64 start = xe_vma_start(old), end = xe_vma_end(old);
>> +
>> + if (op->base.remap.prev)
>> + start = op->base.remap.prev->va.addr +
>> + op->base.remap.prev->va.range;
>> + if (op->base.remap.next)
>> + end = op->base.remap.next->va.addr;
>> +
>> + if (xe_vma_is_cpu_addr_mirror(old) &&
>> + xe_svm_has_mapping(vm, start, end)) {
>> + drm_gpuva_ops_free(&vm->gpuvm, ops);
>> + return ERR_PTR(-EBUSY);
>> + }
>
> How about dropping this.
>
>> } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
>> struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
>> struct xe_svm_range *svm_range;
>> @@ -2662,7 +2678,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
>>
>> if (xe_vma_is_cpu_addr_mirror(old) &&
>> xe_svm_has_mapping(vm, start, end))
>> - return -EBUSY;
>> + xe_svm_range_clean_if_addr_within(vm, start, end);
>>
>
> And here add a flag to xe_vma_ops which says we are in madvise.
>
> e.g. XE_VMA_OPS_FLAG_MADVISE
>
> Then...
>
> if (xe_vma_is_cpu_addr_mirror(old) &&
> xe_svm_has_mapping(vm, start, end)) {
> if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
> xe_svm_range_clean_if_addr_within(vm, start, end);
> else
> return -EBUSY;
> }
>
> Matt
>
>> op->remap.start = xe_vma_start(old);
>> op->remap.range = xe_vma_size(old);
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (10 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 11/19] drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-29 23:27 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 13/19] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
` (14 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
If the platform does not support atomic access on system memory, and the
ranges are in system memory, but the user requires atomic accesses on
the VMA, then migrate the ranges to VRAM. Apply this policy for prefetch
operations as well.
v2
- Drop unnecessary vm_dbg
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_pt.c | 9 +++++--
drivers/gpu/drm/xe/xe_svm.c | 4 +++-
drivers/gpu/drm/xe/xe_vm.c | 38 ++++++++++++++++++++++++++++--
drivers/gpu/drm/xe/xe_vm.h | 2 ++
drivers/gpu/drm/xe/xe_vm_madvise.c | 10 +++++++-
5 files changed, 57 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 39bc1964089e..ad17ded0ecaa 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -645,13 +645,18 @@ static bool xe_atomic_for_vram(struct xe_vm *vm)
return true;
}
-static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+static bool xe_atomic_for_system(struct xe_vm *vm,
+ struct xe_bo *bo,
+ struct xe_vma *vma)
{
struct xe_device *xe = vm->xe;
if (!xe->info.has_device_atomics_on_smem)
return false;
+ if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
+ return true;
+
/*
* If a SMEM+LMEM allocation is backed by SMEM, a device
* atomics will cause a gpu page fault and which then
@@ -745,7 +750,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
- xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo, vma) ?
XE_USM_PPGTT_PTE_AE : 0;
}
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 5691bb9dbf26..743bb1f7d39c 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -771,6 +771,8 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
+ preferred_region_is_vram |= xe_vma_need_vram_migrate_for_atomic(vm->xe, vma);
+
if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
return false;
@@ -812,7 +814,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.check_pages_threshold = IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
- .devmem_only = atomic && IS_DGFX(vm->xe) &&
+ .devmem_only = atomic && xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8208409485f6..e5fc2c2be8b2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2930,13 +2930,22 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+ ctx.devmem_only = xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- if (!region)
+ bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
+
+ if (!needs_vram) {
xe_svm_range_migrate_to_smem(vm, svm_range);
+ } else if (needs_vram) {
+ /* If migration is mandated by atomic attributes
+ * in vma and prefetch region is smem force prefetch
+ * in vram of root tile.
+ */
+ region = region ? region : 1;
- if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
if (err) {
@@ -4178,6 +4187,31 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
kvfree(snap);
}
+/**
+ * xe_vma_need_vram_migrate_for_atomic - Check if VMA needs VRAM migration for atomic operations
+ * @xe: Pointer to the XE device structure
+ * @vma: Pointer to the virtual memory area (VMA) structure
+ *
+ * This function determines whether the given VMA needs to be migrated to
+ * VRAM in order to do atomic GPU operation.
+ *
+ * Return: true if migration to VRAM is required, false otherwise.
+ */
+bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma)
+{
+ /* Note: The checks implemented here are platform-specific. For instance,
+ * on a device supporting CXL atomics, these would ideally work universally
+ * without additional handling.
+ */
+ if (!IS_DGFX(xe) || vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_UNDEFINED ||
+ vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
+ (xe->info.has_device_atomics_on_smem &&
+ vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE))
+ return false;
+
+ return true;
+}
+
/**
* xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
* @vm: Pointer to the xe_vm structure
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 8151b1b01a13..edd6ffd7c3ac 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma);
+
int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
/**
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index f7edefe5f6cf..084719660401 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -69,7 +69,15 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
struct xe_vma **vmas, int num_vmas,
struct drm_xe_madvise_ops ops)
{
- /* Implementation pending */
+ int i;
+
+ xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
+ xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
+ ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
+
+ for (i = 0; i < num_vmas; i++)
+ vmas[i]->attr.atomic_access = ops.atomic.val;
+ /*TODO: handle bo backed vmas */
return 0;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access
2025-05-27 16:39 ` [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
@ 2025-05-29 23:27 ` Matthew Brost
2025-05-29 23:38 ` Matthew Brost
2025-05-30 4:40 ` Matthew Brost
0 siblings, 2 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 23:27 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:56PM +0530, Himal Prasad Ghimiray wrote:
> If the platform does not support atomic access on system memory, and the
> ranges are in system memory, but the user requires atomic accesses on
> the VMA, then migrate the ranges to VRAM. Apply this policy for prefetch
> operations as well.
>
> v2
> - Drop unnecessary vm_dbg
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_pt.c | 9 +++++--
> drivers/gpu/drm/xe/xe_svm.c | 4 +++-
> drivers/gpu/drm/xe/xe_vm.c | 38 ++++++++++++++++++++++++++++--
> drivers/gpu/drm/xe/xe_vm.h | 2 ++
> drivers/gpu/drm/xe/xe_vm_madvise.c | 10 +++++++-
> 5 files changed, 57 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 39bc1964089e..ad17ded0ecaa 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -645,13 +645,18 @@ static bool xe_atomic_for_vram(struct xe_vm *vm)
> return true;
> }
>
> -static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
> +static bool xe_atomic_for_system(struct xe_vm *vm,
> + struct xe_bo *bo,
> + struct xe_vma *vma)
You can get the BO from the VMA, so I'd drop the BO argument.
> {
> struct xe_device *xe = vm->xe;
>
> if (!xe->info.has_device_atomics_on_smem)
> return false;
>
> + if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> + return true;
> +
> /*
> * If a SMEM+LMEM allocation is backed by SMEM, a device
> * atomics will cause a gpu page fault and which then
> @@ -745,7 +750,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
>
> if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
> xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
> - xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
> + xe_walk.default_system_pte = xe_atomic_for_system(vm, bo, vma) ?
> XE_USM_PPGTT_PTE_AE : 0;
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 5691bb9dbf26..743bb1f7d39c 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -771,6 +771,8 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> struct xe_vm *vm = range_to_vm(&range->base);
> u64 range_size = xe_svm_range_size(range);
>
> + preferred_region_is_vram |= xe_vma_need_vram_migrate_for_atomic(vm->xe, vma);
> +
I'm not sure about this. Shouldn't we just set preferred_region_is_vram
at the caller (preferred_vram || atomic fault) in the fault handler?
> if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
> return false;
>
> @@ -812,7 +814,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> .check_pages_threshold = IS_DGFX(vm->xe) &&
> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
> - .devmem_only = atomic && IS_DGFX(vm->xe) &&
> + .devmem_only = atomic && xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
> IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 8208409485f6..e5fc2c2be8b2 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2930,13 +2930,22 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> ctx.read_only = xe_vma_read_only(vma);
> ctx.devmem_possible = devmem_possible;
> ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
> + ctx.devmem_only = xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
I still wouldn't set devmem only for prefetch as I don't think we should
fail the prefetch unless we absolutely have to. A fault will still fix
up atomic faults that are in system memory if needed.
>
> /* TODO: Threading the migration */
> xa_for_each(&op->prefetch_range.range, i, svm_range) {
> - if (!region)
> + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> +
> + if (!needs_vram) {
> xe_svm_range_migrate_to_smem(vm, svm_range);
> + } else if (needs_vram) {
> + /* If migration is mandated by atomic attributes
> + * in vma and prefetch region is smem force prefetch
> + * in vram of root tile.
> + */
> + region = region ? region : 1;
>
I don't think this logic needs to change until preferred location is
implemented. I don't think the atomic mode has any bearing on prefetch.
> - if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
> tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> if (err) {
> @@ -4178,6 +4187,31 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> kvfree(snap);
> }
>
> +/**
> + * xe_vma_need_vram_migrate_for_atomic - Check if VMA needs VRAM migration for atomic operations
> + * @xe: Pointer to the XE device structure
> + * @vma: Pointer to the virtual memory area (VMA) structure
> + *
> + * This function determines whether the given VMA needs to be migrated to
> + * VRAM in order to do atomic GPU operation.
> + *
> + * Return: true if migration to VRAM is required, false otherwise.
> + */
> +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma)
> +{
> + /* Note: The checks implemented here are platform-specific. For instance,
> + * on a device supporting CXL atomics, these would ideally work universally
> + * without additional handling.
> + */
> + if (!IS_DGFX(xe) || vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_UNDEFINED ||
I think DRM_XE_VMA_ATOMIC_UNDEFINED is the same as GLOBAL, right? Isn't
that the default? Or is GLOBAL the default? We have been told that whatever
the default is, it just has to work for SVM, so maybe set it to GLOBAL by default?
> + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> + (xe->info.has_device_atomics_on_smem &&
> + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE))
> + return false;
> +
> + return true;
> +}
> +
> /**
> * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
> * @vm: Pointer to the xe_vm structure
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 8151b1b01a13..edd6ffd7c3ac 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
>
> struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
>
> +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma);
> +
> int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index f7edefe5f6cf..084719660401 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -69,7 +69,15 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> struct xe_vma **vmas, int num_vmas,
> struct drm_xe_madvise_ops ops)
> {
> - /* Implementation pending */
> + int i;
> +
> + xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> + xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>= DRM_XE_VMA_ATOMIC_UNDEFINED, right?
Also santize this input before here as discussed in patch 19 and 10.
Matt
> + ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
> +
> + for (i = 0; i < num_vmas; i++)
> + vmas[i]->attr.atomic_access = ops.atomic.val;
> + /*TODO: handle bo backed vmas */
> return 0;
> }
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access
2025-05-29 23:27 ` Matthew Brost
@ 2025-05-29 23:38 ` Matthew Brost
2025-05-30 4:40 ` Matthew Brost
1 sibling, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 23:38 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Thu, May 29, 2025 at 04:27:09PM -0700, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:56PM +0530, Himal Prasad Ghimiray wrote:
> > If the platform does not support atomic access on system memory, and the
> > ranges are in system memory, but the user requires atomic accesses on
> > the VMA, then migrate the ranges to VRAM. Apply this policy for prefetch
> > operations as well.
> >
> > v2
> > - Drop unnecessary vm_dbg
> >
> > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_pt.c | 9 +++++--
> > drivers/gpu/drm/xe/xe_svm.c | 4 +++-
> > drivers/gpu/drm/xe/xe_vm.c | 38 ++++++++++++++++++++++++++++--
> > drivers/gpu/drm/xe/xe_vm.h | 2 ++
> > drivers/gpu/drm/xe/xe_vm_madvise.c | 10 +++++++-
> > 5 files changed, 57 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> > index 39bc1964089e..ad17ded0ecaa 100644
> > --- a/drivers/gpu/drm/xe/xe_pt.c
> > +++ b/drivers/gpu/drm/xe/xe_pt.c
> > @@ -645,13 +645,18 @@ static bool xe_atomic_for_vram(struct xe_vm *vm)
> > return true;
> > }
> >
> > -static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
> > +static bool xe_atomic_for_system(struct xe_vm *vm,
> > + struct xe_bo *bo,
> > + struct xe_vma *vma)
>
> You can get the BO from the VMA, so I'd drop the BO argument.
>
> > {
> > struct xe_device *xe = vm->xe;
> >
> > if (!xe->info.has_device_atomics_on_smem)
> > return false;
> >
> > + if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> > + return true;
> > +
> > /*
> > * If a SMEM+LMEM allocation is backed by SMEM, a device
> > * atomics will cause a gpu page fault and which then
> > @@ -745,7 +750,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
> >
> > if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
> > xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
> > - xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
> > + xe_walk.default_system_pte = xe_atomic_for_system(vm, bo, vma) ?
> > XE_USM_PPGTT_PTE_AE : 0;
> > }
> >
> > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > index 5691bb9dbf26..743bb1f7d39c 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.c
> > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > @@ -771,6 +771,8 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> > struct xe_vm *vm = range_to_vm(&range->base);
> > u64 range_size = xe_svm_range_size(range);
> >
> > + preferred_region_is_vram |= xe_vma_need_vram_migrate_for_atomic(vm->xe, vma);
> > +
>
> I'm not sure about this. Shouldn't we just set preferred_region_is_vram
> at the caller (prefered_vram || atomic fault) in the fault handler?
>
Let me fix this condition at the caller...
preferred_vram || (atomic fault && xe_vma_need_vram_migrate_for_atomic)
Matt
> > if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
> > return false;
> >
> > @@ -812,7 +814,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> > .check_pages_threshold = IS_DGFX(vm->xe) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
> > - .devmem_only = atomic && IS_DGFX(vm->xe) &&
> > + .devmem_only = atomic && xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> > .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 8208409485f6..e5fc2c2be8b2 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -2930,13 +2930,22 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> > ctx.read_only = xe_vma_read_only(vma);
> > ctx.devmem_possible = devmem_possible;
> > ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
> > + ctx.devmem_only = xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> > + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
>
> I still wouldn't set devmem only for prefetch as I don't think we should
> fail the prefetch unless we absolutely have too. A fault will still fix
> up atomic faults that are in system memory if needed.
>
> >
> > /* TODO: Threading the migration */
> > xa_for_each(&op->prefetch_range.range, i, svm_range) {
> > - if (!region)
> > + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> > +
> > + if (!needs_vram) {
> > xe_svm_range_migrate_to_smem(vm, svm_range);
> > + } else if (needs_vram) {
> > + /* If migration is mandated by atomic attributes
> > + * in vma and prefetch region is smem force prefetch
> > + * in vram of root tile.
> > + */
> > + region = region ? region : 1;
> >
>
> I don't think this logic needs to change until we have preferred location
> implemented. I don't think the atomic mode has any bearing on prefetch.
>
> > - if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
> > tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> > err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> > if (err) {
> > @@ -4178,6 +4187,31 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> > kvfree(snap);
> > }
> >
> > +/**
> > + * xe_vma_need_vram_migrate_for_atomic - Check if VMA needs VRAM migration for atomic operations
> > + * @xe: Pointer to the XE device structure
> > + * @vma: Pointer to the virtual memory area (VMA) structure
> > + *
> > + * This function determines whether the given VMA needs to be migrated to
> > + * VRAM in order to do atomic GPU operation.
> > + *
> > + * Return: true if migration to VRAM is required, false otherwise.
> > + */
> > +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma)
> > +{
> > + /* Note: The checks implemented here are platform-specific. For instance,
> > + * on a device supporting CXL atomics, these would ideally work universally
> > + * without additional handling.
> > + */
> > + if (!IS_DGFX(xe) || vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_UNDEFINED ||
>
> I think DRM_XE_VMA_ATOMIC_UNDEFINED is same as GLOBAL, right? Isn't that
> the default? Or global the default? We have been told whatever the
> default is, just has to work for SVM so maybe set to GLOBAL by default?
>
> > + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> > + (xe->info.has_device_atomics_on_smem &&
> > + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE))
> > + return false;
> > +
> > + return true;
> > +}
> > +
> > /**
> > * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
> > * @vm: Pointer to the xe_vm structure
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index 8151b1b01a13..edd6ffd7c3ac 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
> >
> > struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
> >
> > +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma);
> > +
> > int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
> >
> > /**
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > index f7edefe5f6cf..084719660401 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > @@ -69,7 +69,15 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > struct xe_vma **vmas, int num_vmas,
> > struct drm_xe_madvise_ops ops)
> > {
> > - /* Implementation pending */
> > + int i;
> > +
> > + xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> > + xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>
> >= DRM_XE_VMA_ATOMIC_UNDEFINED, right?
>
> Also sanitize this input before here as discussed in patch 19 and 10.
>
> Matt
>
> > + ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
> > +
> > + for (i = 0; i < num_vmas; i++)
> > + vmas[i]->attr.atomic_access = ops.atomic.val;
> > + /*TODO: handle bo backed vmas */
> > return 0;
> > }
> >
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access
2025-05-29 23:27 ` Matthew Brost
2025-05-29 23:38 ` Matthew Brost
@ 2025-05-30 4:40 ` Matthew Brost
1 sibling, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 4:40 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Thu, May 29, 2025 at 04:27:09PM -0700, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:09:56PM +0530, Himal Prasad Ghimiray wrote:
> > If the platform does not support atomic access on system memory, and the
> > ranges are in system memory, but the user requires atomic accesses on
> > the VMA, then migrate the ranges to VRAM. Apply this policy for prefetch
> > operations as well.
> >
> > v2
> > - Drop unnecessary vm_dbg
> >
> > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_pt.c | 9 +++++--
> > drivers/gpu/drm/xe/xe_svm.c | 4 +++-
> > drivers/gpu/drm/xe/xe_vm.c | 38 ++++++++++++++++++++++++++++--
> > drivers/gpu/drm/xe/xe_vm.h | 2 ++
> > drivers/gpu/drm/xe/xe_vm_madvise.c | 10 +++++++-
> > 5 files changed, 57 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> > index 39bc1964089e..ad17ded0ecaa 100644
> > --- a/drivers/gpu/drm/xe/xe_pt.c
> > +++ b/drivers/gpu/drm/xe/xe_pt.c
> > @@ -645,13 +645,18 @@ static bool xe_atomic_for_vram(struct xe_vm *vm)
> > return true;
> > }
> >
> > -static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
> > +static bool xe_atomic_for_system(struct xe_vm *vm,
> > + struct xe_bo *bo,
> > + struct xe_vma *vma)
>
> You can get the BO from the VMA, so I'd drop the BO argument.
>
> > {
> > struct xe_device *xe = vm->xe;
> >
> > if (!xe->info.has_device_atomics_on_smem)
> > return false;
> >
> > + if (vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> > + return true;
> > +
> > /*
> > * If a SMEM+LMEM allocation is backed by SMEM, a device
> > * atomics will cause a gpu page fault and which then
> > @@ -745,7 +750,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
> >
> > if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
> > xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
> > - xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
> > + xe_walk.default_system_pte = xe_atomic_for_system(vm, bo, vma) ?
> > XE_USM_PPGTT_PTE_AE : 0;
> > }
> >
> > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > index 5691bb9dbf26..743bb1f7d39c 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.c
> > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > @@ -771,6 +771,8 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> > struct xe_vm *vm = range_to_vm(&range->base);
> > u64 range_size = xe_svm_range_size(range);
> >
> > + preferred_region_is_vram |= xe_vma_need_vram_migrate_for_atomic(vm->xe, vma);
> > +
>
> I'm not sure about this. Shouldn't we just set preferred_region_is_vram
> at the caller (prefered_vram || atomic fault) in the fault handler?
>
> > if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
> > return false;
> >
> > @@ -812,7 +814,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> > .check_pages_threshold = IS_DGFX(vm->xe) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
> > - .devmem_only = atomic && IS_DGFX(vm->xe) &&
> > + .devmem_only = atomic && xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
> > .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
> > IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 8208409485f6..e5fc2c2be8b2 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -2930,13 +2930,22 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> > ctx.read_only = xe_vma_read_only(vma);
> > ctx.devmem_possible = devmem_possible;
> > ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
> > + ctx.devmem_only = xe_vma_need_vram_migrate_for_atomic(vm->xe, vma) &&
> > + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
>
> I still wouldn't set devmem only for prefetch as I don't think we should
> fail the prefetch unless we absolutely have too. A fault will still fix
> up atomic faults that are in system memory if needed.
>
> >
> > /* TODO: Threading the migration */
> > xa_for_each(&op->prefetch_range.range, i, svm_range) {
> > - if (!region)
> > + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> > +
> > + if (!needs_vram) {
> > xe_svm_range_migrate_to_smem(vm, svm_range);
> > + } else if (needs_vram) {
> > + /* If migration is mandated by atomic attributes
> > + * in vma and prefetch region is smem force prefetch
> > + * in vram of root tile.
> > + */
> > + region = region ? region : 1;
> >
>
> I don't think this logic needs to change until we have preferred location
> implemented. I don't think the atomic mode has any bearing on prefetch.
>
Sorry for multiple replies, things come as I look at other patches.
To be clear, I think if xe_vma_need_vram_migrate_for_atomic is removed
from xe_svm_range_needs_migrate_to_vram, we don't need this logic as
region non-zero or tile in the final result will always be non-NULL.
Matt
> > - if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
> > tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> > err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> > if (err) {
> > @@ -4178,6 +4187,31 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
> > kvfree(snap);
> > }
> >
> > +/**
> > + * xe_vma_need_vram_migrate_for_atomic - Check if VMA needs VRAM migration for atomic operations
> > + * @xe: Pointer to the XE device structure
> > + * @vma: Pointer to the virtual memory area (VMA) structure
> > + *
> > + * This function determines whether the given VMA needs to be migrated to
> > + * VRAM in order to do atomic GPU operation.
> > + *
> > + * Return: true if migration to VRAM is required, false otherwise.
> > + */
> > +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma)
> > +{
> > + /* Note: The checks implemented here are platform-specific. For instance,
> > + * on a device supporting CXL atomics, these would ideally work universally
> > + * without additional handling.
> > + */
> > + if (!IS_DGFX(xe) || vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_UNDEFINED ||
>
> I think DRM_XE_VMA_ATOMIC_UNDEFINED is same as GLOBAL, right? Isn't that
> the default? Or global the default? We have been told whatever the
> default is, just has to work for SVM so maybe set to GLOBAL by default?
>
> > + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> > + (xe->info.has_device_atomics_on_smem &&
> > + vma->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE))
> > + return false;
> > +
> > + return true;
> > +}
> > +
> > /**
> > * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
> > * @vm: Pointer to the xe_vm structure
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index 8151b1b01a13..edd6ffd7c3ac 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
> >
> > struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
> >
> > +bool xe_vma_need_vram_migrate_for_atomic(struct xe_device *xe, struct xe_vma *vma);
> > +
> > int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
> >
> > /**
> > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > index f7edefe5f6cf..084719660401 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > @@ -69,7 +69,15 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > struct xe_vma **vmas, int num_vmas,
> > struct drm_xe_madvise_ops ops)
> > {
> > - /* Implementation pending */
> > + int i;
> > +
> > + xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> > + xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>
> >= DRM_XE_VMA_ATOMIC_UNDEFINED, right?
>
> Also sanitize this input before here as discussed in patch 19 and 10.
>
> Matt
>
> > + ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
> > +
> > + for (i = 0; i < num_vmas; i++)
> > + vmas[i]->attr.atomic_access = ops.atomic.val;
> > + /*TODO: handle bo backed vmas */
> > return 0;
> > }
> >
> > --
> > 2.34.1
> >
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 13/19] drm/xe/madvise: Update migration policy based on preferred location
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (11 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 12/19] drm/xe/svm : Add svm ranges migration policy on atomic access Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-29 23:42 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 14/19] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
` (13 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
When the user sets the valid devmem_fd as a preferred location, GPU fault
will trigger migration to tile of device associated with devmem_fd.
If the user sets an invalid devmem_fd the preferred location is current
placement(smem) only.
v2(Matthew Brost)
- Default should be faulting tile
- remove devmem_fd used as region
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 39 +++++++++++++++++++++++++++++-
drivers/gpu/drm/xe/xe_svm.h | 8 ++++++
drivers/gpu/drm/xe/xe_vm.h | 3 +++
drivers/gpu/drm/xe/xe_vm_madvise.c | 15 +++++++++++-
4 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 743bb1f7d39c..8b6546ebac72 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -791,6 +791,37 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
return true;
}
+/**
+ * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
+ * @vma: Pointer to the xe_vma structure containing memory attributes
+ * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
+ *
+ * This function determines the correct DRM pagemap to use for a given VMA.
+ * It first checks if a valid devmem_fd is provided in the VMA's preferred
+ * location. If the devmem_fd is negative, it returns NULL, indicating no
+ * pagemap is available and smem to be used as preferred location.
+ * If the devmem_fd is equal to the default faulting
+ * GT identifier, it returns the VRAM pagemap associated with the tile.
+ *
+ * Future support for multi-device configurations may use drm_pagemap_from_fd()
+ * to resolve pagemaps from arbitrary file descriptors.
+ *
+ * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
+ */
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+
+ if (fd < 0)
+ return NULL;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD && tile)
+ return &tile->mem.vram.dpagemap;
+
+ /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
+ return NULL;
+}
+
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
@@ -823,6 +854,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
struct xe_svm_range *range;
struct drm_exec exec;
struct dma_fence *fence;
+ struct drm_pagemap *dpagemap;
struct xe_tile *tile = gt_to_tile(gt);
int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
@@ -852,8 +884,13 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
range_debug(range, "PAGE FAULT");
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe) && !!dpagemap)) {
+ /* TODO : For multi-device dpagemap will be used to find the remote tile
+ * and remote device. Will need to modify xe_svm_alloc_vram to use dpagemap
+ * for future multi-device support.
+ */
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index b36f70ab3d03..344349313001 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -95,6 +95,8 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -320,6 +322,12 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
{
}
+static inline
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index edd6ffd7c3ac..340ac34936f4 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -222,6 +222,9 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
int xe_vm_userptr_check_repin(struct xe_vm *vm);
+bool xe_vma_has_preferred_mem_loc(struct xe_vma *vma,
+ u32 *mem_region, u32 *devmem_fd);
+
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 084719660401..1b31e41b3331 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -61,7 +61,20 @@ static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
struct xe_vma **vmas, int num_vmas,
struct drm_xe_madvise_ops ops)
{
- /* Implementation pending */
+ int i;
+
+ xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_PREFERRED_LOC);
+
+ for (i = 0; i < num_vmas; i++) {
+ vmas[i]->attr.preferred_loc.devmem_fd = ops.preferred_mem_loc.devmem_fd;
+
+ /* Till multi-device support is not added migration_policy
+ * is of no use and can be ignored.
+ */
+ //vmas[i]->attr.preferred_loc.migration_policy =
+ // ops.preferred_mem_loc.migration_policy;
+ }
+
return 0;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 13/19] drm/xe/madvise: Update migration policy based on preferred location
2025-05-27 16:39 ` [PATCH v3 13/19] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
@ 2025-05-29 23:42 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 23:42 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:57PM +0530, Himal Prasad Ghimiray wrote:
> When the user sets the valid devmem_fd as a preferred location, GPU fault
> will trigger migration to tile of device associated with devmem_fd.
>
> If the user sets an invalid devmem_fd the preferred location is current
> placement(smem) only.
>
> v2(Matthew Brost)
> - Default should be faulting tile
> - remove devmem_fd used as region
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 39 +++++++++++++++++++++++++++++-
> drivers/gpu/drm/xe/xe_svm.h | 8 ++++++
> drivers/gpu/drm/xe/xe_vm.h | 3 +++
> drivers/gpu/drm/xe/xe_vm_madvise.c | 15 +++++++++++-
> 4 files changed, 63 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 743bb1f7d39c..8b6546ebac72 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -791,6 +791,37 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
> return true;
> }
>
> +/**
> + * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
> + * @vma: Pointer to the xe_vma structure containing memory attributes
> + * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
> + *
> + * This function determines the correct DRM pagemap to use for a given VMA.
> + * It first checks if a valid devmem_fd is provided in the VMA's preferred
> + * location. If the devmem_fd is negative, it returns NULL, indicating no
> + * pagemap is available and smem to be used as preferred location.
> + * If the devmem_fd is equal to the default faulting
> + * GT identifier, it returns the VRAM pagemap associated with the tile.
> + *
> + * Future support for multi-device configurations may use drm_pagemap_from_fd()
> + * to resolve pagemaps from arbitrary file descriptors.
> + *
> + * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
> + */
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
> +{
> + s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
> +
> + if (fd < 0)
> + return NULL;
> +
> + if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVMEM_FD && tile)
> + return &tile->mem.vram.dpagemap;
I'd change this here to:
return IS_DGFX(xe) ? &tile->mem.vram.dpagemap : NULL;
> +
> + /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
> + return NULL;
> +}
> +
> /**
> * xe_svm_handle_pagefault() - SVM handle page fault
> * @vm: The VM.
> @@ -823,6 +854,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> struct xe_svm_range *range;
> struct drm_exec exec;
> struct dma_fence *fence;
> + struct drm_pagemap *dpagemap;
> struct xe_tile *tile = gt_to_tile(gt);
> int migrate_try_count = ctx.devmem_only ? 3 : 1;
> ktime_t end = 0;
> @@ -852,8 +884,13 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>
> range_debug(range, "PAGE FAULT");
>
> + dpagemap = xe_vma_resolve_pagemap(vma, tile);
> if (--migrate_try_count >= 0 &&
> - xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
> + xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe) && !!dpagemap)) {
See my comment in patch 12 + comment above, I think this condition should be:
!!dpagemap || (atomic && xe_vma_need_vram_migrate_for_atomic)
> + /* TODO : For multi-device dpagemap will be used to find the remote tile
> + * and remote device. Will need to modify xe_svm_alloc_vram to use dpagemap
> + * for future multi-device support.
> + */
> err = xe_svm_alloc_vram(vm, tile, range, &ctx);
> ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
> if (err) {
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index b36f70ab3d03..344349313001 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -95,6 +95,8 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
>
> void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -320,6 +322,12 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end)
> {
> }
>
> +static inline
> +struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
> +{
> + return NULL;
> +}
> +
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index edd6ffd7c3ac..340ac34936f4 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -222,6 +222,9 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
>
> int xe_vm_userptr_check_repin(struct xe_vm *vm);
>
> +bool xe_vma_has_preferred_mem_loc(struct xe_vma *vma,
> + u32 *mem_region, u32 *devmem_fd);
> +
> int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
> struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
> u8 tile_mask);
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 084719660401..1b31e41b3331 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -61,7 +61,20 @@ static int madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
> struct xe_vma **vmas, int num_vmas,
> struct drm_xe_madvise_ops ops)
> {
> - /* Implementation pending */
> + int i;
> +
> + xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_PREFERRED_LOC);
> +
> + for (i = 0; i < num_vmas; i++) {
> + vmas[i]->attr.preferred_loc.devmem_fd = ops.preferred_mem_loc.devmem_fd;
> +
> + /* Till multi-device support is not added migration_policy
> + * is of no use and can be ignored.
> + */
> + //vmas[i]->attr.preferred_loc.migration_policy =
> + // ops.preferred_mem_loc.migration_policy;
No harm in just setting this for now and remaining unused, right?
Matt
> + }
> +
> return 0;
> }
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 14/19] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (12 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 13/19] drm/xe/madvise: Update migration policy based on preferred location Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-30 0:24 ` Matthew Brost
2025-05-27 16:39 ` [PATCH v3 15/19] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
` (12 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
This attributes sets the pat_index for the svm used vma range, which is
utilized to ascertain the coherence.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_vm_madvise.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 1b31e41b3331..0f0b94cb43f2 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -98,7 +98,13 @@ static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
struct xe_vma **vmas, int num_vmas,
struct drm_xe_madvise_ops ops)
{
- /* Implementation pending */
+ int i;
+
+ xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_PAT);
+
+ for (i = 0; i < num_vmas; i++)
+ vmas[i]->attr.pat_index = ops.pat_index.val;
+
return 0;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 14/19] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
2025-05-27 16:39 ` [PATCH v3 14/19] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
@ 2025-05-30 0:24 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 0:24 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:58PM +0530, Himal Prasad Ghimiray wrote:
> This attributes sets the pat_index for the svm used vma range, which is
> utilized to ascertain the coherence.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm_madvise.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 1b31e41b3331..0f0b94cb43f2 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -98,7 +98,13 @@ static int madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
> struct xe_vma **vmas, int num_vmas,
> struct drm_xe_madvise_ops ops)
> {
> - /* Implementation pending */
> + int i;
> +
> + xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_PAT);
> +
> + for (i = 0; i < num_vmas; i++)
> + vmas[i]->attr.pat_index = ops.pat_index.val;
> +
Again as discussed in other patches maybe drop the return value on the
vfuncs.
But you will need to validate the pat_index ahead of this like VM bind
does:
coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
err = -EINVAL;
goto free_bind_ops;
}
if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
err = -EINVAL;
goto free_bind_ops;
}
Matt
> return 0;
> }
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 15/19] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (13 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 14/19] drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute Himal Prasad Ghimiray
@ 2025-05-27 16:39 ` Himal Prasad Ghimiray
2025-05-28 16:29 ` Matthew Brost
2025-05-27 16:40 ` [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
` (11 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:39 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
Introduce flag DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC to ensure prefetching
in madvise-advised memory regions
v2 (Matthew Brost)
- Add kernel-doc
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
include/uapi/drm/xe_drm.h | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e0d75226a724..03adfdc20dde 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1111,6 +1111,10 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+ /** DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC as prefetch_mem_region_instance
+ * ensures prefetching in madvise-advised memory region.
+ */
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 15/19] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
2025-05-27 16:39 ` [PATCH v3 15/19] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
@ 2025-05-28 16:29 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 16:29 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:09:59PM +0530, Himal Prasad Ghimiray wrote:
> Introduce flag DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC to ensure prefetching
> in madvise-advised memory regions
>
> v2 (Matthew Brost)
> - Add kernel-doc
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> include/uapi/drm/xe_drm.h | 4 ++++
> 1 file changed, 4 insertions(+)
>
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index e0d75226a724..03adfdc20dde 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -1111,6 +1111,10 @@ struct drm_xe_vm_bind_op {
> /** @flags: Bind flags */
> __u32 flags;
>
> + /** DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC as prefetch_mem_region_instance
> + * ensures prefetching in madvise-advised memory region.
> + */
> +#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
I think the preferred way is to put this in 'struct drm_xe_vm_bind_op' based
on existing style.
Matt
> /**
> * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
> * It is a region instance, not a mask.
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (14 preceding siblings ...)
2025-05-27 16:39 ` [PATCH v3 15/19] drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch Himal Prasad Ghimiray
@ 2025-05-27 16:40 ` Himal Prasad Ghimiray
2025-05-30 4:24 ` Matthew Brost
2025-06-24 18:56 ` Matthew Brost
2025-05-27 16:40 ` [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
` (10 subsequent siblings)
26 siblings, 2 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:40 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
When prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch svm
ranges to preferred location provided by madvise.
v2 (Matthew Brost)
- Fix region, devmem_fd usages
- consult madvise is applicable for other vma's too.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_svm.c | 11 +++++++++++
drivers/gpu/drm/xe/xe_svm.h | 7 +++++++
drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++++----------
3 files changed, 38 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 8b6546ebac72..0c929eb192e7 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -822,6 +822,17 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
return NULL;
}
+/**
+ * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
+ * @dpagemap: pointer to struct drm_pagemap
+ *
+ * Return: Pointer to xe_tile
+ */
+struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
+{
+ return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
+}
+
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 344349313001..a8b5bebf7a54 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -97,6 +97,8 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -328,6 +330,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
return NULL;
}
+static inline
+struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
+{
+ return NULL;
+}
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index e5fc2c2be8b2..4520e475399e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2917,15 +2917,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
int err = 0;
struct xe_svm_range *svm_range;
+ struct drm_pagemap *dpagemap;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
+ struct xe_tile *tile = NULL;
unsigned long i;
u32 region;
if (!xe_vma_is_cpu_addr_mirror(vma))
return 0;
- region = op->prefetch_range.region;
+ if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
+ if (dpagemap)
+ tile = xe_tile_from_dpagemap(dpagemap);
+ } else {
+ region = op->prefetch_range.region;
+ if (region)
+ tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ }
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
@@ -2935,18 +2944,18 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
+ bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);
if (!needs_vram) {
xe_svm_range_migrate_to_smem(vm, svm_range);
} else if (needs_vram) {
- /* If migration is mandated by atomic attributes
- * in vma and prefetch region is smem force prefetch
+ /* If migration is mandated by atomic attributes
+ * in vma, and prefetch region is smem, force prefetch
* in vram of root tile.
*/
- region = region ? region : 1;
+ if (!tile)
+ tile = xe_device_get_root_tile(vm->xe);
- tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
if (err) {
drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
@@ -3014,7 +3023,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
else
region = op->prefetch.region;
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
+ region <= ARRAY_SIZE(region_to_mem_type));
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
@@ -3432,8 +3442,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
op == DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
- xe->info.mem_region_mask)) ||
+ XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+ !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch
2025-05-27 16:40 ` [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
@ 2025-05-30 4:24 ` Matthew Brost
2025-06-24 18:56 ` Matthew Brost
1 sibling, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 4:24 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:10:00PM +0530, Himal Prasad Ghimiray wrote:
> When prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch svm
> ranges to preferred location provided by madvise.
>
> v2 (Matthew Brost)
> - Fix region, devmem_fd usages
> - consult madvise is applicable for other vma's too.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 11 +++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 7 +++++++
> drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++++----------
> 3 files changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 8b6546ebac72..0c929eb192e7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -822,6 +822,17 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +/**
> + * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
> + * @dpagemap: pointer to struct drm_pagemap
> + *
> + * Return: Pointer to xe_tile
> + */
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
> +}
> +
This looks to be in the wrong file. xe_tile.h would be my choice as
static inline.
> /**
> * xe_svm_handle_pagefault() - SVM handle page fault
> * @vm: The VM.
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 344349313001..a8b5bebf7a54 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -97,6 +97,8 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>
> struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
>
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -328,6 +330,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +static inline
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return NULL;
> +}
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e5fc2c2be8b2..4520e475399e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2917,15 +2917,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> int err = 0;
>
> struct xe_svm_range *svm_range;
> + struct drm_pagemap *dpagemap;
> struct drm_gpusvm_ctx ctx = {};
> - struct xe_tile *tile;
> + struct xe_tile *tile = NULL;
> unsigned long i;
> u32 region;
>
> if (!xe_vma_is_cpu_addr_mirror(vma))
> return 0;
>
> - region = op->prefetch_range.region;
> + if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> + dpagemap = xe_vma_resolve_pagemap(vma, tile);
> + if (dpagemap)
> + tile = xe_tile_from_dpagemap(dpagemap);
> + } else {
> + region = op->prefetch_range.region;
> + if (region)
> + tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> + }
>
> ctx.read_only = xe_vma_read_only(vma);
> ctx.devmem_possible = devmem_possible;
> @@ -2935,18 +2944,18 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>
> /* TODO: Threading the migration */
> xa_for_each(&op->prefetch_range.range, i, svm_range) {
> - bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);
>
> if (!needs_vram) {
> xe_svm_range_migrate_to_smem(vm, svm_range);
> } else if (needs_vram) {
else {
Matt
> - /* If migration is mandated by atomic attributes
> - * in vma and prefetch region is smem force prefetch
> + /* If migration is mandated by atomic attributes
> + * in vma, and prefetch region is smem, force prefetch
> * in vram of root tile.
> */
> - region = region ? region : 1;
> + if (!tile)
> + tile = xe_device_get_root_tile(vm->xe);
>
> - tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> if (err) {
> drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> @@ -3014,7 +3023,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> else
> region = op->prefetch.region;
>
> - xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
> + xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
> + region <= ARRAY_SIZE(region_to_mem_type));
>
> err = vma_lock_and_validate(exec,
> gpuva_to_vma(op->base.prefetch.va),
> @@ -3432,8 +3442,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
> op == DRM_XE_VM_BIND_OP_PREFETCH) ||
> XE_IOCTL_DBG(xe, prefetch_region &&
> op != DRM_XE_VM_BIND_OP_PREFETCH) ||
> - XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
> - xe->info.mem_region_mask)) ||
> + XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
> + !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
> XE_IOCTL_DBG(xe, obj &&
> op == DRM_XE_VM_BIND_OP_UNMAP)) {
> err = -EINVAL;
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch
2025-05-27 16:40 ` [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
2025-05-30 4:24 ` Matthew Brost
@ 2025-06-24 18:56 ` Matthew Brost
1 sibling, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-06-24 18:56 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:10:00PM +0530, Himal Prasad Ghimiray wrote:
> When prefetch region is DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, prefetch svm
> ranges to preferred location provided by madvise.
>
> v2 (Matthew Brost)
> - Fix region, devmem_fd usages
> - consult madvise is applicable for other vma's too.
>
Not strictly related to this patch, but we should also update
xe_bo_placement_for_flags to choose the order in which bo->placement
entries are populated based on the desired placements. We might also
need to call xe_bo_placement_for_flags when setting the preferred
location VMA with the BO in order to reorder bo->placement.
Additionally, prefetching may need to move the BO based on this
preferred location. Lastly, if we find that a BO needs this preference
set before being bound (e.g. at creation time), we may need an extension
to the BO creation IOCTL.
I think, for now, this is probably out of scope, but it’s something we
should keep in mind.
Matt
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_svm.c | 11 +++++++++++
> drivers/gpu/drm/xe/xe_svm.h | 7 +++++++
> drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++++----------
> 3 files changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 8b6546ebac72..0c929eb192e7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -822,6 +822,17 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +/**
> + * xe_tile_from_dpagemap - Find xe_tile from drm_pagemap
> + * @dpagemap: pointer to struct drm_pagemap
> + *
> + * Return: Pointer to xe_tile
> + */
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return container_of(dpagemap, struct xe_tile, mem.vram.dpagemap);
> +}
> +
> /**
> * xe_svm_handle_pagefault() - SVM handle page fault
> * @vm: The VM.
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 344349313001..a8b5bebf7a54 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -97,6 +97,8 @@ void xe_svm_range_clean_if_addr_within(struct xe_vm *vm, u64 start, u64 end);
>
> struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
>
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap);
> +
> /**
> * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
> * @range: SVM range
> @@ -328,6 +330,11 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
> return NULL;
> }
>
> +static inline
> +struct xe_tile *xe_tile_from_dpagemap(struct drm_pagemap *dpagemap)
> +{
> + return NULL;
> +}
> #define xe_svm_assert_in_notifier(...) do {} while (0)
> #define xe_svm_range_has_dma_mapping(...) false
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index e5fc2c2be8b2..4520e475399e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2917,15 +2917,24 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> int err = 0;
>
> struct xe_svm_range *svm_range;
> + struct drm_pagemap *dpagemap;
> struct drm_gpusvm_ctx ctx = {};
> - struct xe_tile *tile;
> + struct xe_tile *tile = NULL;
> unsigned long i;
> u32 region;
>
> if (!xe_vma_is_cpu_addr_mirror(vma))
> return 0;
>
> - region = op->prefetch_range.region;
> + if (op->prefetch_range.region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
> + dpagemap = xe_vma_resolve_pagemap(vma, tile);
> + if (dpagemap)
> + tile = xe_tile_from_dpagemap(dpagemap);
> + } else {
> + region = op->prefetch_range.region;
> + if (region)
> + tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> + }
>
> ctx.read_only = xe_vma_read_only(vma);
> ctx.devmem_possible = devmem_possible;
> @@ -2935,18 +2944,18 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
>
> /* TODO: Threading the migration */
> xa_for_each(&op->prefetch_range.range, i, svm_range) {
> - bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, region);
> + bool needs_vram = xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile);
>
> if (!needs_vram) {
> xe_svm_range_migrate_to_smem(vm, svm_range);
> } else if (needs_vram) {
> - /* If migration is mandated by atomic attributes
> - * in vma and prefetch region is smem force prefetch
> + /* If migration is mandated by atomic attributes
> + * in vma, and prefetch region is smem, force prefetch
> * in vram of root tile.
> */
> - region = region ? region : 1;
> + if (!tile)
> + tile = xe_device_get_root_tile(vm->xe);
>
> - tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> if (err) {
> drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> @@ -3014,7 +3023,8 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> else
> region = op->prefetch.region;
>
> - xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
> + xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
> + region <= ARRAY_SIZE(region_to_mem_type));
>
> err = vma_lock_and_validate(exec,
> gpuva_to_vma(op->base.prefetch.va),
> @@ -3432,8 +3442,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
> op == DRM_XE_VM_BIND_OP_PREFETCH) ||
> XE_IOCTL_DBG(xe, prefetch_region &&
> op != DRM_XE_VM_BIND_OP_PREFETCH) ||
> - XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
> - xe->info.mem_region_mask)) ||
> + XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
> + !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
> XE_IOCTL_DBG(xe, obj &&
> op == DRM_XE_VM_BIND_OP_UNMAP)) {
> err = -EINVAL;
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (15 preceding siblings ...)
2025-05-27 16:40 ` [PATCH v3 16/19] drm/xe/svm: Consult madvise preferred location in prefetch Himal Prasad Ghimiray
@ 2025-05-27 16:40 ` Himal Prasad Ghimiray
2025-05-28 17:02 ` Souza, Jose
` (2 more replies)
2025-05-27 16:40 ` [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
` (9 subsequent siblings)
26 siblings, 3 replies; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:40 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
Introduce the DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS ioctl to allow userspace
to query memory attributes of VMAs within a specified virtual address
range.
If num_vmas == 0 and vector_of_vma_mem_attr == NULL, the ioctl returns
the number of VMAs in the specified range.
If num_vmas > 0 and a valid user pointer is provided in
vector_of_vma_mem_attr, the ioctl fills the buffer with memory
attributes for each VMA.
This two-step interface allows userspace to first query the required
buffer size, then retrieve detailed attributes efficiently.
v2 (Matthew Brost)
- Use same ioctl to overload functionality
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_device.c | 1 +
drivers/gpu/drm/xe/xe_vm.c | 87 ++++++++++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_vm.h | 2 +-
include/uapi/drm/xe_drm.h | 82 ++++++++++++++++++++++++++++++++
4 files changed, 171 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b9791c614749..8c965d15c187 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -199,6 +199,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_QUERY_VMAS_ATTRS, xe_vm_query_vmas_attrs_ioctl, DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 4520e475399e..9611d7ca2bed 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2162,6 +2162,93 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static void xe_vm_query_vmas(struct xe_vm *vm, u32 *num_vmas, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
+ (*num_vmas)++;
+}
+
+static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
+ u64 end, struct drm_xe_vma_mem_attr *mem_attrs)
+{
+ struct drm_gpuva *gpuva;
+ int i = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (i == *num_vmas)
+ return -EINVAL;
+
+ mem_attrs[i].start = xe_vma_start(vma);
+ mem_attrs[i].end = xe_vma_end(vma);
+ mem_attrs[i].atomic.val = vma->attr.atomic_access;
+ mem_attrs[i].pat_index.val = vma->attr.pat_index;
+ mem_attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
+ mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
+
+ i++;
+ }
+
+ if (i < (*num_vmas - 1))
+ *num_vmas = i;
+ return 0;
+}
+
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vma_mem_attr *mem_attrs;
+ struct drm_xe_vm_query_vmas_attr *args = data;
+ u64 __user *attrs_user = NULL;
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe, args->num_vmas < 0))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ down_read(&vm->lock);
+
+ attrs_user = u64_to_user_ptr(args->vector_of_vma_mem_attr);
+
+ if (args->num_vmas == 0 && !attrs_user) {
+ xe_vm_query_vmas(vm, &args->num_vmas, args->start, args->start + args->range);
+ goto unlock_vm;
+ }
+
+ mem_attrs = kvmalloc_array(args->num_vmas, sizeof(struct drm_xe_vma_mem_attr),
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!mem_attrs) {
+ err = args->num_vmas > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ err = get_mem_attrs(vm, &args->num_vmas, args->start,
+ args->start + args->range, mem_attrs);
+ if (err)
+ goto free_mem_attrs;
+
+ err = __copy_to_user(attrs_user, mem_attrs,
+ sizeof(struct drm_xe_vma_mem_attr) * args->num_vmas);
+
+free_mem_attrs:
+ kvfree(mem_attrs);
+unlock_vm:
+ up_read(&vm->lock);
+ return err;
+}
+
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
if (page_addr > xe_vma_end(vma) - 1 ||
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 340ac34936f4..b1e94b536c80 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -195,7 +195,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 03adfdc20dde..6ff9ff0c09dd 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -82,6 +82,7 @@ extern "C" {
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
* - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS
*/
/*
@@ -104,6 +105,7 @@ extern "C" {
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_VMAS_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -120,6 +122,7 @@ extern "C" {
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -2066,6 +2069,85 @@ struct drm_xe_madvise {
};
+struct drm_xe_vma_mem_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the vma */
+ __u64 start;
+
+ /** @size: end of the vma */
+ __u64 end;
+
+ struct {
+ struct {
+ /** @val: value of atomic operation*/
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ struct {
+ /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } purge_state_val;
+
+ struct {
+ /** @pat_index */
+ __u32 val;
+
+ /** @reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ __u32 devmem_fd;
+
+ __u32 migration_policy;
+ } preferred_mem_loc;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_vmas_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_ATTRIBUTES
+ *
+ * Get memory attributes to a virtual address range
+ */
+struct drm_xe_vm_query_vmas_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_vmas: number of vmas in range returned in @num_vmas */
+ __u32 num_vmas;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @size: size of the virtual address range */
+ __u64 range;
+
+ /**
+ * @vector_of_ops: userptr to array of struct
+ * drm_xe_vma_mem_attr
+ */
+ __u64 vector_of_vma_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
2025-05-27 16:40 ` [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
@ 2025-05-28 17:02 ` Souza, Jose
2025-05-30 1:11 ` kernel test robot
2025-05-30 4:29 ` Matthew Brost
2 siblings, 0 replies; 72+ messages in thread
From: Souza, Jose @ 2025-05-28 17:02 UTC (permalink / raw)
To: intel-xe@lists.freedesktop.org, Ghimiray, Himal Prasad
On Tue, 2025-05-27 at 22:10 +0530, Himal Prasad Ghimiray wrote:
> Introduce the DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS ioctl to allow userspace
> to query memory attributes of VMAs within a specified virtual address
> range.
> If num_vmas == 0 and vector_of_vma_mem_attr == NULL, the ioctl returns
> the number of VMAs in the specified range.
> If num_vmas > 0 and a valid user pointer is provided in
> vector_of_vma_mem_attr, the ioctl fills the buffer with memory
> attributes for each VMA.
> This two-step interface allows userspace to first query the required
> buffer size, then retrieve detailed attributes efficiently.
>
> v2 (Matthew Brost)
> - Use same ioctl to overload functionality
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_device.c | 1 +
> drivers/gpu/drm/xe/xe_vm.c | 87 ++++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm.h | 2 +-
> include/uapi/drm/xe_drm.h | 82 ++++++++++++++++++++++++++++++++
> 4 files changed, 171 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index b9791c614749..8c965d15c187 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -199,6 +199,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(XE_VM_QUERY_VMAS_ATTRS, xe_vm_query_vmas_attrs_ioctl, DRM_RENDER_ALLOW),
> };
>
> static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 4520e475399e..9611d7ca2bed 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2162,6 +2162,93 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
> return err;
> }
>
> +static void xe_vm_query_vmas(struct xe_vm *vm, u32 *num_vmas, u64 start, u64 end)
> +{
> + struct drm_gpuva *gpuva;
> +
> + lockdep_assert_held(&vm->lock);
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
> + (*num_vmas)++;
> +}
> +
> +static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
> + u64 end, struct drm_xe_vma_mem_attr *mem_attrs)
> +{
> + struct drm_gpuva *gpuva;
> + int i = 0;
> +
> + lockdep_assert_held(&vm->lock);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (i == *num_vmas)
> + return -EINVAL;
> +
> + mem_attrs[i].start = xe_vma_start(vma);
> + mem_attrs[i].end = xe_vma_end(vma);
> + mem_attrs[i].atomic.val = vma->attr.atomic_access;
> + mem_attrs[i].pat_index.val = vma->attr.pat_index;
> + mem_attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
> + mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
> +
> + i++;
> + }
> +
> + if (i < (*num_vmas - 1))
> + *num_vmas = i;
> + return 0;
> +}
> +
> +int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> + struct xe_device *xe = to_xe_device(dev);
> + struct xe_file *xef = to_xe_file(file);
> + struct drm_xe_vma_mem_attr *mem_attrs;
> + struct drm_xe_vm_query_vmas_attr *args = data;
> + u64 __user *attrs_user = NULL;
> + struct xe_vm *vm;
> + int err = 0;
> +
> + if (XE_IOCTL_DBG(xe, args->num_vmas < 0))
> + return -EINVAL;
> +
> + vm = xe_vm_lookup(xef, args->vm_id);
> + if (XE_IOCTL_DBG(xe, !vm))
> + return -EINVAL;
> +
> + down_read(&vm->lock);
> +
> + attrs_user = u64_to_user_ptr(args->vector_of_vma_mem_attr);
> +
> + if (args->num_vmas == 0 && !attrs_user) {
> + xe_vm_query_vmas(vm, &args->num_vmas, args->start, args->start + args->range);
> + goto unlock_vm;
> + }
> +
> + mem_attrs = kvmalloc_array(args->num_vmas, sizeof(struct drm_xe_vma_mem_attr),
> + GFP_KERNEL | __GFP_ACCOUNT |
> + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + if (!mem_attrs) {
> + err = args->num_vmas > 1 ? -ENOBUFS : -ENOMEM;
> + goto unlock_vm;
> + }
> +
> + err = get_mem_attrs(vm, &args->num_vmas, args->start,
> + args->start + args->range, mem_attrs);
> + if (err)
> + goto free_mem_attrs;
> +
> + err = __copy_to_user(attrs_user, mem_attrs,
> + sizeof(struct drm_xe_vma_mem_attr) * args->num_vmas);
> +
> +free_mem_attrs:
> + kvfree(mem_attrs);
> +unlock_vm:
> + up_read(&vm->lock);
> + return err;
> +}
> +
> static bool vma_matches(struct xe_vma *vma, u64 page_addr)
> {
> if (page_addr > xe_vma_end(vma) - 1 ||
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 340ac34936f4..b1e94b536c80 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -195,7 +195,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> -
> +int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
> void xe_vm_close_and_put(struct xe_vm *vm);
>
> static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 03adfdc20dde..6ff9ff0c09dd 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -82,6 +82,7 @@ extern "C" {
> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> * - &DRM_IOCTL_XE_OBSERVATION
> * - &DRM_IOCTL_XE_MADVISE
> + * - &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS
> */
>
> /*
> @@ -104,6 +105,7 @@ extern "C" {
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> #define DRM_XE_OBSERVATION 0x0b
> #define DRM_XE_MADVISE 0x0c
> +#define DRM_XE_VM_QUERY_VMAS_ATTRS 0x0d
>
> /* Must be kept compact -- no holes */
>
> @@ -120,6 +122,7 @@ extern "C" {
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> #define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
> +#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -2066,6 +2069,85 @@ struct drm_xe_madvise {
>
> };
>
> +struct drm_xe_vma_mem_attr {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @start: start of the vma */
> + __u64 start;
> +
> + /** @size: end of the vma */
> + __u64 end;
> +
> + struct {
> + struct {
> + /** @val: value of atomic operation*/
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } atomic;
> +
> + struct {
> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } purge_state_val;
Looks like purge_state_val is not implemented but the get uAPI looks good to me, so this part is
Acked-by: José Roberto de Souza <jose.souza@intel.com>
> +
> + struct {
> + /** @pat_index */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } pat_index;
> +
> + /** @preferred_mem_loc: preferred memory location */
> + struct {
> + __u32 devmem_fd;
> +
> + __u32 migration_policy;
> + } preferred_mem_loc;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +};
> +
> +/**
> + * struct drm_xe_vm_query_vmas_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_ATTRIBUTES
> + *
> + * Get memory attributes to a virtual address range
> + */
> +struct drm_xe_vm_query_vmas_attr {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @vm_id: vm_id of the virtual range */
> + __u32 vm_id;
> +
> + /** @num_vmas: number of vmas in range returned in @num_vmas */
> + __u32 num_vmas;
> +
> + /** @start: start of the virtual address range */
> + __u64 start;
> +
> + /** @size: size of the virtual address range */
> + __u64 range;
> +
> + /**
> + * @vector_of_ops: userptr to array of struct
> + * drm_xe_vma_mem_attr
> + */
> + __u64 vector_of_vma_mem_attr;
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +
> +};
> +
> #if defined(__cplusplus)
> }
> #endif
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
2025-05-27 16:40 ` [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
2025-05-28 17:02 ` Souza, Jose
@ 2025-05-30 1:11 ` kernel test robot
2025-05-30 4:29 ` Matthew Brost
2 siblings, 0 replies; 72+ messages in thread
From: kernel test robot @ 2025-05-30 1:11 UTC (permalink / raw)
To: Himal Prasad Ghimiray, intel-xe; +Cc: oe-kbuild-all, Himal Prasad Ghimiray
Hi Himal,
kernel test robot noticed the following build warnings:
[auto build test WARNING on drm-xe/drm-xe-next]
[also build test WARNING on next-20250529]
[cannot apply to linus/master v6.15]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Himal-Prasad-Ghimiray/Introduce-drm_gpuvm_sm_map_ops_flags-enums-for-sm_map_ops/20250528-041919
base: https://gitlab.freedesktop.org/drm/xe/kernel.git drm-xe-next
patch link: https://lore.kernel.org/r/20250527164003.1068118-18-himal.prasad.ghimiray%40intel.com
patch subject: [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
config: loongarch-randconfig-r073-20250529 (https://download.01.org/0day-ci/archive/20250530/202505300820.fmdHkQH6-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 15.1.0
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202505300820.fmdHkQH6-lkp@intel.com/
New smatch warnings:
drivers/gpu/drm/xe/xe_vm.c:2213 xe_vm_query_vmas_attrs_ioctl() warn: unsigned 'args->num_vmas' is never less than zero.
drivers/gpu/drm/xe/xe_vm.c:2249 xe_vm_query_vmas_attrs_ioctl() warn: maybe return -EFAULT instead of the bytes remaining?
Old smatch warnings:
drivers/gpu/drm/xe/xe_vm.c:2583 new_vma() error: we previously assumed 'bo' could be null (see line 2557)
drivers/gpu/drm/xe/xe_vm.c:2595 new_vma() error: 'vma' dereferencing possible ERR_PTR()
drivers/gpu/drm/xe/xe_vm.c:3033 prefetch_ranges() warn: iterator 'i' not incremented
drivers/gpu/drm/xe/xe_vm.c:4035 xe_vm_range_tilemask_tlb_invalidation() error: uninitialized symbol 'err'.
arch/loongarch/include/asm/atomic.h:135 arch_atomic_fetch_add_unless() warn: inconsistent indenting
drivers/gpu/drm/xe/xe_vm.c:4241 xe_vm_snapshot_print() warn: passing zero to 'PTR_ERR'
drivers/gpu/drm/xe/xe_vm.c:4402 xe_vm_alloc_madvise_vma() warn: variable dereferenced before check 'ops' (see line 4341)
vim +2213 drivers/gpu/drm/xe/xe_vm.c
2202
2203 int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2204 {
2205 struct xe_device *xe = to_xe_device(dev);
2206 struct xe_file *xef = to_xe_file(file);
2207 struct drm_xe_vma_mem_attr *mem_attrs;
2208 struct drm_xe_vm_query_vmas_attr *args = data;
2209 u64 __user *attrs_user = NULL;
2210 struct xe_vm *vm;
2211 int err = 0;
2212
> 2213 if (XE_IOCTL_DBG(xe, args->num_vmas < 0))
2214 return -EINVAL;
2215
2216 vm = xe_vm_lookup(xef, args->vm_id);
2217 if (XE_IOCTL_DBG(xe, !vm))
2218 return -EINVAL;
2219
2220 down_read(&vm->lock);
2221
2222 attrs_user = u64_to_user_ptr(args->vector_of_vma_mem_attr);
2223
2224 if (args->num_vmas == 0 && !attrs_user) {
2225 xe_vm_query_vmas(vm, &args->num_vmas, args->start, args->start + args->range);
2226 goto unlock_vm;
2227 }
2228
2229 mem_attrs = kvmalloc_array(args->num_vmas, sizeof(struct drm_xe_vma_mem_attr),
2230 GFP_KERNEL | __GFP_ACCOUNT |
2231 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2232 if (!mem_attrs) {
2233 err = args->num_vmas > 1 ? -ENOBUFS : -ENOMEM;
2234 goto unlock_vm;
2235 }
2236
2237 err = get_mem_attrs(vm, &args->num_vmas, args->start,
2238 args->start + args->range, mem_attrs);
2239 if (err)
2240 goto free_mem_attrs;
2241
2242 err = __copy_to_user(attrs_user, mem_attrs,
2243 sizeof(struct drm_xe_vma_mem_attr) * args->num_vmas);
2244
2245 free_mem_attrs:
2246 kvfree(mem_attrs);
2247 unlock_vm:
2248 up_read(&vm->lock);
> 2249 return err;
2250 }
2251
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
2025-05-27 16:40 ` [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
2025-05-28 17:02 ` Souza, Jose
2025-05-30 1:11 ` kernel test robot
@ 2025-05-30 4:29 ` Matthew Brost
2 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-30 4:29 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:10:01PM +0530, Himal Prasad Ghimiray wrote:
> Introduce the DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS ioctl to allow userspace
> to query memory attributes of VMAs within a specified virtual address
> range.
> If num_vmas == 0 and vector_of_vma_mem_attr == NULL, the ioctl returns
> the number of VMAs in the specified range.
> If num_vmas > 0 and a valid user pointer is provided in
> vector_of_vma_mem_attr, the ioctl fills the buffer with memory
> attributes for each VMA.
> This two-step interface allows userspace to first query the required
> buffer size, then retrieve detailed attributes efficiently.
>
> v2 (Matthew Brost)
> - Use same ioctl to overload functionality
>
I thought we landed on this was not needed? Or if it is we can add it
later. Anyways this does look better than previous rev if needed (not a
complete review).
Matt
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_device.c | 1 +
> drivers/gpu/drm/xe/xe_vm.c | 87 ++++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm.h | 2 +-
> include/uapi/drm/xe_drm.h | 82 ++++++++++++++++++++++++++++++++
> 4 files changed, 171 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index b9791c614749..8c965d15c187 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -199,6 +199,7 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
> DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
> + DRM_IOCTL_DEF_DRV(XE_VM_QUERY_VMAS_ATTRS, xe_vm_query_vmas_attrs_ioctl, DRM_RENDER_ALLOW),
> };
>
> static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 4520e475399e..9611d7ca2bed 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2162,6 +2162,93 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
> return err;
> }
>
> +static void xe_vm_query_vmas(struct xe_vm *vm, u32 *num_vmas, u64 start, u64 end)
> +{
> + struct drm_gpuva *gpuva;
> +
> + lockdep_assert_held(&vm->lock);
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
> + (*num_vmas)++;
> +}
> +
> +static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
> + u64 end, struct drm_xe_vma_mem_attr *mem_attrs)
> +{
> + struct drm_gpuva *gpuva;
> + int i = 0;
> +
> + lockdep_assert_held(&vm->lock);
> +
> + drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
> + struct xe_vma *vma = gpuva_to_vma(gpuva);
> +
> + if (i == *num_vmas)
> + return -EINVAL;
> +
> + mem_attrs[i].start = xe_vma_start(vma);
> + mem_attrs[i].end = xe_vma_end(vma);
> + mem_attrs[i].atomic.val = vma->attr.atomic_access;
> + mem_attrs[i].pat_index.val = vma->attr.pat_index;
> + mem_attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
> + mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
> +
> + i++;
> + }
> +
> + if (i < (*num_vmas - 1))
> + *num_vmas = i;
> + return 0;
> +}
> +
> +int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> + struct xe_device *xe = to_xe_device(dev);
> + struct xe_file *xef = to_xe_file(file);
> + struct drm_xe_vma_mem_attr *mem_attrs;
> + struct drm_xe_vm_query_vmas_attr *args = data;
> + u64 __user *attrs_user = NULL;
> + struct xe_vm *vm;
> + int err = 0;
> +
> + if (XE_IOCTL_DBG(xe, args->num_vmas < 0))
> + return -EINVAL;
> +
> + vm = xe_vm_lookup(xef, args->vm_id);
> + if (XE_IOCTL_DBG(xe, !vm))
> + return -EINVAL;
> +
> + down_read(&vm->lock);
> +
> + attrs_user = u64_to_user_ptr(args->vector_of_vma_mem_attr);
> +
> + if (args->num_vmas == 0 && !attrs_user) {
> + xe_vm_query_vmas(vm, &args->num_vmas, args->start, args->start + args->range);
> + goto unlock_vm;
> + }
> +
> + mem_attrs = kvmalloc_array(args->num_vmas, sizeof(struct drm_xe_vma_mem_attr),
> + GFP_KERNEL | __GFP_ACCOUNT |
> + __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + if (!mem_attrs) {
> + err = args->num_vmas > 1 ? -ENOBUFS : -ENOMEM;
> + goto unlock_vm;
> + }
> +
> + err = get_mem_attrs(vm, &args->num_vmas, args->start,
> + args->start + args->range, mem_attrs);
> + if (err)
> + goto free_mem_attrs;
> +
> + err = __copy_to_user(attrs_user, mem_attrs,
> + sizeof(struct drm_xe_vma_mem_attr) * args->num_vmas);
> +
> +free_mem_attrs:
> + kvfree(mem_attrs);
> +unlock_vm:
> + up_read(&vm->lock);
> + return err;
> +}
> +
> static bool vma_matches(struct xe_vma *vma, u64 page_addr)
> {
> if (page_addr > xe_vma_end(vma) - 1 ||
> diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> index 340ac34936f4..b1e94b536c80 100644
> --- a/drivers/gpu/drm/xe/xe_vm.h
> +++ b/drivers/gpu/drm/xe/xe_vm.h
> @@ -195,7 +195,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
> struct drm_file *file);
> -
> +int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
> void xe_vm_close_and_put(struct xe_vm *vm);
>
> static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 03adfdc20dde..6ff9ff0c09dd 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -82,6 +82,7 @@ extern "C" {
> * - &DRM_IOCTL_XE_WAIT_USER_FENCE
> * - &DRM_IOCTL_XE_OBSERVATION
> * - &DRM_IOCTL_XE_MADVISE
> + * - &DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS
> */
>
> /*
> @@ -104,6 +105,7 @@ extern "C" {
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> #define DRM_XE_OBSERVATION 0x0b
> #define DRM_XE_MADVISE 0x0c
> +#define DRM_XE_VM_QUERY_VMAS_ATTRS 0x0d
>
> /* Must be kept compact -- no holes */
>
> @@ -120,6 +122,7 @@ extern "C" {
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> #define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
> #define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
> +#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -2066,6 +2069,85 @@ struct drm_xe_madvise {
>
> };
>
> +struct drm_xe_vma_mem_attr {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @start: start of the vma */
> + __u64 start;
> +
> + /** @size: end of the vma */
> + __u64 end;
> +
> + struct {
> + struct {
> + /** @val: value of atomic operation*/
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } atomic;
> +
> + struct {
> + /** @val: value for DRM_XE_VMA_ATTR_PURGEABLE_STATE */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } purge_state_val;
> +
> + struct {
> + /** @pat_index */
> + __u32 val;
> +
> + /** @reserved: Reserved */
> + __u32 reserved;
> + } pat_index;
> +
> + /** @preferred_mem_loc: preferred memory location */
> + struct {
> + __u32 devmem_fd;
> +
> + __u32 migration_policy;
> + } preferred_mem_loc;
> + };
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +};
> +
> +/**
> + * struct drm_xe_vm_query_vmas_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_ATTRIBUTES
> + *
> + * Get memory attributes to a virtual address range
> + */
> +struct drm_xe_vm_query_vmas_attr {
> + /** @extensions: Pointer to the first extension struct, if any */
> + __u64 extensions;
> +
> + /** @vm_id: vm_id of the virtual range */
> + __u32 vm_id;
> +
> + /** @num_vmas: number of vmas in range returned in @num_vmas */
> + __u32 num_vmas;
> +
> + /** @start: start of the virtual address range */
> + __u64 start;
> +
> + /** @size: size of the virtual address range */
> + __u64 range;
> +
> + /**
> + * @vector_of_ops: userptr to array of struct
> + * drm_xe_vma_mem_attr
> + */
> + __u64 vector_of_vma_mem_attr;
> +
> + /** @reserved: Reserved */
> + __u64 reserved[2];
> +
> +};
> +
> #if defined(__cplusplus)
> }
> #endif
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (16 preceding siblings ...)
2025-05-27 16:40 ` [PATCH v3 17/19] drm/xe/uapi: Add UAPI for querying VMA count and memory attributes Himal Prasad Ghimiray
@ 2025-05-27 16:40 ` Himal Prasad Ghimiray
2025-05-28 23:47 ` Matthew Brost
2025-05-27 16:40 ` [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
` (8 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:40 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray, Matthew Brost
A single BO can be linked to multiple VMAs, making VMA attributes
insufficient for determining the placement and PTE update attributes
of the BO. To address this, an attributes field has been added to the
BO.
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
drivers/gpu/drm/xe/xe_bo_types.h | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index eb5e83c5f233..70960368a5a8 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -62,6 +62,11 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /** @attr: User controlled attributes for bo */
+ struct {
+ /** @atomic_access: type of atomic access bo needs */
+ u32 atomic_access;
+ } attr;
/**
* @pxp_key_instance: PXP key instance this BO was created against. A
* 0 in this variable indicates that the BO does not use PXP encryption.
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo
2025-05-27 16:40 ` [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
@ 2025-05-28 23:47 ` Matthew Brost
2025-05-29 2:29 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 23:47 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:10:02PM +0530, Himal Prasad Ghimiray wrote:
> A single BO can be linked to multiple VMAs, making VMA attributes
> insufficient for determining the placement and PTE update attributes
> of the BO. To address this, an attributes field has been added to the
> BO.
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> ---
> drivers/gpu/drm/xe/xe_bo_types.h | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
> index eb5e83c5f233..70960368a5a8 100644
> --- a/drivers/gpu/drm/xe/xe_bo_types.h
> +++ b/drivers/gpu/drm/xe/xe_bo_types.h
> @@ -62,6 +62,11 @@ struct xe_bo {
> */
> struct list_head client_link;
> #endif
> + /** @attr: User controlled attributes for bo */
> + struct {
> + /** @atomic_access: type of atomic access bo needs */
, protected by BO dma-resv lock.
Missed above in previous rev, RB stands.
Matt
> + u32 atomic_access;
> + } attr;
> /**
> * @pxp_key_instance: PXP key instance this BO was created against. A
> * 0 in this variable indicates that the BO does not use PXP encryption.
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo
2025-05-28 23:47 ` Matthew Brost
@ 2025-05-29 2:29 ` Ghimiray, Himal Prasad
0 siblings, 0 replies; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-05-29 2:29 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 05:17, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:10:02PM +0530, Himal Prasad Ghimiray wrote:
>> A single BO can be linked to multiple VMAs, making VMA attributes
>> insufficient for determining the placement and PTE update attributes
>> of the BO. To address this, an attributes field has been added to the
>> BO.
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_bo_types.h | 5 +++++
>> 1 file changed, 5 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
>> index eb5e83c5f233..70960368a5a8 100644
>> --- a/drivers/gpu/drm/xe/xe_bo_types.h
>> +++ b/drivers/gpu/drm/xe/xe_bo_types.h
>> @@ -62,6 +62,11 @@ struct xe_bo {
>> */
>> struct list_head client_link;
>> #endif
>> + /** @attr: User controlled attributes for bo */
>> + struct {
>> + /** @atomic_access: type of atomic access bo needs */
>
> , protected by BO dma-resv lock.
>
> Missed above in previous rev, RB stands.
Will add in next rev. Thanks.
>
> Matt
>
>> + u32 atomic_access;
>> + } attr;
>> /**
>> * @pxp_key_instance: PXP key instance this BO was created against. A
>> * 0 in this variable indicates that the BO does not use PXP encryption.
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread
* [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (17 preceding siblings ...)
2025-05-27 16:40 ` [PATCH v3 18/19] drm/xe/bo: Add attributes field to xe_bo Himal Prasad Ghimiray
@ 2025-05-27 16:40 ` Himal Prasad Ghimiray
2025-05-28 23:46 ` Matthew Brost
2025-05-27 21:35 ` ✓ CI.Patch_applied: success for MADVISE FOR XE Patchwork
` (7 subsequent siblings)
26 siblings, 1 reply; 72+ messages in thread
From: Himal Prasad Ghimiray @ 2025-05-27 16:40 UTC (permalink / raw)
To: intel-xe; +Cc: Himal Prasad Ghimiray
Update the bo_atomic_access based on user-provided input and determine
the migration to smem during a CPU fault
v2 (Matthew Brost)
- Avoid cpu unmapping if bo is already in smem
- check atomics on smem too for ioctl
- Add comments
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
drivers/gpu/drm/xe/xe_bo.c | 21 ++++++++++++--
drivers/gpu/drm/xe/xe_vm.c | 11 ++++++--
drivers/gpu/drm/xe/xe_vm_madvise.c | 45 ++++++++++++++++++++++++++++--
3 files changed, 69 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index d99d91fe8aa9..9072e8ae3f3e 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1662,6 +1662,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+ return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
+ bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
+}
+
static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
@@ -1670,7 +1676,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
vm_fault_t ret;
- int idx;
+ int idx, r = 0;
if (needs_rpm)
xe_pm_runtime_get(xe);
@@ -1682,8 +1688,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
if (drm_dev_enter(ddev, &idx)) {
trace_xe_bo_cpu_fault(bo);
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ if (should_migrate_to_smem(bo)) {
+ r = xe_bo_migrate(bo, XE_PL_TT);
+ if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
+ ret = VM_FAULT_NOPAGE;
+ else if (r)
+ ret = VM_FAULT_SIGBUS;
+ }
+ if (!ret)
+ ret = ttm_bo_vm_fault_reserved(vmf,
+ vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 9611d7ca2bed..1bdf85c12374 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3116,9 +3116,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
false);
- if (!err && !xe_vma_has_no_bo(vma))
- err = xe_bo_migrate(xe_vma_bo(vma),
+ if (!err && !xe_vma_has_no_bo(vma)) {
+ struct xe_bo *bo = xe_vma_bo(vma);
+
+ if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
+ bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
+ region = 1;
+
+ err = xe_bo_migrate(bo,
region_to_mem_type[region]);
+ }
break;
}
default:
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
index 0f0b94cb43f2..e048eb48826c 100644
--- a/drivers/gpu/drm/xe/xe_vm_madvise.c
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -82,15 +82,54 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
struct xe_vma **vmas, int num_vmas,
struct drm_xe_madvise_ops ops)
{
- int i;
+ struct xe_bo *bo;
+ int err, i;
xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
- for (i = 0; i < num_vmas; i++)
+ for (i = 0; i < num_vmas; i++) {
vmas[i]->attr.atomic_access = ops.atomic.val;
- /*TODO: handle bo backed vmas */
+
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo)
+ continue;
+
+ if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM)))
+ return -EINVAL;
+
+ /* NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+ if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
+ !(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1) &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM &&
+ xe->info.has_device_atomics_on_smem)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
+ (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
+ (!(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1)))))
+ return -EINVAL;
+
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return err;
+ bo->attr.atomic_access = ops.atomic.val;
+
+ /* Invalidate cpu page table, so bo can migrate to smem in next access */
+ if (xe_bo_is_vram(bo) &&
+ (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
+ bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL))
+ ttm_bo_unmap_virtual(&bo->ttm);
+
+ xe_bo_unlock(bo);
+ }
return 0;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 72+ messages in thread* Re: [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise
2025-05-27 16:40 ` [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
@ 2025-05-28 23:46 ` Matthew Brost
2025-05-29 3:03 ` Ghimiray, Himal Prasad
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-28 23:46 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
On Tue, May 27, 2025 at 10:10:03PM +0530, Himal Prasad Ghimiray wrote:
> Update the bo_atomic_access based on user-provided input and determine
> the migration to smem during a CPU fault
>
> v2 (Matthew Brost)
> - Avoid cpu unmapping if bo is already in smem
> - check atomics on smem too for ioctl
> - Add comments
>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
> drivers/gpu/drm/xe/xe_bo.c | 21 ++++++++++++--
> drivers/gpu/drm/xe/xe_vm.c | 11 ++++++--
> drivers/gpu/drm/xe/xe_vm_madvise.c | 45 ++++++++++++++++++++++++++++--
> 3 files changed, 69 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index d99d91fe8aa9..9072e8ae3f3e 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -1662,6 +1662,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
> }
> }
>
> +static bool should_migrate_to_smem(struct xe_bo *bo)
> +{
xe_bo_assert_held, more on that in reply to previous patch.
> + return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
> +}
> +
Hmm, this is tricky. I guess this means sharded atomics on BOs do not
just work whereas for SVM they do (i.e., DRM_XE_VMA_ATOMIC_UNDEFINED
means atomics do not work for BOs but for SVM they do). I suppose this
is the current behavior. I think this will need to be documented in the
uAPI kernel doc.
> static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> {
> struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
> @@ -1670,7 +1676,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> struct xe_bo *bo = ttm_to_xe_bo(tbo);
> bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
> vm_fault_t ret;
> - int idx;
> + int idx, r = 0;
>
> if (needs_rpm)
> xe_pm_runtime_get(xe);
> @@ -1682,8 +1688,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> if (drm_dev_enter(ddev, &idx)) {
> trace_xe_bo_cpu_fault(bo);
>
> - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
> - TTM_BO_VM_NUM_PREFAULT);
> + if (should_migrate_to_smem(bo)) {
> + r = xe_bo_migrate(bo, XE_PL_TT);
> + if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
> + ret = VM_FAULT_NOPAGE;
> + else if (r)
> + ret = VM_FAULT_SIGBUS;
> + }
> + if (!ret)
> + ret = ttm_bo_vm_fault_reserved(vmf,
> + vmf->vma->vm_page_prot,
> + TTM_BO_VM_NUM_PREFAULT);
> drm_dev_exit(idx);
> } else {
> ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 9611d7ca2bed..1bdf85c12374 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -3116,9 +3116,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> err = vma_lock_and_validate(exec,
> gpuva_to_vma(op->base.prefetch.va),
> false);
> - if (!err && !xe_vma_has_no_bo(vma))
> - err = xe_bo_migrate(xe_vma_bo(vma),
> + if (!err && !xe_vma_has_no_bo(vma)) {
> + struct xe_bo *bo = xe_vma_bo(vma);
> +
> + if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> + region = 1;
I wonder if it is better to just leave region as is and let the next atomic
fault trigger the migration.
> +
> + err = xe_bo_migrate(bo,
> region_to_mem_type[region]);
> + }
> break;
> }
> default:
> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> index 0f0b94cb43f2..e048eb48826c 100644
> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> @@ -82,15 +82,54 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> struct xe_vma **vmas, int num_vmas,
> struct drm_xe_madvise_ops ops)
> {
> - int i;
> + struct xe_bo *bo;
> + int err, i;
>
> xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
> ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
>
Do you sanitize ops.atomic.val prior to this? Also do we disallow a user
setting DRM_XE_VMA_ATOMIC_UNDEFINED? If not, then this needs to be >=
DRM_XE_VMA_ATOMIC_UNDEFINED.
> - for (i = 0; i < num_vmas; i++)
> + for (i = 0; i < num_vmas; i++) {
> vmas[i]->attr.atomic_access = ops.atomic.val;
> - /*TODO: handle bo backed vmas */
> +
> + bo = xe_vma_bo(vmas[i]);
> + if (!bo)
> + continue;
> +
> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
> + !(bo->flags & XE_BO_FLAG_SYSTEM)))
> + return -EINVAL;
> +
Note when we fail here (or anywhere else in madvise), we could be in a
state where madvise has partially completed. I think that is actually ok
as nothing in madvise is fatal as we are just changing attributes. But I
think we need to document this in the uAPI kernel doc that if madvise
fails, the state of the madvise attributes is undefined.
In practice this really should never fail unless a user is giving bad
input or extreme memory pressure and kmalloc fails.
Matt
> + /* NOTE: The following atomic checks are platform-specific. For example,
> + * if a device supports CXL atomics, these may not be necessary or
> + * may behave differently.
> + */
> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
> + !(bo->flags & XE_BO_FLAG_VRAM0) &&
> + !(bo->flags & XE_BO_FLAG_VRAM1) &&
> + !(bo->flags & XE_BO_FLAG_SYSTEM &&
> + xe->info.has_device_atomics_on_smem)))
> + return -EINVAL;
> +
> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
> + (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
> + (!(bo->flags & XE_BO_FLAG_VRAM0) &&
> + !(bo->flags & XE_BO_FLAG_VRAM1)))))
> + return -EINVAL;
> +
> + err = xe_bo_lock(bo, true);
> + if (err)
> + return err;
> + bo->attr.atomic_access = ops.atomic.val;
> +
> + /* Invalidate cpu page table, so bo can migrate to smem in next access */
> + if (xe_bo_is_vram(bo) &&
> + (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL))
> + ttm_bo_unmap_virtual(&bo->ttm);
> +
> + xe_bo_unlock(bo);
> + }
> return 0;
> }
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise
2025-05-28 23:46 ` Matthew Brost
@ 2025-05-29 3:03 ` Ghimiray, Himal Prasad
2025-05-29 18:24 ` Matthew Brost
0 siblings, 1 reply; 72+ messages in thread
From: Ghimiray, Himal Prasad @ 2025-05-29 3:03 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-xe
On 29-05-2025 05:16, Matthew Brost wrote:
> On Tue, May 27, 2025 at 10:10:03PM +0530, Himal Prasad Ghimiray wrote:
>> Update the bo_atomic_access based on user-provided input and determine
>> the migration to smem during a CPU fault
>>
>> v2 (Matthew Brost)
>> - Avoid cpu unmapping if bo is already in smem
>> - check atomics on smem too for ioctl
>> - Add comments
>>
>> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
>> ---
>> drivers/gpu/drm/xe/xe_bo.c | 21 ++++++++++++--
>> drivers/gpu/drm/xe/xe_vm.c | 11 ++++++--
>> drivers/gpu/drm/xe/xe_vm_madvise.c | 45 ++++++++++++++++++++++++++++--
>> 3 files changed, 69 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
>> index d99d91fe8aa9..9072e8ae3f3e 100644
>> --- a/drivers/gpu/drm/xe/xe_bo.c
>> +++ b/drivers/gpu/drm/xe/xe_bo.c
>> @@ -1662,6 +1662,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
>> }
>> }
>>
>> +static bool should_migrate_to_smem(struct xe_bo *bo)
>> +{
>
> xe_bo_assert_held, more on that in reply to previous patch.
Sure
>
>> + return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
>> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
>> +}
>> +
>
> Hmm, this is tricky. I guess this means sharded atomics on BOs do not
> just work whereas for SVM they do (i.e., DRM_XE_VMA_ATOMIC_UNDEFINED
> means atomics do not work for BOs but for SVM they do). I suppose this
> is the current behavior. I think this will need to be document in the
> uAPI kernel doc.
Makes sense
>
>> static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>> {
>> struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
>> @@ -1670,7 +1676,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>> struct xe_bo *bo = ttm_to_xe_bo(tbo);
>> bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
>> vm_fault_t ret;
>> - int idx;
>> + int idx, r = 0;
>>
>> if (needs_rpm)
>> xe_pm_runtime_get(xe);
>> @@ -1682,8 +1688,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
>> if (drm_dev_enter(ddev, &idx)) {
>> trace_xe_bo_cpu_fault(bo);
>>
>> - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
>> - TTM_BO_VM_NUM_PREFAULT);
>> + if (should_migrate_to_smem(bo)) {
>> + r = xe_bo_migrate(bo, XE_PL_TT);
>> + if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
>> + ret = VM_FAULT_NOPAGE;
>> + else if (r)
>> + ret = VM_FAULT_SIGBUS;
>> + }
>> + if (!ret)
>> + ret = ttm_bo_vm_fault_reserved(vmf,
>> + vmf->vma->vm_page_prot,
>> + TTM_BO_VM_NUM_PREFAULT);
>> drm_dev_exit(idx);
>> } else {
>> ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
>> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
>> index 9611d7ca2bed..1bdf85c12374 100644
>> --- a/drivers/gpu/drm/xe/xe_vm.c
>> +++ b/drivers/gpu/drm/xe/xe_vm.c
>> @@ -3116,9 +3116,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
>> err = vma_lock_and_validate(exec,
>> gpuva_to_vma(op->base.prefetch.va),
>> false);
>> - if (!err && !xe_vma_has_no_bo(vma))
>> - err = xe_bo_migrate(xe_vma_bo(vma),
>> + if (!err && !xe_vma_has_no_bo(vma)) {
>> + struct xe_bo *bo = xe_vma_bo(vma);
>> +
>> + if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
>> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
>> + region = 1;
>
> I wonder if it better to just leave region as is and let the next atomic
> fault trigger the migration.
Ok. lets do it that way.
>
>> +
>> + err = xe_bo_migrate(bo,
>> region_to_mem_type[region]);
>> + }
>> break;
>> }
>> default:
>> diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> index 0f0b94cb43f2..e048eb48826c 100644
>> --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
>> +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
>> @@ -82,15 +82,54 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
>> struct xe_vma **vmas, int num_vmas,
>> struct drm_xe_madvise_ops ops)
>> {
>> - int i;
>> + struct xe_bo *bo;
>> + int err, i;
>>
>> xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
>> xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
>> ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
>>
>
> Do you sanitize ops.atomic.val prior to this? Also do we disallow a user
> setting DRM_XE_VMA_ATOMIC_UNDEFINED? If not, then this needs to be >=
> DRM_XE_VMA_ATOMIC_UNDEFINED.
Agreed it should be >= DRM_XE_VMA_ATOMIC_UNDEFINED. And instead of
assertion will sanitize it here only.
>
>> - for (i = 0; i < num_vmas; i++)
>> + for (i = 0; i < num_vmas; i++) {
>> vmas[i]->attr.atomic_access = ops.atomic.val;
>> - /*TODO: handle bo backed vmas */
>> +
>> + bo = xe_vma_bo(vmas[i]);
>> + if (!bo)
>> + continue;
>> +
>> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
>> + !(bo->flags & XE_BO_FLAG_SYSTEM)))
>> + return -EINVAL;
>> +
>
> Note when we fail here (or anywhere else in madvise), we could be in a
> state where madvise has partially completed. I think that is actually ok
> as nothing in madvise is fatal as we are just changing attributes. But I
> think we need to document this in the uAPI kernel doc that if madvise
> fails, the state of madvise attributes are undefined.
Will add in kernel-doc of uAPI.
>
> In practice this really should never fail unless a user is giving bad
> input or extreme memory pressure and kmalloc fails.
>
> Matt
>
>> + /* NOTE: The following atomic checks are platform-specific. For example,
>> + * if a device supports CXL atomics, these may not be necessary or
>> + * may behave differently.
>> + */
>> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
>> + !(bo->flags & XE_BO_FLAG_VRAM0) &&
>> + !(bo->flags & XE_BO_FLAG_VRAM1) &&
>> + !(bo->flags & XE_BO_FLAG_SYSTEM &&
>> + xe->info.has_device_atomics_on_smem)))
>> + return -EINVAL;
>> +
>> + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
>> + (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
>> + (!(bo->flags & XE_BO_FLAG_VRAM0) &&
>> + !(bo->flags & XE_BO_FLAG_VRAM1)))))
>> + return -EINVAL;
>> +
>> + err = xe_bo_lock(bo, true);
>> + if (err)
>> + return err;
>> + bo->attr.atomic_access = ops.atomic.val;
>> +
>> + /* Invalidate cpu page table, so bo can migrate to smem in next access */
>> + if (xe_bo_is_vram(bo) &&
>> + (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
>> + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL))
>> + ttm_bo_unmap_virtual(&bo->ttm);
>> +
>> + xe_bo_unlock(bo);
>> + }
>> return 0;
>> }
>>
>> --
>> 2.34.1
>>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise
2025-05-29 3:03 ` Ghimiray, Himal Prasad
@ 2025-05-29 18:24 ` Matthew Brost
2025-05-29 18:30 ` Matthew Brost
0 siblings, 1 reply; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 18:24 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
On Thu, May 29, 2025 at 08:33:39AM +0530, Ghimiray, Himal Prasad wrote:
>
>
> On 29-05-2025 05:16, Matthew Brost wrote:
> > On Tue, May 27, 2025 at 10:10:03PM +0530, Himal Prasad Ghimiray wrote:
> > > Update the bo_atomic_access based on user-provided input and determine
> > > the migration to smem during a CPU fault
> > >
> > > v2 (Matthew Brost)
> > > - Avoid cpu unmapping if bo is already in smem
> > > - check atomics on smem too for ioctl
> > > - Add comments
> > >
> > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > > ---
> > > drivers/gpu/drm/xe/xe_bo.c | 21 ++++++++++++--
> > > drivers/gpu/drm/xe/xe_vm.c | 11 ++++++--
> > > drivers/gpu/drm/xe/xe_vm_madvise.c | 45 ++++++++++++++++++++++++++++--
> > > 3 files changed, 69 insertions(+), 8 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> > > index d99d91fe8aa9..9072e8ae3f3e 100644
> > > --- a/drivers/gpu/drm/xe/xe_bo.c
> > > +++ b/drivers/gpu/drm/xe/xe_bo.c
> > > @@ -1662,6 +1662,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
> > > }
> > > }
> > > +static bool should_migrate_to_smem(struct xe_bo *bo)
> > > +{
> >
> > xe_bo_assert_held, more on that in reply to previous patch.
>
> Sure
>
> >
> > > + return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
> > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
> > > +}
> > > +
> >
> > Hmm, this is tricky. I guess this means sharded atomics on BOs do not
> > just work whereas for SVM they do (i.e., DRM_XE_VMA_ATOMIC_UNDEFINED
> > means atomics do not work for BOs but for SVM they do). I suppose this
> > is the current behavior. I think this will need to be document in the
> > uAPI kernel doc.
>
> Makes sense
>
> >
> > > static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > {
> > > struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
> > > @@ -1670,7 +1676,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > struct xe_bo *bo = ttm_to_xe_bo(tbo);
> > > bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
> > > vm_fault_t ret;
> > > - int idx;
> > > + int idx, r = 0;
> > > if (needs_rpm)
> > > xe_pm_runtime_get(xe);
> > > @@ -1682,8 +1688,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > if (drm_dev_enter(ddev, &idx)) {
> > > trace_xe_bo_cpu_fault(bo);
> > > - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
> > > - TTM_BO_VM_NUM_PREFAULT);
> > > + if (should_migrate_to_smem(bo)) {
> > > + r = xe_bo_migrate(bo, XE_PL_TT);
> > > + if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
> > > + ret = VM_FAULT_NOPAGE;
> > > + else if (r)
> > > + ret = VM_FAULT_SIGBUS;
> > > + }
> > > + if (!ret)
> > > + ret = ttm_bo_vm_fault_reserved(vmf,
> > > + vmf->vma->vm_page_prot,
> > > + TTM_BO_VM_NUM_PREFAULT);
> > > drm_dev_exit(idx);
> > > } else {
> > > ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index 9611d7ca2bed..1bdf85c12374 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -3116,9 +3116,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> > > err = vma_lock_and_validate(exec,
> > > gpuva_to_vma(op->base.prefetch.va),
> > > false);
> > > - if (!err && !xe_vma_has_no_bo(vma))
> > > - err = xe_bo_migrate(xe_vma_bo(vma),
> > > + if (!err && !xe_vma_has_no_bo(vma)) {
> > > + struct xe_bo *bo = xe_vma_bo(vma);
> > > +
> > > + if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
> > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> > > + region = 1;
> >
> > I wonder if it better to just leave region as is and let the next atomic
> > fault trigger the migration.
>
> Ok. lets do it that way.
>
> >
> > > +
> > > + err = xe_bo_migrate(bo,
> > > region_to_mem_type[region]);
> > > + }
> > > break;
> > > }
> > > default:
> > > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > index 0f0b94cb43f2..e048eb48826c 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > @@ -82,15 +82,54 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > > struct xe_vma **vmas, int num_vmas,
> > > struct drm_xe_madvise_ops ops)
> > > {
> > > - int i;
> > > + struct xe_bo *bo;
> > > + int err, i;
> > > xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> > > xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
> > > ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
> >
> > Do you sanitize ops.atomic.val prior to this? Also do we disallow a user
> > setting DRM_XE_VMA_ATOMIC_UNDEFINED? If not, then this needs to be >=
> > DRM_XE_VMA_ATOMIC_UNDEFINED.
> Agreed it should be >= DRM_XE_VMA_ATOMIC_UNDEFINED. And instead of
> assertion will sanitize it here only.
>
> >
> > > - for (i = 0; i < num_vmas; i++)
> > > + for (i = 0; i < num_vmas; i++) {
> > > vmas[i]->attr.atomic_access = ops.atomic.val;
> > > - /*TODO: handle bo backed vmas */
> > > +
> > > + bo = xe_vma_bo(vmas[i]);
> > > + if (!bo)
> > > + continue;
> > > +
> > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
> > > + !(bo->flags & XE_BO_FLAG_SYSTEM)))
> > > + return -EINVAL;
> > > +
> >
> > Note when we fail here (or anywhere else in madvise), we could be in a
> > state where madvise has partially completed. I think that is actually ok
> > as nothing in madvise is fatal as we are just changing attributes. But I
> > think we need to document this in the uAPI kernel doc that if madvise
> > fails, the state of madvise attributes are undefined.
>
> Will add in kernel-doc of uAPI.
>
Actually, on second thought, it might be better to sanitize user input
before attempting madvise. This is similar to vm_bind_ioctl_check_args.
I think that would be cleaner.
I believe we can make the failing state stable if we can avoid failures
in madvise_funcs (i.e., by returning void), which should be possible if
we take locks in non-interruptible modes (likely fine, as we’re not
doing much inside any locks) and avoid mallocs (none are used in this
series).
We’d also have to restructure this loop:
for (i = 0; i < args->num_ops; i++) {
xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
if (!vmas) {
err = -ENOMEM;
goto free_advs_ops;
}
attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
kfree(vmas);
vmas = NULL;
if (err)
goto free_advs_ops;
}
xe_vm_alloc_madvise_vma and get_vmas would run in the first loop (which
can fail), followed by a second loop that calls madvise_funcs (which
cannot fail). If the first loop fails, the worst-case scenario is that
we've split some VMAs into smaller ones, but their attributes would
remain the same as before the IOCTL.
I think this approach would be better, avoiding an unknown state on
failure.
Matt
> >
> > In practice this really should never fail unless a user is giving bad
> > input or extreme memory pressure and kmalloc fails.
> >
> > Matt
> >
> > > + /* NOTE: The following atomic checks are platform-specific. For example,
> > > + * if a device supports CXL atomics, these may not be necessary or
> > > + * may behave differently.
> > > + */
> > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
> > > + !(bo->flags & XE_BO_FLAG_VRAM0) &&
> > > + !(bo->flags & XE_BO_FLAG_VRAM1) &&
> > > + !(bo->flags & XE_BO_FLAG_SYSTEM &&
> > > + xe->info.has_device_atomics_on_smem)))
> > > + return -EINVAL;
> > > +
> > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
> > > + (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
> > > + (!(bo->flags & XE_BO_FLAG_VRAM0) &&
> > > + !(bo->flags & XE_BO_FLAG_VRAM1)))))
> > > + return -EINVAL;
> > > +
> > > + err = xe_bo_lock(bo, true);
> > > + if (err)
> > > + return err;
> > > + bo->attr.atomic_access = ops.atomic.val;
> > > +
> > > + /* Invalidate cpu page table, so bo can migrate to smem in next access */
> > > + if (xe_bo_is_vram(bo) &&
> > > + (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL))
> > > + ttm_bo_unmap_virtual(&bo->ttm);
> > > +
> > > + xe_bo_unlock(bo);
> > > + }
> > > return 0;
> > > }
> > > --
> > > 2.34.1
> > >
>
^ permalink raw reply [flat|nested] 72+ messages in thread* Re: [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise
2025-05-29 18:24 ` Matthew Brost
@ 2025-05-29 18:30 ` Matthew Brost
0 siblings, 0 replies; 72+ messages in thread
From: Matthew Brost @ 2025-05-29 18:30 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
On Thu, May 29, 2025 at 11:24:28AM -0700, Matthew Brost wrote:
> On Thu, May 29, 2025 at 08:33:39AM +0530, Ghimiray, Himal Prasad wrote:
> >
> >
> > On 29-05-2025 05:16, Matthew Brost wrote:
> > > On Tue, May 27, 2025 at 10:10:03PM +0530, Himal Prasad Ghimiray wrote:
> > > > Update the bo_atomic_access based on user-provided input and determine
> > > > the migration to smem during a CPU fault
> > > >
> > > > v2 (Matthew Brost)
> > > > - Avoid cpu unmapping if bo is already in smem
> > > > - check atomics on smem too for ioctl
> > > > - Add comments
> > > >
> > > > Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> > > > ---
> > > > drivers/gpu/drm/xe/xe_bo.c | 21 ++++++++++++--
> > > > drivers/gpu/drm/xe/xe_vm.c | 11 ++++++--
> > > > drivers/gpu/drm/xe/xe_vm_madvise.c | 45 ++++++++++++++++++++++++++++--
> > > > 3 files changed, 69 insertions(+), 8 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> > > > index d99d91fe8aa9..9072e8ae3f3e 100644
> > > > --- a/drivers/gpu/drm/xe/xe_bo.c
> > > > +++ b/drivers/gpu/drm/xe/xe_bo.c
> > > > @@ -1662,6 +1662,12 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
> > > > }
> > > > }
> > > > +static bool should_migrate_to_smem(struct xe_bo *bo)
> > > > +{
> > >
> > > xe_bo_assert_held, more on that in reply to previous patch.
> >
> > Sure
> >
> > >
> > > > + return bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL ||
> > > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU;
> > > > +}
> > > > +
> > >
> > > Hmm, this is tricky. I guess this means sharded atomics on BOs do not
> > > just work whereas for SVM they do (i.e., DRM_XE_VMA_ATOMIC_UNDEFINED
> > > means atomics do not work for BOs but for SVM they do). I suppose this
> > > is the current behavior. I think this will need to be document in the
> > > uAPI kernel doc.
> >
> > Makes sense
> >
> > >
> > > > static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > > {
> > > > struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
> > > > @@ -1670,7 +1676,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > > struct xe_bo *bo = ttm_to_xe_bo(tbo);
> > > > bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
> > > > vm_fault_t ret;
> > > > - int idx;
> > > > + int idx, r = 0;
> > > > if (needs_rpm)
> > > > xe_pm_runtime_get(xe);
> > > > @@ -1682,8 +1688,17 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
> > > > if (drm_dev_enter(ddev, &idx)) {
> > > > trace_xe_bo_cpu_fault(bo);
> > > > - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
> > > > - TTM_BO_VM_NUM_PREFAULT);
> > > > + if (should_migrate_to_smem(bo)) {
> > > > + r = xe_bo_migrate(bo, XE_PL_TT);
> > > > + if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
> > > > + ret = VM_FAULT_NOPAGE;
> > > > + else if (r)
> > > > + ret = VM_FAULT_SIGBUS;
> > > > + }
> > > > + if (!ret)
> > > > + ret = ttm_bo_vm_fault_reserved(vmf,
> > > > + vmf->vma->vm_page_prot,
> > > > + TTM_BO_VM_NUM_PREFAULT);
> > > > drm_dev_exit(idx);
> > > > } else {
> > > > ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > > index 9611d7ca2bed..1bdf85c12374 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > > @@ -3116,9 +3116,16 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> > > > err = vma_lock_and_validate(exec,
> > > > gpuva_to_vma(op->base.prefetch.va),
> > > > false);
> > > > - if (!err && !xe_vma_has_no_bo(vma))
> > > > - err = xe_bo_migrate(xe_vma_bo(vma),
> > > > + if (!err && !xe_vma_has_no_bo(vma)) {
> > > > + struct xe_bo *bo = xe_vma_bo(vma);
> > > > +
> > > > + if (region == 0 && !vm->xe->info.has_device_atomics_on_smem &&
> > > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_DEVICE)
> > > > + region = 1;
> > >
> > > I wonder if it better to just leave region as is and let the next atomic
> > > fault trigger the migration.
> >
> > Ok. lets do it that way.
> >
> > >
> > > > +
> > > > + err = xe_bo_migrate(bo,
> > > > region_to_mem_type[region]);
> > > > + }
> > > > break;
> > > > }
> > > > default:
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > > index 0f0b94cb43f2..e048eb48826c 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > > +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
> > > > @@ -82,15 +82,54 @@ static int madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
> > > > struct xe_vma **vmas, int num_vmas,
> > > > struct drm_xe_madvise_ops ops)
> > > > {
> > > > - int i;
> > > > + struct xe_bo *bo;
> > > > + int err, i;
> > > > xe_assert(vm->xe, ops.type == DRM_XE_VMA_ATTR_ATOMIC);
> > > > xe_assert(vm->xe, ops.atomic.val > DRM_XE_VMA_ATOMIC_UNDEFINED &&
> > > > ops.atomic.val <= DRM_XE_VMA_ATOMIC_CPU);
> > >
> > > Do you sanitize ops.atomic.val prior to this? Also do we disallow a user
> > > setting DRM_XE_VMA_ATOMIC_UNDEFINED? If not, then this needs to be >=
> > > DRM_XE_VMA_ATOMIC_UNDEFINED.
> > Agreed it should be >= DRM_XE_VMA_ATOMIC_UNDEFINED. And instead of
> > assertion will sanitize it here only.
> >
> > >
> > > > - for (i = 0; i < num_vmas; i++)
> > > > + for (i = 0; i < num_vmas; i++) {
> > > > vmas[i]->attr.atomic_access = ops.atomic.val;
> > > > - /*TODO: handle bo backed vmas */
> > > > +
> > > > + bo = xe_vma_bo(vmas[i]);
> > > > + if (!bo)
> > > > + continue;
> > > > +
> > > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_CPU &&
> > > > + !(bo->flags & XE_BO_FLAG_SYSTEM)))
> > > > + return -EINVAL;
> > > > +
> > >
> > > Note when we fail here (or anywhere else in madvise), we could be in a
> > > state where madvise has partially completed. I think that is actually ok
> > > as nothing in madvise is fatal as we are just changing attributes. But I
> > > think we need to document this in the uAPI kernel doc that if madvise
> > > fails, the state of madvise attributes are undefined.
> >
> > Will add in kernel-doc of uAPI.
> >
>
> Actually, on second thought, it might be better to sanitize user input
> before attempting madvise. This is similar to vm_bind_ioctl_check_args.
> I think that would be cleaner.
>
> I believe we can make the failing state stable if we can avoid failures
> in madvise_funcs (i.e., by returning void), which should be possible if
> we take locks in non-interruptible modes (likely fine, as we’re not
> doing much inside any locks) and avoid mallocs (none are used in this
> series).
>
> We’d also have to restructure this loop:
>
> for (i = 0; i < args->num_ops; i++) {
> xe_vm_alloc_madvise_vma(vm, advs_ops[i].start, advs_ops[i].range);
>
> vmas = get_vmas(vm, &num_vmas, advs_ops[i].start, advs_ops[i].range);
> if (!vmas) {
> err = -ENOMEM;
> goto free_advs_ops;
> }
>
> attr_type = array_index_nospec(advs_ops[i].type, ARRAY_SIZE(madvise_funcs));
> err = madvise_funcs[attr_type](xe, vm, vmas, num_vmas, advs_ops[i]);
>
> kfree(vmas);
> vmas = NULL;
>
> if (err)
> goto free_advs_ops;
> }
>
> xe_vm_alloc_madvise_vma and get_vmas would run in the first loop (which
> can fail), followed by a second loop that calls madvise_funcs (which
> cannot fail). If the first loop fails, the worst-case scenario is that
> we've split some VMAs into smaller ones, but their attributes would
> remain the same as before the IOCTL.
>
Ah, as soon as I typed this, I realized this doesn't work as this is
iterative process (each xe_vm_alloc_madvise_vma depends on the previous
madvise_funcs being done). So scratch the loop restructure but I still
think validating user input prior to madvise_funcs is a good idea, along
with madvise_funcs not being able to fail if possible.
Matt
> I think this approach would be better avoiding a unknown state on
> failure.
>
> Matt
>
> > >
> > > In practice this really should never fail unless a user is giving bad
> > > input or extreme memory pressure and kmalloc fails.
> > >
> > > Matt
> > >
> > > > + /* NOTE: The following atomic checks are platform-specific. For example,
> > > > + * if a device supports CXL atomics, these may not be necessary or
> > > > + * may behave differently.
> > > > + */
> > > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_DEVICE &&
> > > > + !(bo->flags & XE_BO_FLAG_VRAM0) &&
> > > > + !(bo->flags & XE_BO_FLAG_VRAM1) &&
> > > > + !(bo->flags & XE_BO_FLAG_SYSTEM &&
> > > > + xe->info.has_device_atomics_on_smem)))
> > > > + return -EINVAL;
> > > > +
> > > > + if (XE_IOCTL_DBG(xe, ops.atomic.val == DRM_XE_VMA_ATOMIC_GLOBAL &&
> > > > + (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
> > > > + (!(bo->flags & XE_BO_FLAG_VRAM0) &&
> > > > + !(bo->flags & XE_BO_FLAG_VRAM1)))))
> > > > + return -EINVAL;
> > > > +
> > > > + err = xe_bo_lock(bo, true);
> > > > + if (err)
> > > > + return err;
> > > > + bo->attr.atomic_access = ops.atomic.val;
> > > > +
> > > > + /* Invalidate cpu page table, so bo can migrate to smem in next access */
> > > > + if (xe_bo_is_vram(bo) &&
> > > > + (bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_CPU ||
> > > > + bo->attr.atomic_access == DRM_XE_VMA_ATOMIC_GLOBAL))
> > > > + ttm_bo_unmap_virtual(&bo->ttm);
> > > > +
> > > > + xe_bo_unlock(bo);
> > > > + }
> > > > return 0;
> > > > }
> > > > --
> > > > 2.34.1
> > > >
> >
^ permalink raw reply [flat|nested] 72+ messages in thread
* ✓ CI.Patch_applied: success for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (18 preceding siblings ...)
2025-05-27 16:40 ` [PATCH v3 19/19] drm/xe/bo: Update atomic_access attribute on madvise Himal Prasad Ghimiray
@ 2025-05-27 21:35 ` Patchwork
2025-05-27 21:35 ` ✗ CI.checkpatch: warning " Patchwork
` (6 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-27 21:35 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : success
== Summary ==
=== Applying kernel patches on branch 'drm-tip' with base: ===
Base commit: 07b6736f75df drm-tip: 2025y-05m-27d-20h-13m-12s UTC integration manifest
=== git am output follows ===
Applying: Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops
Applying: drm/xe/uapi: Add madvise interface
Applying: drm/xe/vm: Add attributes struct as member of vma
Applying: drm/xe/vma: Move pat_index to vma attributes
Applying: drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
Applying: drm/gpusvm: Make drm_gpusvm_for_each_* macros public
Applying: drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
Applying: drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
Applying: drm/xe/svm: Split system allocator vma incase of madvise call
Applying: drm/xe: Implement madvise ioctl for xe
Applying: drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
Applying: drm/xe/svm : Add svm ranges migration policy on atomic access
Applying: drm/xe/madvise: Update migration policy based on preferred location
Applying: drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
Applying: drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
Applying: drm/xe/svm: Consult madvise preferred location in prefetch
Applying: drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
Applying: drm/xe/bo: Add attributes field to xe_bo
Applying: drm/xe/bo: Update atomic_access attribute on madvise
^ permalink raw reply [flat|nested] 72+ messages in thread* ✗ CI.checkpatch: warning for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (19 preceding siblings ...)
2025-05-27 21:35 ` ✓ CI.Patch_applied: success for MADVISE FOR XE Patchwork
@ 2025-05-27 21:35 ` Patchwork
2025-05-27 21:37 ` ✓ CI.KUnit: success " Patchwork
` (5 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-27 21:35 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : warning
== Summary ==
+ KERNEL=/kernel
+ git clone https://gitlab.freedesktop.org/drm/maintainer-tools mt
Cloning into 'mt'...
warning: redirecting to https://gitlab.freedesktop.org/drm/maintainer-tools.git/
+ git -C mt rev-list -n1 origin/master
202708c00696422fd217223bb679a353a5936e23
+ cd /kernel
+ git config --global --add safe.directory /kernel
+ git log -n1
commit 8efc3f700df0efaf26d998c615ee03862a64b1dd
Author: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Date: Tue May 27 22:10:03 2025 +0530
drm/xe/bo: Update atomic_access attribute on madvise
Update the bo_atomic_access based on user-provided input and determine
the migration to smem during a CPU fault
v2 (Matthew Brost)
- Avoid cpu unmapping if bo is already in smem
- check atomics on smem too for ioctl
- Add comments
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+ /mt/dim checkpatch 07b6736f75df3e60d5e20eda53ecb97f6d9a66c0 drm-intel
7f6424798069 Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops
e6fe68af7a54 drm/xe/uapi: Add madvise interface
-:37: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#37: FILE: include/uapi/drm/xe_drm.h:122:
+#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
total: 0 errors, 1 warnings, 0 checks, 121 lines checked
819ba9e2e009 drm/xe/vm: Add attributes struct as member of vma
d245db89a660 drm/xe/vma: Move pat_index to vma attributes
3d926afb1501 drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
93cca09e422f drm/gpusvm: Make drm_gpusvm_for_each_* macros public
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'range__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'next__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:257: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'notifier__' - possible side-effects?
#257: FILE: include/drm/drm_gpusvm.h:581:
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-:257: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#257: FILE: include/drm/drm_gpusvm.h:581:
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'notifier__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'next__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
total: 0 errors, 0 warnings, 8 checks, 248 lines checked
ae3927698d3b drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
355637e9a1a2 drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
7812ed76f877 drm/xe/svm: Split system allocator vma incase of madvise call
a12baf509b9b drm/xe: Implement madvise ioctl for xe
-:57: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#57:
new file mode 100644
total: 0 errors, 1 warnings, 0 checks, 300 lines checked
e7fca0cbbd90 drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
8908087fe603 drm/xe/svm : Add svm ranges migration policy on atomic access
df6fdefcfbf8 drm/xe/madvise: Update migration policy based on preferred location
d081210b1f13 drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
4a216efb8ae6 drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
693c55c96b16 drm/xe/svm: Consult madvise preferred location in prefetch
2397fcb48be1 drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
-:71: WARNING:LONG_LINE: line length of 107 exceeds 100 columns
#71: FILE: drivers/gpu/drm/xe/xe_vm.c:2193:
+ mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
-:170: WARNING:LONG_LINE: line length of 137 exceeds 100 columns
#170: FILE: include/uapi/drm/xe_drm.h:125:
+#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
total: 0 errors, 2 warnings, 0 checks, 214 lines checked
8a11d0006b65 drm/xe/bo: Add attributes field to xe_bo
8efc3f700df0 drm/xe/bo: Update atomic_access attribute on madvise
^ permalink raw reply [flat|nested] 72+ messages in thread* ✓ CI.KUnit: success for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (20 preceding siblings ...)
2025-05-27 21:35 ` ✗ CI.checkpatch: warning " Patchwork
@ 2025-05-27 21:37 ` Patchwork
2025-05-27 21:40 ` ✗ CI.Build: failure " Patchwork
` (4 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-27 21:37 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : success
== Summary ==
+ trap cleanup EXIT
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/xe/.kunitconfig
[21:35:54] Configuring KUnit Kernel ...
Generating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[21:35:58] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[21:36:25] Starting KUnit Kernel (1/1)...
[21:36:25] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[21:36:25] ================== guc_buf (11 subtests) ===================
[21:36:25] [PASSED] test_smallest
[21:36:25] [PASSED] test_largest
[21:36:25] [PASSED] test_granular
[21:36:25] [PASSED] test_unique
[21:36:25] [PASSED] test_overlap
[21:36:25] [PASSED] test_reusable
[21:36:25] [PASSED] test_too_big
[21:36:25] [PASSED] test_flush
[21:36:25] [PASSED] test_lookup
[21:36:25] [PASSED] test_data
[21:36:25] [PASSED] test_class
[21:36:25] ===================== [PASSED] guc_buf =====================
[21:36:25] =================== guc_dbm (7 subtests) ===================
[21:36:25] [PASSED] test_empty
[21:36:25] [PASSED] test_default
[21:36:25] ======================== test_size ========================
[21:36:25] [PASSED] 4
[21:36:25] [PASSED] 8
[21:36:25] [PASSED] 32
[21:36:25] [PASSED] 256
[21:36:25] ==================== [PASSED] test_size ====================
[21:36:25] ======================= test_reuse ========================
[21:36:25] [PASSED] 4
[21:36:25] [PASSED] 8
[21:36:25] [PASSED] 32
[21:36:25] [PASSED] 256
[21:36:25] =================== [PASSED] test_reuse ====================
[21:36:25] =================== test_range_overlap ====================
[21:36:25] [PASSED] 4
[21:36:25] [PASSED] 8
[21:36:25] [PASSED] 32
[21:36:25] [PASSED] 256
[21:36:25] =============== [PASSED] test_range_overlap ================
[21:36:25] =================== test_range_compact ====================
[21:36:25] [PASSED] 4
[21:36:25] [PASSED] 8
[21:36:25] [PASSED] 32
[21:36:25] [PASSED] 256
[21:36:25] =============== [PASSED] test_range_compact ================
[21:36:25] ==================== test_range_spare =====================
[21:36:25] [PASSED] 4
[21:36:25] [PASSED] 8
[21:36:25] [PASSED] 32
[21:36:25] [PASSED] 256
[21:36:25] ================ [PASSED] test_range_spare =================
[21:36:25] ===================== [PASSED] guc_dbm =====================
[21:36:25] =================== guc_idm (6 subtests) ===================
[21:36:25] [PASSED] bad_init
[21:36:25] [PASSED] no_init
[21:36:25] [PASSED] init_fini
[21:36:25] [PASSED] check_used
[21:36:25] [PASSED] check_quota
[21:36:25] [PASSED] check_all
[21:36:25] ===================== [PASSED] guc_idm =====================
[21:36:25] ================== no_relay (3 subtests) ===================
[21:36:25] [PASSED] xe_drops_guc2pf_if_not_ready
[21:36:25] [PASSED] xe_drops_guc2vf_if_not_ready
[21:36:25] [PASSED] xe_rejects_send_if_not_ready
[21:36:25] ==================== [PASSED] no_relay =====================
[21:36:25] ================== pf_relay (14 subtests) ==================
[21:36:25] [PASSED] pf_rejects_guc2pf_too_short
[21:36:25] [PASSED] pf_rejects_guc2pf_too_long
[21:36:25] [PASSED] pf_rejects_guc2pf_no_payload
[21:36:25] [PASSED] pf_fails_no_payload
[21:36:25] [PASSED] pf_fails_bad_origin
[21:36:25] [PASSED] pf_fails_bad_type
[21:36:25] [PASSED] pf_txn_reports_error
[21:36:25] [PASSED] pf_txn_sends_pf2guc
[21:36:25] [PASSED] pf_sends_pf2guc
[21:36:25] [SKIPPED] pf_loopback_nop
[21:36:25] [SKIPPED] pf_loopback_echo
[21:36:25] [SKIPPED] pf_loopback_fail
[21:36:25] [SKIPPED] pf_loopback_busy
[21:36:25] [SKIPPED] pf_loopback_retry
[21:36:25] ==================== [PASSED] pf_relay =====================
[21:36:25] ================== vf_relay (3 subtests) ===================
[21:36:25] [PASSED] vf_rejects_guc2vf_too_short
[21:36:25] [PASSED] vf_rejects_guc2vf_too_long
[21:36:25] [PASSED] vf_rejects_guc2vf_no_payload
[21:36:25] ==================== [PASSED] vf_relay =====================
[21:36:25] ================= pf_service (11 subtests) =================
[21:36:25] [PASSED] pf_negotiate_any
[21:36:25] [PASSED] pf_negotiate_base_match
[21:36:25] [PASSED] pf_negotiate_base_newer
[21:36:25] [PASSED] pf_negotiate_base_next
[21:36:25] [SKIPPED] pf_negotiate_base_older
[21:36:25] [PASSED] pf_negotiate_base_prev
[21:36:25] [PASSED] pf_negotiate_latest_match
[21:36:25] [PASSED] pf_negotiate_latest_newer
[21:36:25] [PASSED] pf_negotiate_latest_next
[21:36:25] [SKIPPED] pf_negotiate_latest_older
[21:36:25] [SKIPPED] pf_negotiate_latest_prev
[21:36:25] =================== [PASSED] pf_service ====================
[21:36:25] ===================== lmtt (1 subtest) =====================
[21:36:25] ======================== test_ops =========================
[21:36:25] [PASSED] 2-level
[21:36:25] [PASSED] multi-level
[21:36:25] ==================== [PASSED] test_ops =====================
[21:36:25] ====================== [PASSED] lmtt =======================
[21:36:25] =================== xe_mocs (2 subtests) ===================
[21:36:25] ================ xe_live_mocs_kernel_kunit ================
[21:36:25] =========== [SKIPPED] xe_live_mocs_kernel_kunit ============
[21:36:25] ================ xe_live_mocs_reset_kunit =================
[21:36:25] ============ [SKIPPED] xe_live_mocs_reset_kunit ============
[21:36:25] ==================== [SKIPPED] xe_mocs =====================
[21:36:25] ================= xe_migrate (2 subtests) ==================
[21:36:25] ================= xe_migrate_sanity_kunit =================
[21:36:25] ============ [SKIPPED] xe_migrate_sanity_kunit =============
[21:36:25] ================== xe_validate_ccs_kunit ==================
[21:36:25] ============= [SKIPPED] xe_validate_ccs_kunit ==============
[21:36:25] =================== [SKIPPED] xe_migrate ===================
[21:36:25] ================== xe_dma_buf (1 subtest) ==================
[21:36:25] ==================== xe_dma_buf_kunit =====================
[21:36:25] ================ [SKIPPED] xe_dma_buf_kunit ================
[21:36:25] =================== [SKIPPED] xe_dma_buf ===================
[21:36:25] ================= xe_bo_shrink (1 subtest) =================
[21:36:25] =================== xe_bo_shrink_kunit ====================
[21:36:25] =============== [SKIPPED] xe_bo_shrink_kunit ===============
[21:36:25] ================== [SKIPPED] xe_bo_shrink ==================
[21:36:25] ==================== xe_bo (2 subtests) ====================
[21:36:25] ================== xe_ccs_migrate_kunit ===================
[21:36:25] ============== [SKIPPED] xe_ccs_migrate_kunit ==============
[21:36:25] ==================== xe_bo_evict_kunit ====================
[21:36:25] =============== [SKIPPED] xe_bo_evict_kunit ================
[21:36:25] ===================== [SKIPPED] xe_bo ======================
[21:36:25] ==================== args (11 subtests) ====================
[21:36:25] [PASSED] count_args_test
[21:36:25] [PASSED] call_args_example
[21:36:25] [PASSED] call_args_test
[21:36:25] [PASSED] drop_first_arg_example
[21:36:25] [PASSED] drop_first_arg_test
[21:36:25] [PASSED] first_arg_example
[21:36:25] [PASSED] first_arg_test
[21:36:25] [PASSED] last_arg_example
[21:36:25] [PASSED] last_arg_test
[21:36:25] [PASSED] pick_arg_example
[21:36:25] [PASSED] sep_comma_example
[21:36:25] ====================== [PASSED] args =======================
[21:36:25] =================== xe_pci (2 subtests) ====================
[21:36:25] [PASSED] xe_gmdid_graphics_ip
[21:36:25] [PASSED] xe_gmdid_media_ip
[21:36:25] ===================== [PASSED] xe_pci ======================
[21:36:25] =================== xe_rtp (2 subtests) ====================
[21:36:25] =============== xe_rtp_process_to_sr_tests ================
[21:36:25] [PASSED] coalesce-same-reg
[21:36:25] [PASSED] no-match-no-add
[21:36:25] [PASSED] match-or
[21:36:25] [PASSED] match-or-xfail
[21:36:25] [PASSED] no-match-no-add-multiple-rules
[21:36:25] [PASSED] two-regs-two-entries
[21:36:25] [PASSED] clr-one-set-other
[21:36:25] [PASSED] set-field
[21:36:25] [PASSED] conflict-duplicate
[21:36:25] [PASSED] conflict-not-disjoint
stty: 'standard input': Inappropriate ioctl for device
[21:36:25] [PASSED] conflict-reg-type
[21:36:25] =========== [PASSED] xe_rtp_process_to_sr_tests ============
[21:36:25] ================== xe_rtp_process_tests ===================
[21:36:25] [PASSED] active1
[21:36:25] [PASSED] active2
[21:36:25] [PASSED] active-inactive
[21:36:25] [PASSED] inactive-active
[21:36:25] [PASSED] inactive-1st_or_active-inactive
[21:36:25] [PASSED] inactive-2nd_or_active-inactive
[21:36:25] [PASSED] inactive-last_or_active-inactive
[21:36:25] [PASSED] inactive-no_or_active-inactive
[21:36:25] ============== [PASSED] xe_rtp_process_tests ===============
[21:36:25] ===================== [PASSED] xe_rtp ======================
[21:36:25] ==================== xe_wa (1 subtest) =====================
[21:36:25] ======================== xe_wa_gt =========================
[21:36:25] [PASSED] TIGERLAKE (B0)
[21:36:25] [PASSED] DG1 (A0)
[21:36:25] [PASSED] DG1 (B0)
[21:36:25] [PASSED] ALDERLAKE_S (A0)
[21:36:25] [PASSED] ALDERLAKE_S (B0)
[21:36:25] [PASSED] ALDERLAKE_S (C0)
[21:36:25] [PASSED] ALDERLAKE_S (D0)
[21:36:25] [PASSED] ALDERLAKE_P (A0)
[21:36:25] [PASSED] ALDERLAKE_P (B0)
[21:36:25] [PASSED] ALDERLAKE_P (C0)
[21:36:25] [PASSED] ALDERLAKE_S_RPLS (D0)
[21:36:25] [PASSED] ALDERLAKE_P_RPLU (E0)
[21:36:25] [PASSED] DG2_G10 (C0)
[21:36:25] [PASSED] DG2_G11 (B1)
[21:36:25] [PASSED] DG2_G12 (A1)
[21:36:25] [PASSED] METEORLAKE (g:A0, m:A0)
[21:36:25] [PASSED] METEORLAKE (g:A0, m:A0)
[21:36:25] [PASSED] METEORLAKE (g:A0, m:A0)
[21:36:25] [PASSED] LUNARLAKE (g:A0, m:A0)
[21:36:25] [PASSED] LUNARLAKE (g:B0, m:A0)
[21:36:25] [PASSED] BATTLEMAGE (g:A0, m:A1)
[21:36:25] ==================== [PASSED] xe_wa_gt =====================
[21:36:25] ====================== [PASSED] xe_wa ======================
[21:36:25] ============================================================
[21:36:25] Testing complete. Ran 133 tests: passed: 117, skipped: 16
[21:36:25] Elapsed time: 31.204s total, 4.179s configuring, 26.709s building, 0.298s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/tests/.kunitconfig
[21:36:25] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[21:36:27] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[21:36:48] Starting KUnit Kernel (1/1)...
[21:36:48] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[21:36:48] == drm_test_atomic_get_connector_for_encoder (1 subtest) ===
[21:36:48] [PASSED] drm_test_drm_atomic_get_connector_for_encoder
[21:36:48] ==== [PASSED] drm_test_atomic_get_connector_for_encoder ====
[21:36:48] =========== drm_validate_clone_mode (2 subtests) ===========
[21:36:48] ============== drm_test_check_in_clone_mode ===============
[21:36:48] [PASSED] in_clone_mode
[21:36:48] [PASSED] not_in_clone_mode
[21:36:48] ========== [PASSED] drm_test_check_in_clone_mode ===========
[21:36:48] =============== drm_test_check_valid_clones ===============
[21:36:48] [PASSED] not_in_clone_mode
[21:36:48] [PASSED] valid_clone
[21:36:48] [PASSED] invalid_clone
[21:36:48] =========== [PASSED] drm_test_check_valid_clones ===========
[21:36:48] ============= [PASSED] drm_validate_clone_mode =============
[21:36:48] ============= drm_validate_modeset (1 subtest) =============
[21:36:48] [PASSED] drm_test_check_connector_changed_modeset
[21:36:48] ============== [PASSED] drm_validate_modeset ===============
[21:36:48] ====== drm_test_bridge_get_current_state (2 subtests) ======
[21:36:48] [PASSED] drm_test_drm_bridge_get_current_state_atomic
[21:36:48] [PASSED] drm_test_drm_bridge_get_current_state_legacy
[21:36:48] ======== [PASSED] drm_test_bridge_get_current_state ========
[21:36:48] ====== drm_test_bridge_helper_reset_crtc (3 subtests) ======
[21:36:48] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic
[21:36:48] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic_disabled
[21:36:48] [PASSED] drm_test_drm_bridge_helper_reset_crtc_legacy
[21:36:48] ======== [PASSED] drm_test_bridge_helper_reset_crtc ========
[21:36:48] ================== drm_buddy (7 subtests) ==================
[21:36:48] [PASSED] drm_test_buddy_alloc_limit
[21:36:48] [PASSED] drm_test_buddy_alloc_optimistic
[21:36:48] [PASSED] drm_test_buddy_alloc_pessimistic
[21:36:48] [PASSED] drm_test_buddy_alloc_pathological
[21:36:48] [PASSED] drm_test_buddy_alloc_contiguous
[21:36:48] [PASSED] drm_test_buddy_alloc_clear
[21:36:48] [PASSED] drm_test_buddy_alloc_range_bias
[21:36:48] ==================== [PASSED] drm_buddy ====================
[21:36:48] ============= drm_cmdline_parser (40 subtests) =============
[21:36:48] [PASSED] drm_test_cmdline_force_d_only
[21:36:48] [PASSED] drm_test_cmdline_force_D_only_dvi
[21:36:48] [PASSED] drm_test_cmdline_force_D_only_hdmi
[21:36:48] [PASSED] drm_test_cmdline_force_D_only_not_digital
[21:36:48] [PASSED] drm_test_cmdline_force_e_only
[21:36:48] [PASSED] drm_test_cmdline_res
[21:36:48] [PASSED] drm_test_cmdline_res_vesa
[21:36:48] [PASSED] drm_test_cmdline_res_vesa_rblank
[21:36:48] [PASSED] drm_test_cmdline_res_rblank
[21:36:48] [PASSED] drm_test_cmdline_res_bpp
[21:36:48] [PASSED] drm_test_cmdline_res_refresh
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_margins
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_force_off
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_analog
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_digital
[21:36:48] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on
[21:36:48] [PASSED] drm_test_cmdline_res_margins_force_on
[21:36:48] [PASSED] drm_test_cmdline_res_vesa_margins
[21:36:48] [PASSED] drm_test_cmdline_name
[21:36:48] [PASSED] drm_test_cmdline_name_bpp
[21:36:48] [PASSED] drm_test_cmdline_name_option
[21:36:48] [PASSED] drm_test_cmdline_name_bpp_option
[21:36:48] [PASSED] drm_test_cmdline_rotate_0
[21:36:48] [PASSED] drm_test_cmdline_rotate_90
[21:36:48] [PASSED] drm_test_cmdline_rotate_180
[21:36:48] [PASSED] drm_test_cmdline_rotate_270
[21:36:48] [PASSED] drm_test_cmdline_hmirror
[21:36:48] [PASSED] drm_test_cmdline_vmirror
[21:36:48] [PASSED] drm_test_cmdline_margin_options
[21:36:48] [PASSED] drm_test_cmdline_multiple_options
[21:36:48] [PASSED] drm_test_cmdline_bpp_extra_and_option
[21:36:48] [PASSED] drm_test_cmdline_extra_and_option
[21:36:48] [PASSED] drm_test_cmdline_freestanding_options
[21:36:48] [PASSED] drm_test_cmdline_freestanding_force_e_and_options
[21:36:48] [PASSED] drm_test_cmdline_panel_orientation
[21:36:48] ================ drm_test_cmdline_invalid =================
[21:36:48] [PASSED] margin_only
[21:36:48] [PASSED] interlace_only
[21:36:48] [PASSED] res_missing_x
[21:36:48] [PASSED] res_missing_y
[21:36:48] [PASSED] res_bad_y
[21:36:48] [PASSED] res_missing_y_bpp
[21:36:48] [PASSED] res_bad_bpp
[21:36:48] [PASSED] res_bad_refresh
[21:36:48] [PASSED] res_bpp_refresh_force_on_off
[21:36:48] [PASSED] res_invalid_mode
[21:36:48] [PASSED] res_bpp_wrong_place_mode
[21:36:48] [PASSED] name_bpp_refresh
[21:36:48] [PASSED] name_refresh
[21:36:48] [PASSED] name_refresh_wrong_mode
[21:36:48] [PASSED] name_refresh_invalid_mode
[21:36:48] [PASSED] rotate_multiple
[21:36:48] [PASSED] rotate_invalid_val
[21:36:48] [PASSED] rotate_truncated
[21:36:48] [PASSED] invalid_option
[21:36:48] [PASSED] invalid_tv_option
[21:36:48] [PASSED] truncated_tv_option
[21:36:48] ============ [PASSED] drm_test_cmdline_invalid =============
[21:36:48] =============== drm_test_cmdline_tv_options ===============
[21:36:48] [PASSED] NTSC
[21:36:48] [PASSED] NTSC_443
[21:36:48] [PASSED] NTSC_J
[21:36:48] [PASSED] PAL
[21:36:48] [PASSED] PAL_M
[21:36:48] [PASSED] PAL_N
[21:36:48] [PASSED] SECAM
[21:36:48] [PASSED] MONO_525
[21:36:48] [PASSED] MONO_625
[21:36:48] =========== [PASSED] drm_test_cmdline_tv_options ===========
[21:36:48] =============== [PASSED] drm_cmdline_parser ================
[21:36:48] ========== drmm_connector_hdmi_init (20 subtests) ==========
[21:36:48] [PASSED] drm_test_connector_hdmi_init_valid
[21:36:48] [PASSED] drm_test_connector_hdmi_init_bpc_8
[21:36:48] [PASSED] drm_test_connector_hdmi_init_bpc_10
[21:36:48] [PASSED] drm_test_connector_hdmi_init_bpc_12
[21:36:48] [PASSED] drm_test_connector_hdmi_init_bpc_invalid
[21:36:48] [PASSED] drm_test_connector_hdmi_init_bpc_null
[21:36:48] [PASSED] drm_test_connector_hdmi_init_formats_empty
[21:36:48] [PASSED] drm_test_connector_hdmi_init_formats_no_rgb
[21:36:48] === drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[21:36:48] [PASSED] supported_formats=0x9 yuv420_allowed=1
[21:36:48] [PASSED] supported_formats=0x9 yuv420_allowed=0
[21:36:48] [PASSED] supported_formats=0x3 yuv420_allowed=1
[21:36:48] [PASSED] supported_formats=0x3 yuv420_allowed=0
[21:36:48] === [PASSED] drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[21:36:48] [PASSED] drm_test_connector_hdmi_init_null_ddc
[21:36:48] [PASSED] drm_test_connector_hdmi_init_null_product
[21:36:48] [PASSED] drm_test_connector_hdmi_init_null_vendor
[21:36:48] [PASSED] drm_test_connector_hdmi_init_product_length_exact
[21:36:48] [PASSED] drm_test_connector_hdmi_init_product_length_too_long
[21:36:48] [PASSED] drm_test_connector_hdmi_init_product_valid
[21:36:48] [PASSED] drm_test_connector_hdmi_init_vendor_length_exact
[21:36:48] [PASSED] drm_test_connector_hdmi_init_vendor_length_too_long
[21:36:48] [PASSED] drm_test_connector_hdmi_init_vendor_valid
[21:36:48] ========= drm_test_connector_hdmi_init_type_valid =========
[21:36:48] [PASSED] HDMI-A
[21:36:48] [PASSED] HDMI-B
[21:36:48] ===== [PASSED] drm_test_connector_hdmi_init_type_valid =====
[21:36:48] ======== drm_test_connector_hdmi_init_type_invalid ========
[21:36:48] [PASSED] Unknown
[21:36:48] [PASSED] VGA
[21:36:48] [PASSED] DVI-I
[21:36:48] [PASSED] DVI-D
[21:36:48] [PASSED] DVI-A
[21:36:48] [PASSED] Composite
[21:36:48] [PASSED] SVIDEO
[21:36:48] [PASSED] LVDS
[21:36:48] [PASSED] Component
[21:36:48] [PASSED] DIN
[21:36:48] [PASSED] DP
[21:36:48] [PASSED] TV
[21:36:48] [PASSED] eDP
[21:36:48] [PASSED] Virtual
[21:36:48] [PASSED] DSI
[21:36:48] [PASSED] DPI
[21:36:48] [PASSED] Writeback
[21:36:48] [PASSED] SPI
[21:36:48] [PASSED] USB
[21:36:48] ==== [PASSED] drm_test_connector_hdmi_init_type_invalid ====
[21:36:48] ============ [PASSED] drmm_connector_hdmi_init =============
[21:36:48] ============= drmm_connector_init (3 subtests) =============
[21:36:48] [PASSED] drm_test_drmm_connector_init
[21:36:48] [PASSED] drm_test_drmm_connector_init_null_ddc
[21:36:48] ========= drm_test_drmm_connector_init_type_valid =========
[21:36:48] [PASSED] Unknown
[21:36:48] [PASSED] VGA
[21:36:48] [PASSED] DVI-I
[21:36:48] [PASSED] DVI-D
[21:36:48] [PASSED] DVI-A
[21:36:48] [PASSED] Composite
[21:36:48] [PASSED] SVIDEO
[21:36:48] [PASSED] LVDS
[21:36:48] [PASSED] Component
[21:36:48] [PASSED] DIN
[21:36:48] [PASSED] DP
[21:36:48] [PASSED] HDMI-A
[21:36:48] [PASSED] HDMI-B
[21:36:48] [PASSED] TV
[21:36:48] [PASSED] eDP
[21:36:48] [PASSED] Virtual
[21:36:48] [PASSED] DSI
[21:36:48] [PASSED] DPI
[21:36:48] [PASSED] Writeback
[21:36:48] [PASSED] SPI
[21:36:48] [PASSED] USB
[21:36:48] ===== [PASSED] drm_test_drmm_connector_init_type_valid =====
[21:36:48] =============== [PASSED] drmm_connector_init ===============
[21:36:48] ========= drm_connector_dynamic_init (6 subtests) ==========
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_init
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_init_null_ddc
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_init_not_added
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_init_properties
[21:36:48] ===== drm_test_drm_connector_dynamic_init_type_valid ======
[21:36:48] [PASSED] Unknown
[21:36:48] [PASSED] VGA
[21:36:48] [PASSED] DVI-I
[21:36:48] [PASSED] DVI-D
[21:36:48] [PASSED] DVI-A
[21:36:48] [PASSED] Composite
[21:36:48] [PASSED] SVIDEO
[21:36:48] [PASSED] LVDS
[21:36:48] [PASSED] Component
[21:36:48] [PASSED] DIN
[21:36:48] [PASSED] DP
[21:36:48] [PASSED] HDMI-A
[21:36:48] [PASSED] HDMI-B
[21:36:48] [PASSED] TV
[21:36:48] [PASSED] eDP
[21:36:48] [PASSED] Virtual
[21:36:48] [PASSED] DSI
[21:36:48] [PASSED] DPI
[21:36:48] [PASSED] Writeback
[21:36:48] [PASSED] SPI
[21:36:48] [PASSED] USB
[21:36:48] = [PASSED] drm_test_drm_connector_dynamic_init_type_valid ==
[21:36:48] ======== drm_test_drm_connector_dynamic_init_name =========
[21:36:48] [PASSED] Unknown
[21:36:48] [PASSED] VGA
[21:36:48] [PASSED] DVI-I
[21:36:48] [PASSED] DVI-D
[21:36:48] [PASSED] DVI-A
[21:36:48] [PASSED] Composite
[21:36:48] [PASSED] SVIDEO
[21:36:48] [PASSED] LVDS
[21:36:48] [PASSED] Component
[21:36:48] [PASSED] DIN
[21:36:48] [PASSED] DP
[21:36:48] [PASSED] HDMI-A
[21:36:48] [PASSED] HDMI-B
[21:36:48] [PASSED] TV
[21:36:48] [PASSED] eDP
[21:36:48] [PASSED] Virtual
[21:36:48] [PASSED] DSI
[21:36:48] [PASSED] DPI
[21:36:48] [PASSED] Writeback
[21:36:48] [PASSED] SPI
[21:36:48] [PASSED] USB
[21:36:48] ==== [PASSED] drm_test_drm_connector_dynamic_init_name =====
[21:36:48] =========== [PASSED] drm_connector_dynamic_init ============
[21:36:48] ==== drm_connector_dynamic_register_early (4 subtests) =====
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_early_on_list
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_early_defer
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_early_no_init
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_early_no_mode_object
[21:36:48] ====== [PASSED] drm_connector_dynamic_register_early =======
[21:36:48] ======= drm_connector_dynamic_register (7 subtests) ========
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_on_list
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_no_defer
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_no_init
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_mode_object
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_sysfs
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_sysfs_name
[21:36:48] [PASSED] drm_test_drm_connector_dynamic_register_debugfs
[21:36:48] ========= [PASSED] drm_connector_dynamic_register ==========
[21:36:48] = drm_connector_attach_broadcast_rgb_property (2 subtests) =
[21:36:48] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property
[21:36:48] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property_hdmi_connector
[21:36:48] === [PASSED] drm_connector_attach_broadcast_rgb_property ===
[21:36:48] ========== drm_get_tv_mode_from_name (2 subtests) ==========
[21:36:48] ========== drm_test_get_tv_mode_from_name_valid ===========
[21:36:48] [PASSED] NTSC
[21:36:48] [PASSED] NTSC-443
[21:36:48] [PASSED] NTSC-J
[21:36:48] [PASSED] PAL
[21:36:48] [PASSED] PAL-M
[21:36:48] [PASSED] PAL-N
[21:36:48] [PASSED] SECAM
[21:36:48] [PASSED] Mono
[21:36:48] ====== [PASSED] drm_test_get_tv_mode_from_name_valid =======
[21:36:48] [PASSED] drm_test_get_tv_mode_from_name_truncated
[21:36:48] ============ [PASSED] drm_get_tv_mode_from_name ============
[21:36:48] = drm_test_connector_hdmi_compute_mode_clock (12 subtests) =
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1
[21:36:48] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_double
[21:36:48] = drm_test_connector_hdmi_compute_mode_clock_yuv420_valid =
[21:36:48] [PASSED] VIC 96
[21:36:48] [PASSED] VIC 97
[21:36:48] [PASSED] VIC 101
[21:36:48] [PASSED] VIC 102
[21:36:48] [PASSED] VIC 106
[21:36:48] [PASSED] VIC 107
[21:36:48] === [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_valid ===
[21:36:48] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc
[21:36:48] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc
[21:36:48] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc
[21:36:48] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc
[21:36:48] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc
[21:36:48] === [PASSED] drm_test_connector_hdmi_compute_mode_clock ====
[21:36:48] == drm_hdmi_connector_get_broadcast_rgb_name (2 subtests) ==
[21:36:48] === drm_test_drm_hdmi_connector_get_broadcast_rgb_name ====
[21:36:48] [PASSED] Automatic
[21:36:48] [PASSED] Full
[21:36:48] [PASSED] Limited 16:235
[21:36:48] === [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name ===
[21:36:48] [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name_invalid
[21:36:48] ==== [PASSED] drm_hdmi_connector_get_broadcast_rgb_name ====
[21:36:48] == drm_hdmi_connector_get_output_format_name (2 subtests) ==
[21:36:48] === drm_test_drm_hdmi_connector_get_output_format_name ====
[21:36:48] [PASSED] RGB
[21:36:48] [PASSED] YUV 4:2:0
[21:36:48] [PASSED] YUV 4:2:2
[21:36:48] [PASSED] YUV 4:4:4
[21:36:48] === [PASSED] drm_test_drm_hdmi_connector_get_output_format_name ===
[21:36:48] [PASSED] drm_test_drm_hdmi_connector_get_output_format_name_invalid
[21:36:48] ==== [PASSED] drm_hdmi_connector_get_output_format_name ====
[21:36:48] ============= drm_damage_helper (21 subtests) ==============
[21:36:48] [PASSED] drm_test_damage_iter_no_damage
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_fractional_src
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_src_moved
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_fractional_src_moved
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_not_visible
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_no_crtc
[21:36:48] [PASSED] drm_test_damage_iter_no_damage_no_fb
[21:36:48] [PASSED] drm_test_damage_iter_simple_damage
[21:36:48] [PASSED] drm_test_damage_iter_single_damage
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_intersect_src
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_outside_src
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_fractional_src
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_intersect_fractional_src
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_outside_fractional_src
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_src_moved
[21:36:48] [PASSED] drm_test_damage_iter_single_damage_fractional_src_moved
[21:36:48] [PASSED] drm_test_damage_iter_damage
[21:36:48] [PASSED] drm_test_damage_iter_damage_one_intersect
[21:36:48] [PASSED] drm_test_damage_iter_damage_one_outside
[21:36:48] [PASSED] drm_test_damage_iter_damage_src_moved
[21:36:48] [PASSED] drm_test_damage_iter_damage_not_visible
[21:36:48] ================ [PASSED] drm_damage_helper ================
[21:36:48] ============== drm_dp_mst_helper (3 subtests) ==============
[21:36:48] ============== drm_test_dp_mst_calc_pbn_mode ==============
[21:36:48] [PASSED] Clock 154000 BPP 30 DSC disabled
[21:36:48] [PASSED] Clock 234000 BPP 30 DSC disabled
[21:36:48] [PASSED] Clock 297000 BPP 24 DSC disabled
[21:36:48] [PASSED] Clock 332880 BPP 24 DSC enabled
[21:36:48] [PASSED] Clock 324540 BPP 24 DSC enabled
[21:36:48] ========== [PASSED] drm_test_dp_mst_calc_pbn_mode ==========
[21:36:48] ============== drm_test_dp_mst_calc_pbn_div ===============
[21:36:48] [PASSED] Link rate 2000000 lane count 4
[21:36:48] [PASSED] Link rate 2000000 lane count 2
[21:36:48] [PASSED] Link rate 2000000 lane count 1
[21:36:48] [PASSED] Link rate 1350000 lane count 4
[21:36:48] [PASSED] Link rate 1350000 lane count 2
[21:36:48] [PASSED] Link rate 1350000 lane count 1
[21:36:48] [PASSED] Link rate 1000000 lane count 4
[21:36:48] [PASSED] Link rate 1000000 lane count 2
[21:36:48] [PASSED] Link rate 1000000 lane count 1
[21:36:48] [PASSED] Link rate 810000 lane count 4
[21:36:48] [PASSED] Link rate 810000 lane count 2
[21:36:48] [PASSED] Link rate 810000 lane count 1
[21:36:48] [PASSED] Link rate 540000 lane count 4
[21:36:48] [PASSED] Link rate 540000 lane count 2
[21:36:48] [PASSED] Link rate 540000 lane count 1
[21:36:48] [PASSED] Link rate 270000 lane count 4
[21:36:48] [PASSED] Link rate 270000 lane count 2
[21:36:48] [PASSED] Link rate 270000 lane count 1
[21:36:48] [PASSED] Link rate 162000 lane count 4
[21:36:48] [PASSED] Link rate 162000 lane count 2
[21:36:48] [PASSED] Link rate 162000 lane count 1
[21:36:48] ========== [PASSED] drm_test_dp_mst_calc_pbn_div ===========
[21:36:48] ========= drm_test_dp_mst_sideband_msg_req_decode =========
[21:36:48] [PASSED] DP_ENUM_PATH_RESOURCES with port number
[21:36:48] [PASSED] DP_POWER_UP_PHY with port number
[21:36:48] [PASSED] DP_POWER_DOWN_PHY with port number
[21:36:48] [PASSED] DP_ALLOCATE_PAYLOAD with SDP stream sinks
[21:36:48] [PASSED] DP_ALLOCATE_PAYLOAD with port number
[21:36:48] [PASSED] DP_ALLOCATE_PAYLOAD with VCPI
[21:36:48] [PASSED] DP_ALLOCATE_PAYLOAD with PBN
[21:36:48] [PASSED] DP_QUERY_PAYLOAD with port number
[21:36:48] [PASSED] DP_QUERY_PAYLOAD with VCPI
[21:36:48] [PASSED] DP_REMOTE_DPCD_READ with port number
[21:36:48] [PASSED] DP_REMOTE_DPCD_READ with DPCD address
[21:36:48] [PASSED] DP_REMOTE_DPCD_READ with max number of bytes
[21:36:48] [PASSED] DP_REMOTE_DPCD_WRITE with port number
[21:36:48] [PASSED] DP_REMOTE_DPCD_WRITE with DPCD address
[21:36:48] [PASSED] DP_REMOTE_DPCD_WRITE with data array
[21:36:48] [PASSED] DP_REMOTE_I2C_READ with port number
[21:36:48] [PASSED] DP_REMOTE_I2C_READ with I2C device ID
[21:36:48] [PASSED] DP_REMOTE_I2C_READ with transactions array
[21:36:48] [PASSED] DP_REMOTE_I2C_WRITE with port number
[21:36:48] [PASSED] DP_REMOTE_I2C_WRITE with I2C device ID
[21:36:48] [PASSED] DP_REMOTE_I2C_WRITE with data array
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream ID
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with client ID
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream event
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with valid stream event
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream behavior
[21:36:48] [PASSED] DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior
[21:36:48] ===== [PASSED] drm_test_dp_mst_sideband_msg_req_decode =====
[21:36:48] ================ [PASSED] drm_dp_mst_helper ================
[21:36:48] ================== drm_exec (7 subtests) ===================
[21:36:48] [PASSED] sanitycheck
[21:36:48] [PASSED] test_lock
[21:36:48] [PASSED] test_lock_unlock
[21:36:48] [PASSED] test_duplicates
[21:36:48] [PASSED] test_prepare
[21:36:48] [PASSED] test_prepare_array
[21:36:48] [PASSED] test_multiple_loops
[21:36:48] ==================== [PASSED] drm_exec =====================
[21:36:48] =========== drm_format_helper_test (18 subtests) ===========
[21:36:48] ============== drm_test_fb_xrgb8888_to_gray8 ==============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========== [PASSED] drm_test_fb_xrgb8888_to_gray8 ==========
[21:36:48] ============= drm_test_fb_xrgb8888_to_rgb332 ==============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb332 ==========
[21:36:48] ============= drm_test_fb_xrgb8888_to_rgb565 ==============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb565 ==========
[21:36:48] ============ drm_test_fb_xrgb8888_to_xrgb1555 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_xrgb1555 =========
[21:36:48] ============ drm_test_fb_xrgb8888_to_argb1555 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_argb1555 =========
[21:36:48] ============ drm_test_fb_xrgb8888_to_rgba5551 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_rgba5551 =========
[21:36:48] ============= drm_test_fb_xrgb8888_to_rgb888 ==============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb888 ==========
[21:36:48] ============= drm_test_fb_xrgb8888_to_bgr888 ==============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========= [PASSED] drm_test_fb_xrgb8888_to_bgr888 ==========
[21:36:48] ============ drm_test_fb_xrgb8888_to_argb8888 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_argb8888 =========
[21:36:48] =========== drm_test_fb_xrgb8888_to_xrgb2101010 ===========
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======= [PASSED] drm_test_fb_xrgb8888_to_xrgb2101010 =======
[21:36:48] =========== drm_test_fb_xrgb8888_to_argb2101010 ===========
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======= [PASSED] drm_test_fb_xrgb8888_to_argb2101010 =======
[21:36:48] ============== drm_test_fb_xrgb8888_to_mono ===============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ========== [PASSED] drm_test_fb_xrgb8888_to_mono ===========
[21:36:48] ==================== drm_test_fb_swab =====================
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ================ [PASSED] drm_test_fb_swab =================
[21:36:48] ============ drm_test_fb_xrgb8888_to_xbgr8888 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_xbgr8888 =========
[21:36:48] ============ drm_test_fb_xrgb8888_to_abgr8888 =============
[21:36:48] [PASSED] single_pixel_source_buffer
[21:36:48] [PASSED] single_pixel_clip_rectangle
[21:36:48] [PASSED] well_known_colors
[21:36:48] [PASSED] destination_pitch
[21:36:48] ======== [PASSED] drm_test_fb_xrgb8888_to_abgr8888 =========
[21:36:48] ================= drm_test_fb_clip_offset =================
[21:36:48] [PASSED] pass through
[21:36:48] [PASSED] horizontal offset
[21:36:48] [PASSED] vertical offset
[21:36:48] [PASSED] horizontal and vertical offset
[21:36:48] [PASSED] horizontal offset (custom pitch)
[21:36:48] [PASSED] vertical offset (custom pitch)
[21:36:48] [PASSED] horizontal and vertical offset (custom pitch)
[21:36:48] ============= [PASSED] drm_test_fb_clip_offset =============
[21:36:48] ============== drm_test_fb_build_fourcc_list ==============
[21:36:48] [PASSED] no native formats
[21:36:48] [PASSED] XRGB8888 as native format
[21:36:48] [PASSED] remove duplicates
[21:36:48] [PASSED] convert alpha formats
[21:36:48] [PASSED] random formats
[21:36:48] ========== [PASSED] drm_test_fb_build_fourcc_list ==========
[21:36:48] =================== drm_test_fb_memcpy ====================
[21:36:48] [PASSED] single_pixel_source_buffer: XR24 little-endian (0x34325258)
[21:36:48] [PASSED] single_pixel_source_buffer: XRA8 little-endian (0x38415258)
[21:36:48] [PASSED] single_pixel_source_buffer: YU24 little-endian (0x34325559)
[21:36:48] [PASSED] single_pixel_clip_rectangle: XB24 little-endian (0x34324258)
[21:36:48] [PASSED] single_pixel_clip_rectangle: XRA8 little-endian (0x38415258)
[21:36:48] [PASSED] single_pixel_clip_rectangle: YU24 little-endian (0x34325559)
[21:36:48] [PASSED] well_known_colors: XB24 little-endian (0x34324258)
[21:36:48] [PASSED] well_known_colors: XRA8 little-endian (0x38415258)
[21:36:48] [PASSED] well_known_colors: YU24 little-endian (0x34325559)
[21:36:48] [PASSED] destination_pitch: XB24 little-endian (0x34324258)
[21:36:48] [PASSED] destination_pitch: XRA8 little-endian (0x38415258)
[21:36:48] [PASSED] destination_pitch: YU24 little-endian (0x34325559)
[21:36:48] =============== [PASSED] drm_test_fb_memcpy ================
[21:36:48] ============= [PASSED] drm_format_helper_test ==============
[21:36:48] ================= drm_format (18 subtests) =================
[21:36:48] [PASSED] drm_test_format_block_width_invalid
[21:36:48] [PASSED] drm_test_format_block_width_one_plane
[21:36:48] [PASSED] drm_test_format_block_width_two_plane
[21:36:48] [PASSED] drm_test_format_block_width_three_plane
[21:36:48] [PASSED] drm_test_format_block_width_tiled
[21:36:48] [PASSED] drm_test_format_block_height_invalid
[21:36:48] [PASSED] drm_test_format_block_height_one_plane
[21:36:48] [PASSED] drm_test_format_block_height_two_plane
[21:36:48] [PASSED] drm_test_format_block_height_three_plane
[21:36:48] [PASSED] drm_test_format_block_height_tiled
[21:36:48] [PASSED] drm_test_format_min_pitch_invalid
[21:36:48] [PASSED] drm_test_format_min_pitch_one_plane_8bpp
[21:36:48] [PASSED] drm_test_format_min_pitch_one_plane_16bpp
[21:36:48] [PASSED] drm_test_format_min_pitch_one_plane_24bpp
[21:36:48] [PASSED] drm_test_format_min_pitch_one_plane_32bpp
[21:36:48] [PASSED] drm_test_format_min_pitch_two_plane
[21:36:48] [PASSED] drm_test_format_min_pitch_three_plane_8bpp
[21:36:48] [PASSED] drm_test_format_min_pitch_tiled
[21:36:48] =================== [PASSED] drm_format ====================
[21:36:48] ============== drm_framebuffer (10 subtests) ===============
[21:36:48] ========== drm_test_framebuffer_check_src_coords ==========
[21:36:48] [PASSED] Success: source fits into fb
[21:36:48] [PASSED] Fail: overflowing fb with x-axis coordinate
[21:36:48] [PASSED] Fail: overflowing fb with y-axis coordinate
[21:36:48] [PASSED] Fail: overflowing fb with source width
[21:36:48] [PASSED] Fail: overflowing fb with source height
[21:36:48] ====== [PASSED] drm_test_framebuffer_check_src_coords ======
[21:36:48] [PASSED] drm_test_framebuffer_cleanup
[21:36:48] =============== drm_test_framebuffer_create ===============
[21:36:48] [PASSED] ABGR8888 normal sizes
[21:36:48] [PASSED] ABGR8888 max sizes
[21:36:48] [PASSED] ABGR8888 pitch greater than min required
[21:36:48] [PASSED] ABGR8888 pitch less than min required
[21:36:48] [PASSED] ABGR8888 Invalid width
[21:36:48] [PASSED] ABGR8888 Invalid buffer handle
[21:36:48] [PASSED] No pixel format
[21:36:48] [PASSED] ABGR8888 Width 0
[21:36:48] [PASSED] ABGR8888 Height 0
[21:36:48] [PASSED] ABGR8888 Out of bound height * pitch combination
[21:36:48] [PASSED] ABGR8888 Large buffer offset
[21:36:48] [PASSED] ABGR8888 Buffer offset for inexistent plane
[21:36:48] [PASSED] ABGR8888 Invalid flag
[21:36:48] [PASSED] ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers
[21:36:48] [PASSED] ABGR8888 Valid buffer modifier
[21:36:48] [PASSED] ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
[21:36:48] [PASSED] ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] NV12 Normal sizes
[21:36:48] [PASSED] NV12 Max sizes
[21:36:48] [PASSED] NV12 Invalid pitch
[21:36:48] [PASSED] NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag
[21:36:48] [PASSED] NV12 different modifier per-plane
[21:36:48] [PASSED] NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE
[21:36:48] [PASSED] NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] NV12 Modifier for inexistent plane
[21:36:48] [PASSED] NV12 Handle for inexistent plane
[21:36:48] [PASSED] NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] YVU420 DRM_MODE_FB_MODIFIERS set without modifier
[21:36:48] [PASSED] YVU420 Normal sizes
[21:36:48] [PASSED] YVU420 Max sizes
[21:36:48] [PASSED] YVU420 Invalid pitch
[21:36:48] [PASSED] YVU420 Different pitches
[21:36:48] [PASSED] YVU420 Different buffer offsets/pitches
[21:36:48] [PASSED] YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS
[21:36:48] [PASSED] YVU420 Valid modifier
[21:36:48] [PASSED] YVU420 Different modifiers per plane
[21:36:48] [PASSED] YVU420 Modifier for inexistent plane
[21:36:48] [PASSED] YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)
[21:36:48] [PASSED] X0L2 Normal sizes
[21:36:48] [PASSED] X0L2 Max sizes
[21:36:48] [PASSED] X0L2 Invalid pitch
[21:36:48] [PASSED] X0L2 Pitch greater than minimum required
[21:36:48] [PASSED] X0L2 Handle for inexistent plane
[21:36:48] [PASSED] X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set
[21:36:48] [PASSED] X0L2 Modifier without DRM_MODE_FB_MODIFIERS set
[21:36:48] [PASSED] X0L2 Valid modifier
[21:36:48] [PASSED] X0L2 Modifier for inexistent plane
[21:36:48] =========== [PASSED] drm_test_framebuffer_create ===========
[21:36:48] [PASSED] drm_test_framebuffer_free
[21:36:48] [PASSED] drm_test_framebuffer_init
[21:36:48] [PASSED] drm_test_framebuffer_init_bad_format
[21:36:48] [PASSED] drm_test_framebuffer_init_dev_mismatch
[21:36:48] [PASSED] drm_test_framebuffer_lookup
[21:36:48] [PASSED] drm_test_framebuffer_lookup_inexistent
[21:36:48] [PASSED] drm_test_framebuffer_modifiers_not_supported
[21:36:48] ================= [PASSED] drm_framebuffer =================
[21:36:48] ================ drm_gem_shmem (8 subtests) ================
[21:36:48] [PASSED] drm_gem_shmem_test_obj_create
[21:36:48] [PASSED] drm_gem_shmem_test_obj_create_private
[21:36:48] [PASSED] drm_gem_shmem_test_pin_pages
[21:36:48] [PASSED] drm_gem_shmem_test_vmap
[21:36:48] [PASSED] drm_gem_shmem_test_get_pages_sgt
[21:36:48] [PASSED] drm_gem_shmem_test_get_sg_table
[21:36:48] [PASSED] drm_gem_shmem_test_madvise
[21:36:48] [PASSED] drm_gem_shmem_test_purge
[21:36:48] ================== [PASSED] drm_gem_shmem ==================
[21:36:48] === drm_atomic_helper_connector_hdmi_check (23 subtests) ===
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode_vic_1
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode_vic_1
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode_vic_1
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_changed
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_not_changed
[21:36:48] [PASSED] drm_test_check_disable_connector
[21:36:48] [PASSED] drm_test_check_hdmi_funcs_reject_rate
[21:36:48] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback
[21:36:48] [PASSED] drm_test_check_max_tmds_rate_format_fallback
[21:36:48] [PASSED] drm_test_check_output_bpc_crtc_mode_changed
[21:36:48] [PASSED] drm_test_check_output_bpc_crtc_mode_not_changed
[21:36:48] [PASSED] drm_test_check_output_bpc_dvi
[21:36:48] [PASSED] drm_test_check_output_bpc_format_vic_1
[21:36:48] [PASSED] drm_test_check_output_bpc_format_display_8bpc_only
[21:36:48] [PASSED] drm_test_check_output_bpc_format_display_rgb_only
[21:36:48] [PASSED] drm_test_check_output_bpc_format_driver_8bpc_only
[21:36:48] [PASSED] drm_test_check_output_bpc_format_driver_rgb_only
[21:36:48] [PASSED] drm_test_check_tmds_char_rate_rgb_8bpc
[21:36:48] [PASSED] drm_test_check_tmds_char_rate_rgb_10bpc
[21:36:48] [PASSED] drm_test_check_tmds_char_rate_rgb_12bpc
[21:36:48] ===== [PASSED] drm_atomic_helper_connector_hdmi_check ======
[21:36:48] === drm_atomic_helper_connector_hdmi_reset (6 subtests) ====
[21:36:48] [PASSED] drm_test_check_broadcast_rgb_value
[21:36:48] [PASSED] drm_test_check_bpc_8_value
[21:36:48] [PASSED] drm_test_check_bpc_10_value
[21:36:48] [PASSED] drm_test_check_bpc_12_value
[21:36:48] [PASSED] drm_test_check_format_value
[21:36:48] [PASSED] drm_test_check_tmds_char_value
[21:36:48] ===== [PASSED] drm_atomic_helper_connector_hdmi_reset ======
[21:36:48] = drm_atomic_helper_connector_hdmi_mode_valid (4 subtests) =
[21:36:48] [PASSED] drm_test_check_mode_valid
[21:36:48] [PASSED] drm_test_check_mode_valid_reject
[21:36:48] [PASSED] drm_test_check_mode_valid_reject_rate
[21:36:48] [PASSED] drm_test_check_mode_valid_reject_max_clock
[21:36:48] === [PASSED] drm_atomic_helper_connector_hdmi_mode_valid ===
[21:36:48] ================= drm_managed (2 subtests) =================
[21:36:48] [PASSED] drm_test_managed_release_action
[21:36:48] [PASSED] drm_test_managed_run_action
[21:36:48] =================== [PASSED] drm_managed ===================
[21:36:48] =================== drm_mm (6 subtests) ====================
[21:36:48] [PASSED] drm_test_mm_init
[21:36:48] [PASSED] drm_test_mm_debug
[21:36:48] [PASSED] drm_test_mm_align32
[21:36:48] [PASSED] drm_test_mm_align64
[21:36:48] [PASSED] drm_test_mm_lowest
[21:36:48] [PASSED] drm_test_mm_highest
[21:36:48] ===================== [PASSED] drm_mm ======================
[21:36:48] ============= drm_modes_analog_tv (5 subtests) =============
[21:36:48] [PASSED] drm_test_modes_analog_tv_mono_576i
[21:36:48] [PASSED] drm_test_modes_analog_tv_ntsc_480i
[21:36:48] [PASSED] drm_test_modes_analog_tv_ntsc_480i_inlined
[21:36:48] [PASSED] drm_test_modes_analog_tv_pal_576i
[21:36:48] [PASSED] drm_test_modes_analog_tv_pal_576i_inlined
[21:36:48] =============== [PASSED] drm_modes_analog_tv ===============
[21:36:48] ============== drm_plane_helper (2 subtests) ===============
[21:36:48] =============== drm_test_check_plane_state ================
[21:36:48] [PASSED] clipping_simple
[21:36:48] [PASSED] clipping_rotate_reflect
[21:36:48] [PASSED] positioning_simple
[21:36:48] [PASSED] upscaling
[21:36:48] [PASSED] downscaling
[21:36:48] [PASSED] rounding1
[21:36:48] [PASSED] rounding2
[21:36:48] [PASSED] rounding3
[21:36:48] [PASSED] rounding4
[21:36:48] =========== [PASSED] drm_test_check_plane_state ============
[21:36:48] =========== drm_test_check_invalid_plane_state ============
[21:36:48] [PASSED] positioning_invalid
[21:36:48] [PASSED] upscaling_invalid
[21:36:48] [PASSED] downscaling_invalid
[21:36:48] ======= [PASSED] drm_test_check_invalid_plane_state ========
[21:36:48] ================ [PASSED] drm_plane_helper =================
[21:36:48] ====== drm_connector_helper_tv_get_modes (1 subtest) =======
[21:36:48] ====== drm_test_connector_helper_tv_get_modes_check =======
[21:36:48] [PASSED] None
[21:36:48] [PASSED] PAL
[21:36:48] [PASSED] NTSC
[21:36:48] [PASSED] Both, NTSC Default
[21:36:48] [PASSED] Both, PAL Default
[21:36:48] [PASSED] Both, NTSC Default, with PAL on command-line
[21:36:48] [PASSED] Both, PAL Default, with NTSC on command-line
[21:36:48] == [PASSED] drm_test_connector_helper_tv_get_modes_check ===
[21:36:48] ======== [PASSED] drm_connector_helper_tv_get_modes ========
[21:36:48] ================== drm_rect (9 subtests) ===================
[21:36:48] [PASSED] drm_test_rect_clip_scaled_div_by_zero
[21:36:48] [PASSED] drm_test_rect_clip_scaled_not_clipped
[21:36:48] [PASSED] drm_test_rect_clip_scaled_clipped
[21:36:48] [PASSED] drm_test_rect_clip_scaled_signed_vs_unsigned
[21:36:48] ================= drm_test_rect_intersect =================
[21:36:48] [PASSED] top-left x bottom-right: 2x2+1+1 x 2x2+0+0
[21:36:48] [PASSED] top-right x bottom-left: 2x2+0+0 x 2x2+1-1
[21:36:48] [PASSED] bottom-left x top-right: 2x2+1-1 x 2x2+0+0
[21:36:48] [PASSED] bottom-right x top-left: 2x2+0+0 x 2x2+1+1
[21:36:48] [PASSED] right x left: 2x1+0+0 x 3x1+1+0
[21:36:48] [PASSED] left x right: 3x1+1+0 x 2x1+0+0
[21:36:48] [PASSED] up x bottom: 1x2+0+0 x 1x3+0-1
[21:36:48] [PASSED] bottom x up: 1x3+0-1 x 1x2+0+0
[21:36:48] [PASSED] touching corner: 1x1+0+0 x 2x2+1+1
[21:36:48] [PASSED] touching side: 1x1+0+0 x 1x1+1+0
[21:36:48] [PASSED] equal rects: 2x2+0+0 x 2x2+0+0
[21:36:48] [PASSED] inside another: 2x2+0+0 x 1x1+1+1
[21:36:48] [PASSED] far away: 1x1+0+0 x 1x1+3+6
[21:36:48] [PASSED] points intersecting: 0x0+5+10 x 0x0+5+10
[21:36:48] [PASSED] points not intersecting: 0x0+0+0 x 0x0+5+10
[21:36:48] ============= [PASSED] drm_test_rect_intersect =============
[21:36:48] ================ drm_test_rect_calc_hscale ================
[21:36:48] [PASSED] normal use
[21:36:48] [PASSED] out of max range
[21:36:48] [PASSED] out of min range
[21:36:48] [PASSED] zero dst
[21:36:48] [PASSED] negative src
[21:36:48] [PASSED] negative dst
[21:36:48] ============ [PASSED] drm_test_rect_calc_hscale ============
[21:36:48] ================ drm_test_rect_calc_vscale ================
[21:36:48] [PASSED] normal use
[21:36:48] [PASSED] out of max range
[21:36:48] [PASSED] out of min range
[21:36:48] [PASSED] zero dst
[21:36:48] [PASSED] negative src
[21:36:48] [PASSED] negative dst
[21:36:48] ============ [PASSED] drm_test_rect_calc_vscale ============
[21:36:48] ================== drm_test_rect_rotate ===================
[21:36:48] [PASSED] reflect-x
[21:36:48] [PASSED] reflect-y
[21:36:48] [PASSED] rotate-0
[21:36:48] [PASSED] rotate-90
[21:36:48] [PASSED] rotate-180
[21:36:48] [PASSED] rotate-270
[21:36:48] ============== [PASSED] drm_test_rect_rotate ===============
[21:36:48] ================ drm_test_rect_rotate_inv =================
[21:36:48] [PASSED] reflect-x
[21:36:48] [PASSED] reflect-y
[21:36:48] [PASSED] rotate-0
[21:36:48] [PASSED] rotate-90
[21:36:48] [PASSED] rotate-180
[21:36:48] [PASSED] rotate-270
[21:36:48] ============ [PASSED] drm_test_rect_rotate_inv =============
stty: 'standard input': Inappropriate ioctl for device
[21:36:48] ==================== [PASSED] drm_rect =====================
[21:36:48] ============================================================
[21:36:48] Testing complete. Ran 608 tests: passed: 608
[21:36:48] Elapsed time: 22.975s total, 1.685s configuring, 21.069s building, 0.191s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/ttm/tests/.kunitconfig
[21:36:48] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[21:36:50] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[21:36:58] Starting KUnit Kernel (1/1)...
[21:36:58] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[21:36:58] ================= ttm_device (5 subtests) ==================
[21:36:58] [PASSED] ttm_device_init_basic
[21:36:58] [PASSED] ttm_device_init_multiple
[21:36:58] [PASSED] ttm_device_fini_basic
[21:36:58] [PASSED] ttm_device_init_no_vma_man
[21:36:58] ================== ttm_device_init_pools ==================
[21:36:58] [PASSED] No DMA allocations, no DMA32 required
[21:36:58] [PASSED] DMA allocations, DMA32 required
[21:36:58] [PASSED] No DMA allocations, DMA32 required
[21:36:58] [PASSED] DMA allocations, no DMA32 required
[21:36:58] ============== [PASSED] ttm_device_init_pools ==============
[21:36:58] =================== [PASSED] ttm_device ====================
[21:36:58] ================== ttm_pool (8 subtests) ===================
[21:36:58] ================== ttm_pool_alloc_basic ===================
[21:36:58] [PASSED] One page
[21:36:58] [PASSED] More than one page
[21:36:58] [PASSED] Above the allocation limit
[21:36:58] [PASSED] One page, with coherent DMA mappings enabled
[21:36:58] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[21:36:58] ============== [PASSED] ttm_pool_alloc_basic ===============
[21:36:58] ============== ttm_pool_alloc_basic_dma_addr ==============
[21:36:58] [PASSED] One page
[21:36:58] [PASSED] More than one page
[21:36:58] [PASSED] Above the allocation limit
[21:36:58] [PASSED] One page, with coherent DMA mappings enabled
[21:36:58] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[21:36:58] ========== [PASSED] ttm_pool_alloc_basic_dma_addr ==========
[21:36:58] [PASSED] ttm_pool_alloc_order_caching_match
[21:36:58] [PASSED] ttm_pool_alloc_caching_mismatch
[21:36:58] [PASSED] ttm_pool_alloc_order_mismatch
[21:36:58] [PASSED] ttm_pool_free_dma_alloc
[21:36:58] [PASSED] ttm_pool_free_no_dma_alloc
[21:36:58] [PASSED] ttm_pool_fini_basic
[21:36:58] ==================== [PASSED] ttm_pool =====================
[21:36:58] ================ ttm_resource (8 subtests) =================
[21:36:58] ================= ttm_resource_init_basic =================
[21:36:58] [PASSED] Init resource in TTM_PL_SYSTEM
[21:36:58] [PASSED] Init resource in TTM_PL_VRAM
[21:36:58] [PASSED] Init resource in a private placement
[21:36:58] [PASSED] Init resource in TTM_PL_SYSTEM, set placement flags
[21:36:58] ============= [PASSED] ttm_resource_init_basic =============
[21:36:58] [PASSED] ttm_resource_init_pinned
[21:36:58] [PASSED] ttm_resource_fini_basic
[21:36:58] [PASSED] ttm_resource_manager_init_basic
[21:36:58] [PASSED] ttm_resource_manager_usage_basic
[21:36:58] [PASSED] ttm_resource_manager_set_used_basic
[21:36:58] [PASSED] ttm_sys_man_alloc_basic
[21:36:58] [PASSED] ttm_sys_man_free_basic
[21:36:58] ================== [PASSED] ttm_resource ===================
[21:36:58] =================== ttm_tt (15 subtests) ===================
[21:36:58] ==================== ttm_tt_init_basic ====================
[21:36:58] [PASSED] Page-aligned size
[21:36:58] [PASSED] Extra pages requested
[21:36:58] ================ [PASSED] ttm_tt_init_basic ================
[21:36:58] [PASSED] ttm_tt_init_misaligned
[21:36:58] [PASSED] ttm_tt_fini_basic
[21:36:58] [PASSED] ttm_tt_fini_sg
[21:36:58] [PASSED] ttm_tt_fini_shmem
[21:36:58] [PASSED] ttm_tt_create_basic
[21:36:58] [PASSED] ttm_tt_create_invalid_bo_type
[21:36:58] [PASSED] ttm_tt_create_ttm_exists
[21:36:58] [PASSED] ttm_tt_create_failed
[21:36:58] [PASSED] ttm_tt_destroy_basic
[21:36:58] [PASSED] ttm_tt_populate_null_ttm
[21:36:58] [PASSED] ttm_tt_populate_populated_ttm
[21:36:58] [PASSED] ttm_tt_unpopulate_basic
[21:36:58] [PASSED] ttm_tt_unpopulate_empty_ttm
[21:36:58] [PASSED] ttm_tt_swapin_basic
[21:36:58] ===================== [PASSED] ttm_tt ======================
[21:36:58] =================== ttm_bo (14 subtests) ===================
[21:36:58] =========== ttm_bo_reserve_optimistic_no_ticket ===========
[21:36:58] [PASSED] Cannot be interrupted and sleeps
[21:36:58] [PASSED] Cannot be interrupted, locks straight away
[21:36:58] [PASSED] Can be interrupted, sleeps
[21:36:58] ======= [PASSED] ttm_bo_reserve_optimistic_no_ticket =======
[21:36:58] [PASSED] ttm_bo_reserve_locked_no_sleep
[21:36:58] [PASSED] ttm_bo_reserve_no_wait_ticket
[21:36:58] [PASSED] ttm_bo_reserve_double_resv
[21:36:58] [PASSED] ttm_bo_reserve_interrupted
[21:36:58] [PASSED] ttm_bo_reserve_deadlock
[21:36:58] [PASSED] ttm_bo_unreserve_basic
[21:36:58] [PASSED] ttm_bo_unreserve_pinned
[21:36:58] [PASSED] ttm_bo_unreserve_bulk
[21:36:58] [PASSED] ttm_bo_put_basic
[21:36:58] [PASSED] ttm_bo_put_shared_resv
[21:36:58] [PASSED] ttm_bo_pin_basic
[21:36:58] [PASSED] ttm_bo_pin_unpin_resource
[21:36:58] [PASSED] ttm_bo_multiple_pin_one_unpin
[21:36:58] ===================== [PASSED] ttm_bo ======================
[21:36:58] ============== ttm_bo_validate (22 subtests) ===============
[21:36:58] ============== ttm_bo_init_reserved_sys_man ===============
[21:36:58] [PASSED] Buffer object for userspace
[21:36:58] [PASSED] Kernel buffer object
[21:36:58] [PASSED] Shared buffer object
[21:36:58] ========== [PASSED] ttm_bo_init_reserved_sys_man ===========
[21:36:58] ============== ttm_bo_init_reserved_mock_man ==============
[21:36:58] [PASSED] Buffer object for userspace
[21:36:58] [PASSED] Kernel buffer object
[21:36:58] [PASSED] Shared buffer object
[21:36:58] ========== [PASSED] ttm_bo_init_reserved_mock_man ==========
[21:36:58] [PASSED] ttm_bo_init_reserved_resv
[21:36:58] ================== ttm_bo_validate_basic ==================
[21:36:58] [PASSED] Buffer object for userspace
[21:36:58] [PASSED] Kernel buffer object
[21:36:58] [PASSED] Shared buffer object
[21:36:58] ============== [PASSED] ttm_bo_validate_basic ==============
[21:36:58] [PASSED] ttm_bo_validate_invalid_placement
[21:36:58] ============= ttm_bo_validate_same_placement ==============
[21:36:58] [PASSED] System manager
[21:36:58] [PASSED] VRAM manager
[21:36:58] ========= [PASSED] ttm_bo_validate_same_placement ==========
[21:36:58] [PASSED] ttm_bo_validate_failed_alloc
[21:36:58] [PASSED] ttm_bo_validate_pinned
[21:36:58] [PASSED] ttm_bo_validate_busy_placement
[21:36:58] ================ ttm_bo_validate_multihop =================
[21:36:58] [PASSED] Buffer object for userspace
[21:36:58] [PASSED] Kernel buffer object
[21:36:58] [PASSED] Shared buffer object
[21:36:58] ============ [PASSED] ttm_bo_validate_multihop =============
[21:36:58] ========== ttm_bo_validate_no_placement_signaled ==========
[21:36:58] [PASSED] Buffer object in system domain, no page vector
[21:36:58] [PASSED] Buffer object in system domain with an existing page vector
[21:36:58] ====== [PASSED] ttm_bo_validate_no_placement_signaled ======
[21:36:58] ======== ttm_bo_validate_no_placement_not_signaled ========
[21:36:58] [PASSED] Buffer object for userspace
[21:36:58] [PASSED] Kernel buffer object
[21:36:58] [PASSED] Shared buffer object
[21:36:58] ==== [PASSED] ttm_bo_validate_no_placement_not_signaled ====
[21:36:58] [PASSED] ttm_bo_validate_move_fence_signaled
[21:36:58] ========= ttm_bo_validate_move_fence_not_signaled =========
[21:36:58] [PASSED] Waits for GPU
[21:36:58] [PASSED] Tries to lock straight away
[21:36:58] ===== [PASSED] ttm_bo_validate_move_fence_not_signaled =====
[21:36:58] [PASSED] ttm_bo_validate_swapout
[21:36:58] [PASSED] ttm_bo_validate_happy_evict
[21:36:58] [PASSED] ttm_bo_validate_all_pinned_evict
[21:36:58] [PASSED] ttm_bo_validate_allowed_only_evict
[21:36:58] [PASSED] ttm_bo_validate_deleted_evict
[21:36:58] [PASSED] ttm_bo_validate_busy_domain_evict
[21:36:58] [PASSED] ttm_bo_validate_evict_gutting
[21:36:58] [PASSED] ttm_bo_validate_recrusive_evict
stty: 'standard input': Inappropriate ioctl for device
[21:36:58] ================= [PASSED] ttm_bo_validate =================
[21:36:58] ============================================================
[21:36:58] Testing complete. Ran 102 tests: passed: 102
[21:36:58] Elapsed time: 9.943s total, 1.649s configuring, 7.676s building, 0.526s running
+ cleanup
++ stat -c %u:%g /kernel
+ chown -R 1003:1003 /kernel
^ permalink raw reply [flat|nested] 72+ messages in thread

* ✗ CI.Build: failure for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (21 preceding siblings ...)
2025-05-27 21:37 ` ✓ CI.KUnit: success " Patchwork
@ 2025-05-27 21:40 ` Patchwork
2025-05-28 7:45 ` ✓ CI.Patch_applied: success " Patchwork
` (3 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-27 21:40 UTC (permalink / raw)
To: Himal Prasad Ghimiray; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : failure
^ permalink raw reply [flat|nested] 72+ messages in thread

* ✓ CI.Patch_applied: success for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (22 preceding siblings ...)
2025-05-27 21:40 ` ✗ CI.Build: failure " Patchwork
@ 2025-05-28 7:45 ` Patchwork
2025-05-28 7:45 ` ✗ CI.checkpatch: warning " Patchwork
` (2 subsequent siblings)
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-28 7:45 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : success
== Summary ==
=== Applying kernel patches on branch 'drm-tip' with base: ===
Base commit: 07b6736f75df drm-tip: 2025y-05m-27d-20h-13m-12s UTC integration manifest
=== git am output follows ===
Applying: Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops
Applying: drm/xe/uapi: Add madvise interface
Applying: drm/xe/vm: Add attributes struct as member of vma
Applying: drm/xe/vma: Move pat_index to vma attributes
Applying: drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
Applying: drm/gpusvm: Make drm_gpusvm_for_each_* macros public
Applying: drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
Applying: drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
Applying: drm/xe/svm: Split system allocator vma incase of madvise call
Applying: drm/xe: Implement madvise ioctl for xe
Applying: drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
Applying: drm/xe/svm : Add svm ranges migration policy on atomic access
Applying: drm/xe/madvise: Update migration policy based on preferred location
Applying: drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
Applying: drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
Applying: drm/xe/svm: Consult madvise preferred location in prefetch
Applying: drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
Applying: drm/xe/bo: Add attributes field to xe_bo
Applying: drm/xe/bo: Update atomic_access attribute on madvise
^ permalink raw reply [flat|nested] 72+ messages in thread

* ✗ CI.checkpatch: warning for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (23 preceding siblings ...)
2025-05-28 7:45 ` ✓ CI.Patch_applied: success " Patchwork
@ 2025-05-28 7:45 ` Patchwork
2025-05-28 7:46 ` ✓ CI.KUnit: success " Patchwork
2025-05-28 7:50 ` ✗ CI.Build: failure " Patchwork
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-28 7:45 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : warning
== Summary ==
+ KERNEL=/kernel
+ git clone https://gitlab.freedesktop.org/drm/maintainer-tools mt
Cloning into 'mt'...
warning: redirecting to https://gitlab.freedesktop.org/drm/maintainer-tools.git/
+ git -C mt rev-list -n1 origin/master
202708c00696422fd217223bb679a353a5936e23
+ cd /kernel
+ git config --global --add safe.directory /kernel
+ git log -n1
commit 0fe86083d80c63fd88eb12ffeee86c52dd26488c
Author: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Date: Tue May 27 22:10:03 2025 +0530
drm/xe/bo: Update atomic_access attribute on madvise
Update the bo_atomic_access based on user-provided input and determine
the migration to smem during a CPU fault
v2 (Matthew Brost)
- Avoid cpu unmapping if bo is already in smem
- check atomics on smem too for ioctl
- Add comments
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+ /mt/dim checkpatch 07b6736f75df3e60d5e20eda53ecb97f6d9a66c0 drm-intel
c5126839808a Introduce drm_gpuvm_sm_map_ops_flags enums for sm_map_ops
5d7b706d58b4 drm/xe/uapi: Add madvise interface
-:37: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#37: FILE: include/uapi/drm/xe_drm.h:122:
+#define DRM_IOCTL_XE_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
total: 0 errors, 1 warnings, 0 checks, 121 lines checked
b74db14d5768 drm/xe/vm: Add attributes struct as member of vma
bfa2976f7f30 drm/xe/vma: Move pat_index to vma attributes
3adf1b095f5b drm/xe/vma: Modify new_vma to accept struct xe_vma_mem_attr as parameter
0a83d3180595 drm/gpusvm: Make drm_gpusvm_for_each_* macros public
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'range__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'next__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:224: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#224: FILE: include/drm/drm_gpusvm.h:548:
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-:257: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'notifier__' - possible side-effects?
#257: FILE: include/drm/drm_gpusvm.h:581:
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-:257: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#257: FILE: include/drm/drm_gpusvm.h:581:
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'notifier__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'next__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
-:273: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'end__' - possible side-effects?
#273: FILE: include/drm/drm_gpusvm.h:597:
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
total: 0 errors, 0 warnings, 8 checks, 248 lines checked
778c6afb7f87 drm/xe/vm: Add a helper xe_vm_range_tilemask_tlb_invalidation()
d7fa7153beaa drm/xe/svm: Add xe_svm_ranges_zap_ptes_in_range() for PTE zapping
abb369db431d drm/xe/svm: Split system allocator vma incase of madvise call
d7389470784e drm/xe: Implement madvise ioctl for xe
-:57: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#57:
new file mode 100644
total: 0 errors, 1 warnings, 0 checks, 300 lines checked
3dbf973d244a drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
134ce88c281e drm/xe/svm : Add svm ranges migration policy on atomic access
1756d0569cb0 drm/xe/madvise: Update migration policy based on preferred location
53352aa9120e drm/xe/svm: Support DRM_XE_SVM_ATTR_PAT memory attribute
8e77979add7a drm/xe/uapi: Add flag for consulting madvise hints on svm prefetch
bf144addb682 drm/xe/svm: Consult madvise preferred location in prefetch
db06e061d259 drm/xe/uapi: Add UAPI for querying VMA count and memory attributes
-:71: WARNING:LONG_LINE: line length of 107 exceeds 100 columns
#71: FILE: drivers/gpu/drm/xe/xe_vm.c:2193:
+ mem_attrs[i].preferred_mem_loc.migration_policy = vma->attr.preferred_loc.migration_policy;
-:170: WARNING:LONG_LINE: line length of 137 exceeds 100 columns
#170: FILE: include/uapi/drm/xe_drm.h:125:
+#define DRM_IOCTL_XE_VM_QUERY_VMAS_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_VMAS_ATTRS, struct drm_xe_vm_query_vmas_attr)
total: 0 errors, 2 warnings, 0 checks, 214 lines checked
012c225dca26 drm/xe/bo: Add attributes field to xe_bo
0fe86083d80c drm/xe/bo: Update atomic_access attribute on madvise
^ permalink raw reply [flat|nested] 72+ messages in thread

* ✓ CI.KUnit: success for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (24 preceding siblings ...)
2025-05-28 7:45 ` ✗ CI.checkpatch: warning " Patchwork
@ 2025-05-28 7:46 ` Patchwork
2025-05-28 7:50 ` ✗ CI.Build: failure " Patchwork
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-28 7:46 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : success
== Summary ==
+ trap cleanup EXIT
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/xe/.kunitconfig
[07:45:30] Configuring KUnit Kernel ...
Generating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[07:45:34] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[07:46:00] Starting KUnit Kernel (1/1)...
[07:46:00] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[07:46:01] ================== guc_buf (11 subtests) ===================
[07:46:01] [PASSED] test_smallest
[07:46:01] [PASSED] test_largest
[07:46:01] [PASSED] test_granular
[07:46:01] [PASSED] test_unique
[07:46:01] [PASSED] test_overlap
[07:46:01] [PASSED] test_reusable
[07:46:01] [PASSED] test_too_big
[07:46:01] [PASSED] test_flush
[07:46:01] [PASSED] test_lookup
[07:46:01] [PASSED] test_data
[07:46:01] [PASSED] test_class
[07:46:01] ===================== [PASSED] guc_buf =====================
[07:46:01] =================== guc_dbm (7 subtests) ===================
[07:46:01] [PASSED] test_empty
[07:46:01] [PASSED] test_default
[07:46:01] ======================== test_size ========================
[07:46:01] [PASSED] 4
[07:46:01] [PASSED] 8
[07:46:01] [PASSED] 32
[07:46:01] [PASSED] 256
[07:46:01] ==================== [PASSED] test_size ====================
[07:46:01] ======================= test_reuse ========================
[07:46:01] [PASSED] 4
[07:46:01] [PASSED] 8
[07:46:01] [PASSED] 32
[07:46:01] [PASSED] 256
[07:46:01] =================== [PASSED] test_reuse ====================
[07:46:01] =================== test_range_overlap ====================
[07:46:01] [PASSED] 4
[07:46:01] [PASSED] 8
[07:46:01] [PASSED] 32
[07:46:01] [PASSED] 256
[07:46:01] =============== [PASSED] test_range_overlap ================
[07:46:01] =================== test_range_compact ====================
[07:46:01] [PASSED] 4
[07:46:01] [PASSED] 8
[07:46:01] [PASSED] 32
[07:46:01] [PASSED] 256
[07:46:01] =============== [PASSED] test_range_compact ================
[07:46:01] ==================== test_range_spare =====================
[07:46:01] [PASSED] 4
[07:46:01] [PASSED] 8
[07:46:01] [PASSED] 32
[07:46:01] [PASSED] 256
[07:46:01] ================ [PASSED] test_range_spare =================
[07:46:01] ===================== [PASSED] guc_dbm =====================
[07:46:01] =================== guc_idm (6 subtests) ===================
[07:46:01] [PASSED] bad_init
[07:46:01] [PASSED] no_init
[07:46:01] [PASSED] init_fini
[07:46:01] [PASSED] check_used
[07:46:01] [PASSED] check_quota
[07:46:01] [PASSED] check_all
[07:46:01] ===================== [PASSED] guc_idm =====================
[07:46:01] ================== no_relay (3 subtests) ===================
[07:46:01] [PASSED] xe_drops_guc2pf_if_not_ready
[07:46:01] [PASSED] xe_drops_guc2vf_if_not_ready
[07:46:01] [PASSED] xe_rejects_send_if_not_ready
[07:46:01] ==================== [PASSED] no_relay =====================
[07:46:01] ================== pf_relay (14 subtests) ==================
[07:46:01] [PASSED] pf_rejects_guc2pf_too_short
[07:46:01] [PASSED] pf_rejects_guc2pf_too_long
[07:46:01] [PASSED] pf_rejects_guc2pf_no_payload
[07:46:01] [PASSED] pf_fails_no_payload
[07:46:01] [PASSED] pf_fails_bad_origin
[07:46:01] [PASSED] pf_fails_bad_type
[07:46:01] [PASSED] pf_txn_reports_error
[07:46:01] [PASSED] pf_txn_sends_pf2guc
[07:46:01] [PASSED] pf_sends_pf2guc
[07:46:01] [SKIPPED] pf_loopback_nop
[07:46:01] [SKIPPED] pf_loopback_echo
[07:46:01] [SKIPPED] pf_loopback_fail
[07:46:01] [SKIPPED] pf_loopback_busy
[07:46:01] [SKIPPED] pf_loopback_retry
[07:46:01] ==================== [PASSED] pf_relay =====================
[07:46:01] ================== vf_relay (3 subtests) ===================
[07:46:01] [PASSED] vf_rejects_guc2vf_too_short
[07:46:01] [PASSED] vf_rejects_guc2vf_too_long
[07:46:01] [PASSED] vf_rejects_guc2vf_no_payload
[07:46:01] ==================== [PASSED] vf_relay =====================
[07:46:01] ================= pf_service (11 subtests) =================
[07:46:01] [PASSED] pf_negotiate_any
[07:46:01] [PASSED] pf_negotiate_base_match
[07:46:01] [PASSED] pf_negotiate_base_newer
[07:46:01] [PASSED] pf_negotiate_base_next
[07:46:01] [SKIPPED] pf_negotiate_base_older
[07:46:01] [PASSED] pf_negotiate_base_prev
[07:46:01] [PASSED] pf_negotiate_latest_match
[07:46:01] [PASSED] pf_negotiate_latest_newer
[07:46:01] [PASSED] pf_negotiate_latest_next
[07:46:01] [SKIPPED] pf_negotiate_latest_older
[07:46:01] [SKIPPED] pf_negotiate_latest_prev
[07:46:01] =================== [PASSED] pf_service ====================
[07:46:01] ===================== lmtt (1 subtest) =====================
[07:46:01] ======================== test_ops =========================
[07:46:01] [PASSED] 2-level
[07:46:01] [PASSED] multi-level
[07:46:01] ==================== [PASSED] test_ops =====================
[07:46:01] ====================== [PASSED] lmtt =======================
[07:46:01] =================== xe_mocs (2 subtests) ===================
[07:46:01] ================ xe_live_mocs_kernel_kunit ================
[07:46:01] =========== [SKIPPED] xe_live_mocs_kernel_kunit ============
[07:46:01] ================ xe_live_mocs_reset_kunit =================
[07:46:01] ============ [SKIPPED] xe_live_mocs_reset_kunit ============
[07:46:01] ==================== [SKIPPED] xe_mocs =====================
[07:46:01] ================= xe_migrate (2 subtests) ==================
[07:46:01] ================= xe_migrate_sanity_kunit =================
[07:46:01] ============ [SKIPPED] xe_migrate_sanity_kunit =============
[07:46:01] ================== xe_validate_ccs_kunit ==================
[07:46:01] ============= [SKIPPED] xe_validate_ccs_kunit ==============
[07:46:01] =================== [SKIPPED] xe_migrate ===================
[07:46:01] ================== xe_dma_buf (1 subtest) ==================
[07:46:01] ==================== xe_dma_buf_kunit =====================
[07:46:01] ================ [SKIPPED] xe_dma_buf_kunit ================
[07:46:01] =================== [SKIPPED] xe_dma_buf ===================
[07:46:01] ================= xe_bo_shrink (1 subtest) =================
[07:46:01] =================== xe_bo_shrink_kunit ====================
[07:46:01] =============== [SKIPPED] xe_bo_shrink_kunit ===============
[07:46:01] ================== [SKIPPED] xe_bo_shrink ==================
[07:46:01] ==================== xe_bo (2 subtests) ====================
[07:46:01] ================== xe_ccs_migrate_kunit ===================
[07:46:01] ============== [SKIPPED] xe_ccs_migrate_kunit ==============
[07:46:01] ==================== xe_bo_evict_kunit ====================
[07:46:01] =============== [SKIPPED] xe_bo_evict_kunit ================
[07:46:01] ===================== [SKIPPED] xe_bo ======================
[07:46:01] ==================== args (11 subtests) ====================
[07:46:01] [PASSED] count_args_test
[07:46:01] [PASSED] call_args_example
[07:46:01] [PASSED] call_args_test
[07:46:01] [PASSED] drop_first_arg_example
[07:46:01] [PASSED] drop_first_arg_test
[07:46:01] [PASSED] first_arg_example
[07:46:01] [PASSED] first_arg_test
[07:46:01] [PASSED] last_arg_example
[07:46:01] [PASSED] last_arg_test
[07:46:01] [PASSED] pick_arg_example
[07:46:01] [PASSED] sep_comma_example
[07:46:01] ====================== [PASSED] args =======================
[07:46:01] =================== xe_pci (2 subtests) ====================
[07:46:01] [PASSED] xe_gmdid_graphics_ip
[07:46:01] [PASSED] xe_gmdid_media_ip
[07:46:01] ===================== [PASSED] xe_pci ======================
[07:46:01] =================== xe_rtp (2 subtests) ====================
[07:46:01] =============== xe_rtp_process_to_sr_tests ================
[07:46:01] [PASSED] coalesce-same-reg
[07:46:01] [PASSED] no-match-no-add
[07:46:01] [PASSED] match-or
[07:46:01] [PASSED] match-or-xfail
[07:46:01] [PASSED] no-match-no-add-multiple-rules
[07:46:01] [PASSED] two-regs-two-entries
[07:46:01] [PASSED] clr-one-set-other
[07:46:01] [PASSED] set-field
[07:46:01] [PASSED] conflict-duplicate
[07:46:01] [PASSED] conflict-not-disjoint
stty: 'standard input': Inappropriate ioctl for device
[07:46:01] [PASSED] conflict-reg-type
[07:46:01] =========== [PASSED] xe_rtp_process_to_sr_tests ============
[07:46:01] ================== xe_rtp_process_tests ===================
[07:46:01] [PASSED] active1
[07:46:01] [PASSED] active2
[07:46:01] [PASSED] active-inactive
[07:46:01] [PASSED] inactive-active
[07:46:01] [PASSED] inactive-1st_or_active-inactive
[07:46:01] [PASSED] inactive-2nd_or_active-inactive
[07:46:01] [PASSED] inactive-last_or_active-inactive
[07:46:01] [PASSED] inactive-no_or_active-inactive
[07:46:01] ============== [PASSED] xe_rtp_process_tests ===============
[07:46:01] ===================== [PASSED] xe_rtp ======================
[07:46:01] ==================== xe_wa (1 subtest) =====================
[07:46:01] ======================== xe_wa_gt =========================
[07:46:01] [PASSED] TIGERLAKE (B0)
[07:46:01] [PASSED] DG1 (A0)
[07:46:01] [PASSED] DG1 (B0)
[07:46:01] [PASSED] ALDERLAKE_S (A0)
[07:46:01] [PASSED] ALDERLAKE_S (B0)
[07:46:01] [PASSED] ALDERLAKE_S (C0)
[07:46:01] [PASSED] ALDERLAKE_S (D0)
[07:46:01] [PASSED] ALDERLAKE_P (A0)
[07:46:01] [PASSED] ALDERLAKE_P (B0)
[07:46:01] [PASSED] ALDERLAKE_P (C0)
[07:46:01] [PASSED] ALDERLAKE_S_RPLS (D0)
[07:46:01] [PASSED] ALDERLAKE_P_RPLU (E0)
[07:46:01] [PASSED] DG2_G10 (C0)
[07:46:01] [PASSED] DG2_G11 (B1)
[07:46:01] [PASSED] DG2_G12 (A1)
[07:46:01] [PASSED] METEORLAKE (g:A0, m:A0)
[07:46:01] [PASSED] METEORLAKE (g:A0, m:A0)
[07:46:01] [PASSED] METEORLAKE (g:A0, m:A0)
[07:46:01] [PASSED] LUNARLAKE (g:A0, m:A0)
[07:46:01] [PASSED] LUNARLAKE (g:B0, m:A0)
[07:46:01] [PASSED] BATTLEMAGE (g:A0, m:A1)
[07:46:01] ==================== [PASSED] xe_wa_gt =====================
[07:46:01] ====================== [PASSED] xe_wa ======================
[07:46:01] ============================================================
[07:46:01] Testing complete. Ran 133 tests: passed: 117, skipped: 16
[07:46:01] Elapsed time: 30.983s total, 4.180s configuring, 26.487s building, 0.310s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/tests/.kunitconfig
[07:46:01] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[07:46:03] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[07:46:23] Starting KUnit Kernel (1/1)...
[07:46:23] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[07:46:24] == drm_test_atomic_get_connector_for_encoder (1 subtest) ===
[07:46:24] [PASSED] drm_test_drm_atomic_get_connector_for_encoder
[07:46:24] ==== [PASSED] drm_test_atomic_get_connector_for_encoder ====
[07:46:24] =========== drm_validate_clone_mode (2 subtests) ===========
[07:46:24] ============== drm_test_check_in_clone_mode ===============
[07:46:24] [PASSED] in_clone_mode
[07:46:24] [PASSED] not_in_clone_mode
[07:46:24] ========== [PASSED] drm_test_check_in_clone_mode ===========
[07:46:24] =============== drm_test_check_valid_clones ===============
[07:46:24] [PASSED] not_in_clone_mode
[07:46:24] [PASSED] valid_clone
[07:46:24] [PASSED] invalid_clone
[07:46:24] =========== [PASSED] drm_test_check_valid_clones ===========
[07:46:24] ============= [PASSED] drm_validate_clone_mode =============
[07:46:24] ============= drm_validate_modeset (1 subtest) =============
[07:46:24] [PASSED] drm_test_check_connector_changed_modeset
[07:46:24] ============== [PASSED] drm_validate_modeset ===============
[07:46:24] ====== drm_test_bridge_get_current_state (2 subtests) ======
[07:46:24] [PASSED] drm_test_drm_bridge_get_current_state_atomic
[07:46:24] [PASSED] drm_test_drm_bridge_get_current_state_legacy
[07:46:24] ======== [PASSED] drm_test_bridge_get_current_state ========
[07:46:24] ====== drm_test_bridge_helper_reset_crtc (3 subtests) ======
[07:46:24] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic
[07:46:24] [PASSED] drm_test_drm_bridge_helper_reset_crtc_atomic_disabled
[07:46:24] [PASSED] drm_test_drm_bridge_helper_reset_crtc_legacy
[07:46:24] ======== [PASSED] drm_test_bridge_helper_reset_crtc ========
[07:46:24] ================== drm_buddy (7 subtests) ==================
[07:46:24] [PASSED] drm_test_buddy_alloc_limit
[07:46:24] [PASSED] drm_test_buddy_alloc_optimistic
[07:46:24] [PASSED] drm_test_buddy_alloc_pessimistic
[07:46:24] [PASSED] drm_test_buddy_alloc_pathological
[07:46:24] [PASSED] drm_test_buddy_alloc_contiguous
[07:46:24] [PASSED] drm_test_buddy_alloc_clear
[07:46:24] [PASSED] drm_test_buddy_alloc_range_bias
[07:46:24] ==================== [PASSED] drm_buddy ====================
[07:46:24] ============= drm_cmdline_parser (40 subtests) =============
[07:46:24] [PASSED] drm_test_cmdline_force_d_only
[07:46:24] [PASSED] drm_test_cmdline_force_D_only_dvi
[07:46:24] [PASSED] drm_test_cmdline_force_D_only_hdmi
[07:46:24] [PASSED] drm_test_cmdline_force_D_only_not_digital
[07:46:24] [PASSED] drm_test_cmdline_force_e_only
[07:46:24] [PASSED] drm_test_cmdline_res
[07:46:24] [PASSED] drm_test_cmdline_res_vesa
[07:46:24] [PASSED] drm_test_cmdline_res_vesa_rblank
[07:46:24] [PASSED] drm_test_cmdline_res_rblank
[07:46:24] [PASSED] drm_test_cmdline_res_bpp
[07:46:24] [PASSED] drm_test_cmdline_res_refresh
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_margins
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_force_off
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_analog
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_force_on_digital
[07:46:24] [PASSED] drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on
[07:46:24] [PASSED] drm_test_cmdline_res_margins_force_on
[07:46:24] [PASSED] drm_test_cmdline_res_vesa_margins
[07:46:24] [PASSED] drm_test_cmdline_name
[07:46:24] [PASSED] drm_test_cmdline_name_bpp
[07:46:24] [PASSED] drm_test_cmdline_name_option
[07:46:24] [PASSED] drm_test_cmdline_name_bpp_option
[07:46:24] [PASSED] drm_test_cmdline_rotate_0
[07:46:24] [PASSED] drm_test_cmdline_rotate_90
[07:46:24] [PASSED] drm_test_cmdline_rotate_180
[07:46:24] [PASSED] drm_test_cmdline_rotate_270
[07:46:24] [PASSED] drm_test_cmdline_hmirror
[07:46:24] [PASSED] drm_test_cmdline_vmirror
[07:46:24] [PASSED] drm_test_cmdline_margin_options
[07:46:24] [PASSED] drm_test_cmdline_multiple_options
[07:46:24] [PASSED] drm_test_cmdline_bpp_extra_and_option
[07:46:24] [PASSED] drm_test_cmdline_extra_and_option
[07:46:24] [PASSED] drm_test_cmdline_freestanding_options
[07:46:24] [PASSED] drm_test_cmdline_freestanding_force_e_and_options
[07:46:24] [PASSED] drm_test_cmdline_panel_orientation
[07:46:24] ================ drm_test_cmdline_invalid =================
[07:46:24] [PASSED] margin_only
[07:46:24] [PASSED] interlace_only
[07:46:24] [PASSED] res_missing_x
[07:46:24] [PASSED] res_missing_y
[07:46:24] [PASSED] res_bad_y
[07:46:24] [PASSED] res_missing_y_bpp
[07:46:24] [PASSED] res_bad_bpp
[07:46:24] [PASSED] res_bad_refresh
[07:46:24] [PASSED] res_bpp_refresh_force_on_off
[07:46:24] [PASSED] res_invalid_mode
[07:46:24] [PASSED] res_bpp_wrong_place_mode
[07:46:24] [PASSED] name_bpp_refresh
[07:46:24] [PASSED] name_refresh
[07:46:24] [PASSED] name_refresh_wrong_mode
[07:46:24] [PASSED] name_refresh_invalid_mode
[07:46:24] [PASSED] rotate_multiple
[07:46:24] [PASSED] rotate_invalid_val
[07:46:24] [PASSED] rotate_truncated
[07:46:24] [PASSED] invalid_option
[07:46:24] [PASSED] invalid_tv_option
[07:46:24] [PASSED] truncated_tv_option
[07:46:24] ============ [PASSED] drm_test_cmdline_invalid =============
[07:46:24] =============== drm_test_cmdline_tv_options ===============
[07:46:24] [PASSED] NTSC
[07:46:24] [PASSED] NTSC_443
[07:46:24] [PASSED] NTSC_J
[07:46:24] [PASSED] PAL
[07:46:24] [PASSED] PAL_M
[07:46:24] [PASSED] PAL_N
[07:46:24] [PASSED] SECAM
[07:46:24] [PASSED] MONO_525
[07:46:24] [PASSED] MONO_625
[07:46:24] =========== [PASSED] drm_test_cmdline_tv_options ===========
[07:46:24] =============== [PASSED] drm_cmdline_parser ================
[07:46:24] ========== drmm_connector_hdmi_init (20 subtests) ==========
[07:46:24] [PASSED] drm_test_connector_hdmi_init_valid
[07:46:24] [PASSED] drm_test_connector_hdmi_init_bpc_8
[07:46:24] [PASSED] drm_test_connector_hdmi_init_bpc_10
[07:46:24] [PASSED] drm_test_connector_hdmi_init_bpc_12
[07:46:24] [PASSED] drm_test_connector_hdmi_init_bpc_invalid
[07:46:24] [PASSED] drm_test_connector_hdmi_init_bpc_null
[07:46:24] [PASSED] drm_test_connector_hdmi_init_formats_empty
[07:46:24] [PASSED] drm_test_connector_hdmi_init_formats_no_rgb
[07:46:24] === drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[07:46:24] [PASSED] supported_formats=0x9 yuv420_allowed=1
[07:46:24] [PASSED] supported_formats=0x9 yuv420_allowed=0
[07:46:24] [PASSED] supported_formats=0x3 yuv420_allowed=1
[07:46:24] [PASSED] supported_formats=0x3 yuv420_allowed=0
[07:46:24] === [PASSED] drm_test_connector_hdmi_init_formats_yuv420_allowed ===
[07:46:24] [PASSED] drm_test_connector_hdmi_init_null_ddc
[07:46:24] [PASSED] drm_test_connector_hdmi_init_null_product
[07:46:24] [PASSED] drm_test_connector_hdmi_init_null_vendor
[07:46:24] [PASSED] drm_test_connector_hdmi_init_product_length_exact
[07:46:24] [PASSED] drm_test_connector_hdmi_init_product_length_too_long
[07:46:24] [PASSED] drm_test_connector_hdmi_init_product_valid
[07:46:24] [PASSED] drm_test_connector_hdmi_init_vendor_length_exact
[07:46:24] [PASSED] drm_test_connector_hdmi_init_vendor_length_too_long
[07:46:24] [PASSED] drm_test_connector_hdmi_init_vendor_valid
[07:46:24] ========= drm_test_connector_hdmi_init_type_valid =========
[07:46:24] [PASSED] HDMI-A
[07:46:24] [PASSED] HDMI-B
[07:46:24] ===== [PASSED] drm_test_connector_hdmi_init_type_valid =====
[07:46:24] ======== drm_test_connector_hdmi_init_type_invalid ========
[07:46:24] [PASSED] Unknown
[07:46:24] [PASSED] VGA
[07:46:24] [PASSED] DVI-I
[07:46:24] [PASSED] DVI-D
[07:46:24] [PASSED] DVI-A
[07:46:24] [PASSED] Composite
[07:46:24] [PASSED] SVIDEO
[07:46:24] [PASSED] LVDS
[07:46:24] [PASSED] Component
[07:46:24] [PASSED] DIN
[07:46:24] [PASSED] DP
[07:46:24] [PASSED] TV
[07:46:24] [PASSED] eDP
[07:46:24] [PASSED] Virtual
[07:46:24] [PASSED] DSI
[07:46:24] [PASSED] DPI
[07:46:24] [PASSED] Writeback
[07:46:24] [PASSED] SPI
[07:46:24] [PASSED] USB
[07:46:24] ==== [PASSED] drm_test_connector_hdmi_init_type_invalid ====
[07:46:24] ============ [PASSED] drmm_connector_hdmi_init =============
[07:46:24] ============= drmm_connector_init (3 subtests) =============
[07:46:24] [PASSED] drm_test_drmm_connector_init
[07:46:24] [PASSED] drm_test_drmm_connector_init_null_ddc
[07:46:24] ========= drm_test_drmm_connector_init_type_valid =========
[07:46:24] [PASSED] Unknown
[07:46:24] [PASSED] VGA
[07:46:24] [PASSED] DVI-I
[07:46:24] [PASSED] DVI-D
[07:46:24] [PASSED] DVI-A
[07:46:24] [PASSED] Composite
[07:46:24] [PASSED] SVIDEO
[07:46:24] [PASSED] LVDS
[07:46:24] [PASSED] Component
[07:46:24] [PASSED] DIN
[07:46:24] [PASSED] DP
[07:46:24] [PASSED] HDMI-A
[07:46:24] [PASSED] HDMI-B
[07:46:24] [PASSED] TV
[07:46:24] [PASSED] eDP
[07:46:24] [PASSED] Virtual
[07:46:24] [PASSED] DSI
[07:46:24] [PASSED] DPI
[07:46:24] [PASSED] Writeback
[07:46:24] [PASSED] SPI
[07:46:24] [PASSED] USB
[07:46:24] ===== [PASSED] drm_test_drmm_connector_init_type_valid =====
[07:46:24] =============== [PASSED] drmm_connector_init ===============
[07:46:24] ========= drm_connector_dynamic_init (6 subtests) ==========
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_init
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_init_null_ddc
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_init_not_added
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_init_properties
[07:46:24] ===== drm_test_drm_connector_dynamic_init_type_valid ======
[07:46:24] [PASSED] Unknown
[07:46:24] [PASSED] VGA
[07:46:24] [PASSED] DVI-I
[07:46:24] [PASSED] DVI-D
[07:46:24] [PASSED] DVI-A
[07:46:24] [PASSED] Composite
[07:46:24] [PASSED] SVIDEO
[07:46:24] [PASSED] LVDS
[07:46:24] [PASSED] Component
[07:46:24] [PASSED] DIN
[07:46:24] [PASSED] DP
[07:46:24] [PASSED] HDMI-A
[07:46:24] [PASSED] HDMI-B
[07:46:24] [PASSED] TV
[07:46:24] [PASSED] eDP
[07:46:24] [PASSED] Virtual
[07:46:24] [PASSED] DSI
[07:46:24] [PASSED] DPI
[07:46:24] [PASSED] Writeback
[07:46:24] [PASSED] SPI
[07:46:24] [PASSED] USB
[07:46:24] = [PASSED] drm_test_drm_connector_dynamic_init_type_valid ==
[07:46:24] ======== drm_test_drm_connector_dynamic_init_name =========
[07:46:24] [PASSED] Unknown
[07:46:24] [PASSED] VGA
[07:46:24] [PASSED] DVI-I
[07:46:24] [PASSED] DVI-D
[07:46:24] [PASSED] DVI-A
[07:46:24] [PASSED] Composite
[07:46:24] [PASSED] SVIDEO
[07:46:24] [PASSED] LVDS
[07:46:24] [PASSED] Component
[07:46:24] [PASSED] DIN
[07:46:24] [PASSED] DP
[07:46:24] [PASSED] HDMI-A
[07:46:24] [PASSED] HDMI-B
[07:46:24] [PASSED] TV
[07:46:24] [PASSED] eDP
[07:46:24] [PASSED] Virtual
[07:46:24] [PASSED] DSI
[07:46:24] [PASSED] DPI
[07:46:24] [PASSED] Writeback
[07:46:24] [PASSED] SPI
[07:46:24] [PASSED] USB
[07:46:24] ==== [PASSED] drm_test_drm_connector_dynamic_init_name =====
[07:46:24] =========== [PASSED] drm_connector_dynamic_init ============
[07:46:24] ==== drm_connector_dynamic_register_early (4 subtests) =====
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_early_on_list
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_early_defer
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_early_no_init
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_early_no_mode_object
[07:46:24] ====== [PASSED] drm_connector_dynamic_register_early =======
[07:46:24] ======= drm_connector_dynamic_register (7 subtests) ========
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_on_list
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_no_defer
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_no_init
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_mode_object
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_sysfs
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_sysfs_name
[07:46:24] [PASSED] drm_test_drm_connector_dynamic_register_debugfs
[07:46:24] ========= [PASSED] drm_connector_dynamic_register ==========
[07:46:24] = drm_connector_attach_broadcast_rgb_property (2 subtests) =
[07:46:24] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property
[07:46:24] [PASSED] drm_test_drm_connector_attach_broadcast_rgb_property_hdmi_connector
[07:46:24] === [PASSED] drm_connector_attach_broadcast_rgb_property ===
[07:46:24] ========== drm_get_tv_mode_from_name (2 subtests) ==========
[07:46:24] ========== drm_test_get_tv_mode_from_name_valid ===========
[07:46:24] [PASSED] NTSC
[07:46:24] [PASSED] NTSC-443
[07:46:24] [PASSED] NTSC-J
[07:46:24] [PASSED] PAL
[07:46:24] [PASSED] PAL-M
[07:46:24] [PASSED] PAL-N
[07:46:24] [PASSED] SECAM
[07:46:24] [PASSED] Mono
[07:46:24] ====== [PASSED] drm_test_get_tv_mode_from_name_valid =======
[07:46:24] [PASSED] drm_test_get_tv_mode_from_name_truncated
[07:46:24] ============ [PASSED] drm_get_tv_mode_from_name ============
[07:46:24] = drm_test_connector_hdmi_compute_mode_clock (12 subtests) =
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1
[07:46:24] [PASSED] drm_test_drm_hdmi_compute_mode_clock_rgb_double
[07:46:24] = drm_test_connector_hdmi_compute_mode_clock_yuv420_valid =
[07:46:24] [PASSED] VIC 96
[07:46:24] [PASSED] VIC 97
[07:46:24] [PASSED] VIC 101
[07:46:24] [PASSED] VIC 102
[07:46:24] [PASSED] VIC 106
[07:46:24] [PASSED] VIC 107
[07:46:24] === [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_valid ===
[07:46:24] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc
[07:46:24] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc
[07:46:24] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc
[07:46:24] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc
[07:46:24] [PASSED] drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc
[07:46:24] === [PASSED] drm_test_connector_hdmi_compute_mode_clock ====
[07:46:24] == drm_hdmi_connector_get_broadcast_rgb_name (2 subtests) ==
[07:46:24] === drm_test_drm_hdmi_connector_get_broadcast_rgb_name ====
[07:46:24] [PASSED] Automatic
[07:46:24] [PASSED] Full
[07:46:24] [PASSED] Limited 16:235
[07:46:24] === [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name ===
[07:46:24] [PASSED] drm_test_drm_hdmi_connector_get_broadcast_rgb_name_invalid
[07:46:24] ==== [PASSED] drm_hdmi_connector_get_broadcast_rgb_name ====
[07:46:24] == drm_hdmi_connector_get_output_format_name (2 subtests) ==
[07:46:24] === drm_test_drm_hdmi_connector_get_output_format_name ====
[07:46:24] [PASSED] RGB
[07:46:24] [PASSED] YUV 4:2:0
[07:46:24] [PASSED] YUV 4:2:2
[07:46:24] [PASSED] YUV 4:4:4
[07:46:24] === [PASSED] drm_test_drm_hdmi_connector_get_output_format_name ===
[07:46:24] [PASSED] drm_test_drm_hdmi_connector_get_output_format_name_invalid
[07:46:24] ==== [PASSED] drm_hdmi_connector_get_output_format_name ====
[07:46:24] ============= drm_damage_helper (21 subtests) ==============
[07:46:24] [PASSED] drm_test_damage_iter_no_damage
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_fractional_src
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_src_moved
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_fractional_src_moved
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_not_visible
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_no_crtc
[07:46:24] [PASSED] drm_test_damage_iter_no_damage_no_fb
[07:46:24] [PASSED] drm_test_damage_iter_simple_damage
[07:46:24] [PASSED] drm_test_damage_iter_single_damage
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_intersect_src
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_outside_src
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_fractional_src
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_intersect_fractional_src
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_outside_fractional_src
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_src_moved
[07:46:24] [PASSED] drm_test_damage_iter_single_damage_fractional_src_moved
[07:46:24] [PASSED] drm_test_damage_iter_damage
[07:46:24] [PASSED] drm_test_damage_iter_damage_one_intersect
[07:46:24] [PASSED] drm_test_damage_iter_damage_one_outside
[07:46:24] [PASSED] drm_test_damage_iter_damage_src_moved
[07:46:24] [PASSED] drm_test_damage_iter_damage_not_visible
[07:46:24] ================ [PASSED] drm_damage_helper ================
[07:46:24] ============== drm_dp_mst_helper (3 subtests) ==============
[07:46:24] ============== drm_test_dp_mst_calc_pbn_mode ==============
[07:46:24] [PASSED] Clock 154000 BPP 30 DSC disabled
[07:46:24] [PASSED] Clock 234000 BPP 30 DSC disabled
[07:46:24] [PASSED] Clock 297000 BPP 24 DSC disabled
[07:46:24] [PASSED] Clock 332880 BPP 24 DSC enabled
[07:46:24] [PASSED] Clock 324540 BPP 24 DSC enabled
[07:46:24] ========== [PASSED] drm_test_dp_mst_calc_pbn_mode ==========
[07:46:24] ============== drm_test_dp_mst_calc_pbn_div ===============
[07:46:24] [PASSED] Link rate 2000000 lane count 4
[07:46:24] [PASSED] Link rate 2000000 lane count 2
[07:46:24] [PASSED] Link rate 2000000 lane count 1
[07:46:24] [PASSED] Link rate 1350000 lane count 4
[07:46:24] [PASSED] Link rate 1350000 lane count 2
[07:46:24] [PASSED] Link rate 1350000 lane count 1
[07:46:24] [PASSED] Link rate 1000000 lane count 4
[07:46:24] [PASSED] Link rate 1000000 lane count 2
[07:46:24] [PASSED] Link rate 1000000 lane count 1
[07:46:24] [PASSED] Link rate 810000 lane count 4
[07:46:24] [PASSED] Link rate 810000 lane count 2
[07:46:24] [PASSED] Link rate 810000 lane count 1
[07:46:24] [PASSED] Link rate 540000 lane count 4
[07:46:24] [PASSED] Link rate 540000 lane count 2
[07:46:24] [PASSED] Link rate 540000 lane count 1
[07:46:24] [PASSED] Link rate 270000 lane count 4
[07:46:24] [PASSED] Link rate 270000 lane count 2
[07:46:24] [PASSED] Link rate 270000 lane count 1
[07:46:24] [PASSED] Link rate 162000 lane count 4
[07:46:24] [PASSED] Link rate 162000 lane count 2
[07:46:24] [PASSED] Link rate 162000 lane count 1
[07:46:24] ========== [PASSED] drm_test_dp_mst_calc_pbn_div ===========
[07:46:24] ========= drm_test_dp_mst_sideband_msg_req_decode =========
[07:46:24] [PASSED] DP_ENUM_PATH_RESOURCES with port number
[07:46:24] [PASSED] DP_POWER_UP_PHY with port number
[07:46:24] [PASSED] DP_POWER_DOWN_PHY with port number
[07:46:24] [PASSED] DP_ALLOCATE_PAYLOAD with SDP stream sinks
[07:46:24] [PASSED] DP_ALLOCATE_PAYLOAD with port number
[07:46:24] [PASSED] DP_ALLOCATE_PAYLOAD with VCPI
[07:46:24] [PASSED] DP_ALLOCATE_PAYLOAD with PBN
[07:46:24] [PASSED] DP_QUERY_PAYLOAD with port number
[07:46:24] [PASSED] DP_QUERY_PAYLOAD with VCPI
[07:46:24] [PASSED] DP_REMOTE_DPCD_READ with port number
[07:46:24] [PASSED] DP_REMOTE_DPCD_READ with DPCD address
[07:46:24] [PASSED] DP_REMOTE_DPCD_READ with max number of bytes
[07:46:24] [PASSED] DP_REMOTE_DPCD_WRITE with port number
[07:46:24] [PASSED] DP_REMOTE_DPCD_WRITE with DPCD address
[07:46:24] [PASSED] DP_REMOTE_DPCD_WRITE with data array
[07:46:24] [PASSED] DP_REMOTE_I2C_READ with port number
[07:46:24] [PASSED] DP_REMOTE_I2C_READ with I2C device ID
[07:46:24] [PASSED] DP_REMOTE_I2C_READ with transactions array
[07:46:24] [PASSED] DP_REMOTE_I2C_WRITE with port number
[07:46:24] [PASSED] DP_REMOTE_I2C_WRITE with I2C device ID
[07:46:24] [PASSED] DP_REMOTE_I2C_WRITE with data array
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream ID
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with client ID
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream event
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with valid stream event
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with stream behavior
[07:46:24] [PASSED] DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior
[07:46:24] ===== [PASSED] drm_test_dp_mst_sideband_msg_req_decode =====
[07:46:24] ================ [PASSED] drm_dp_mst_helper ================
[07:46:24] ================== drm_exec (7 subtests) ===================
[07:46:24] [PASSED] sanitycheck
[07:46:24] [PASSED] test_lock
[07:46:24] [PASSED] test_lock_unlock
[07:46:24] [PASSED] test_duplicates
[07:46:24] [PASSED] test_prepare
[07:46:24] [PASSED] test_prepare_array
[07:46:24] [PASSED] test_multiple_loops
[07:46:24] ==================== [PASSED] drm_exec =====================
[07:46:24] =========== drm_format_helper_test (18 subtests) ===========
[07:46:24] ============== drm_test_fb_xrgb8888_to_gray8 ==============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========== [PASSED] drm_test_fb_xrgb8888_to_gray8 ==========
[07:46:24] ============= drm_test_fb_xrgb8888_to_rgb332 ==============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb332 ==========
[07:46:24] ============= drm_test_fb_xrgb8888_to_rgb565 ==============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb565 ==========
[07:46:24] ============ drm_test_fb_xrgb8888_to_xrgb1555 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_xrgb1555 =========
[07:46:24] ============ drm_test_fb_xrgb8888_to_argb1555 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_argb1555 =========
[07:46:24] ============ drm_test_fb_xrgb8888_to_rgba5551 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_rgba5551 =========
[07:46:24] ============= drm_test_fb_xrgb8888_to_rgb888 ==============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========= [PASSED] drm_test_fb_xrgb8888_to_rgb888 ==========
[07:46:24] ============= drm_test_fb_xrgb8888_to_bgr888 ==============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========= [PASSED] drm_test_fb_xrgb8888_to_bgr888 ==========
[07:46:24] ============ drm_test_fb_xrgb8888_to_argb8888 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_argb8888 =========
[07:46:24] =========== drm_test_fb_xrgb8888_to_xrgb2101010 ===========
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======= [PASSED] drm_test_fb_xrgb8888_to_xrgb2101010 =======
[07:46:24] =========== drm_test_fb_xrgb8888_to_argb2101010 ===========
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======= [PASSED] drm_test_fb_xrgb8888_to_argb2101010 =======
[07:46:24] ============== drm_test_fb_xrgb8888_to_mono ===============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ========== [PASSED] drm_test_fb_xrgb8888_to_mono ===========
[07:46:24] ==================== drm_test_fb_swab =====================
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ================ [PASSED] drm_test_fb_swab =================
[07:46:24] ============ drm_test_fb_xrgb8888_to_xbgr8888 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_xbgr8888 =========
[07:46:24] ============ drm_test_fb_xrgb8888_to_abgr8888 =============
[07:46:24] [PASSED] single_pixel_source_buffer
[07:46:24] [PASSED] single_pixel_clip_rectangle
[07:46:24] [PASSED] well_known_colors
[07:46:24] [PASSED] destination_pitch
[07:46:24] ======== [PASSED] drm_test_fb_xrgb8888_to_abgr8888 =========
[07:46:24] ================= drm_test_fb_clip_offset =================
[07:46:24] [PASSED] pass through
[07:46:24] [PASSED] horizontal offset
[07:46:24] [PASSED] vertical offset
[07:46:24] [PASSED] horizontal and vertical offset
[07:46:24] [PASSED] horizontal offset (custom pitch)
[07:46:24] [PASSED] vertical offset (custom pitch)
[07:46:24] [PASSED] horizontal and vertical offset (custom pitch)
[07:46:24] ============= [PASSED] drm_test_fb_clip_offset =============
[07:46:24] ============== drm_test_fb_build_fourcc_list ==============
[07:46:24] [PASSED] no native formats
[07:46:24] [PASSED] XRGB8888 as native format
[07:46:24] [PASSED] remove duplicates
[07:46:24] [PASSED] convert alpha formats
[07:46:24] [PASSED] random formats
[07:46:24] ========== [PASSED] drm_test_fb_build_fourcc_list ==========
[07:46:24] =================== drm_test_fb_memcpy ====================
[07:46:24] [PASSED] single_pixel_source_buffer: XR24 little-endian (0x34325258)
[07:46:24] [PASSED] single_pixel_source_buffer: XRA8 little-endian (0x38415258)
[07:46:24] [PASSED] single_pixel_source_buffer: YU24 little-endian (0x34325559)
[07:46:24] [PASSED] single_pixel_clip_rectangle: XB24 little-endian (0x34324258)
[07:46:24] [PASSED] single_pixel_clip_rectangle: XRA8 little-endian (0x38415258)
[07:46:24] [PASSED] single_pixel_clip_rectangle: YU24 little-endian (0x34325559)
[07:46:24] [PASSED] well_known_colors: XB24 little-endian (0x34324258)
[07:46:24] [PASSED] well_known_colors: XRA8 little-endian (0x38415258)
[07:46:24] [PASSED] well_known_colors: YU24 little-endian (0x34325559)
[07:46:24] [PASSED] destination_pitch: XB24 little-endian (0x34324258)
[07:46:24] [PASSED] destination_pitch: XRA8 little-endian (0x38415258)
[07:46:24] [PASSED] destination_pitch: YU24 little-endian (0x34325559)
[07:46:24] =============== [PASSED] drm_test_fb_memcpy ================
[07:46:24] ============= [PASSED] drm_format_helper_test ==============
[07:46:24] ================= drm_format (18 subtests) =================
[07:46:24] [PASSED] drm_test_format_block_width_invalid
[07:46:24] [PASSED] drm_test_format_block_width_one_plane
[07:46:24] [PASSED] drm_test_format_block_width_two_plane
[07:46:24] [PASSED] drm_test_format_block_width_three_plane
[07:46:24] [PASSED] drm_test_format_block_width_tiled
[07:46:24] [PASSED] drm_test_format_block_height_invalid
[07:46:24] [PASSED] drm_test_format_block_height_one_plane
[07:46:24] [PASSED] drm_test_format_block_height_two_plane
[07:46:24] [PASSED] drm_test_format_block_height_three_plane
[07:46:24] [PASSED] drm_test_format_block_height_tiled
[07:46:24] [PASSED] drm_test_format_min_pitch_invalid
[07:46:24] [PASSED] drm_test_format_min_pitch_one_plane_8bpp
[07:46:24] [PASSED] drm_test_format_min_pitch_one_plane_16bpp
[07:46:24] [PASSED] drm_test_format_min_pitch_one_plane_24bpp
[07:46:24] [PASSED] drm_test_format_min_pitch_one_plane_32bpp
[07:46:24] [PASSED] drm_test_format_min_pitch_two_plane
[07:46:24] [PASSED] drm_test_format_min_pitch_three_plane_8bpp
[07:46:24] [PASSED] drm_test_format_min_pitch_tiled
[07:46:24] =================== [PASSED] drm_format ====================
[07:46:24] ============== drm_framebuffer (10 subtests) ===============
[07:46:24] ========== drm_test_framebuffer_check_src_coords ==========
[07:46:24] [PASSED] Success: source fits into fb
[07:46:24] [PASSED] Fail: overflowing fb with x-axis coordinate
[07:46:24] [PASSED] Fail: overflowing fb with y-axis coordinate
[07:46:24] [PASSED] Fail: overflowing fb with source width
[07:46:24] [PASSED] Fail: overflowing fb with source height
[07:46:24] ====== [PASSED] drm_test_framebuffer_check_src_coords ======
[07:46:24] [PASSED] drm_test_framebuffer_cleanup
[07:46:24] =============== drm_test_framebuffer_create ===============
[07:46:24] [PASSED] ABGR8888 normal sizes
[07:46:24] [PASSED] ABGR8888 max sizes
[07:46:24] [PASSED] ABGR8888 pitch greater than min required
[07:46:24] [PASSED] ABGR8888 pitch less than min required
[07:46:24] [PASSED] ABGR8888 Invalid width
[07:46:24] [PASSED] ABGR8888 Invalid buffer handle
[07:46:24] [PASSED] No pixel format
[07:46:24] [PASSED] ABGR8888 Width 0
[07:46:24] [PASSED] ABGR8888 Height 0
[07:46:24] [PASSED] ABGR8888 Out of bound height * pitch combination
[07:46:24] [PASSED] ABGR8888 Large buffer offset
[07:46:24] [PASSED] ABGR8888 Buffer offset for inexistent plane
[07:46:24] [PASSED] ABGR8888 Invalid flag
[07:46:24] [PASSED] ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers
[07:46:24] [PASSED] ABGR8888 Valid buffer modifier
[07:46:24] [PASSED] ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
[07:46:24] [PASSED] ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] NV12 Normal sizes
[07:46:24] [PASSED] NV12 Max sizes
[07:46:24] [PASSED] NV12 Invalid pitch
[07:46:24] [PASSED] NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag
[07:46:24] [PASSED] NV12 different modifier per-plane
[07:46:24] [PASSED] NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE
[07:46:24] [PASSED] NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] NV12 Modifier for inexistent plane
[07:46:24] [PASSED] NV12 Handle for inexistent plane
[07:46:24] [PASSED] NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] YVU420 DRM_MODE_FB_MODIFIERS set without modifier
[07:46:24] [PASSED] YVU420 Normal sizes
[07:46:24] [PASSED] YVU420 Max sizes
[07:46:24] [PASSED] YVU420 Invalid pitch
[07:46:24] [PASSED] YVU420 Different pitches
[07:46:24] [PASSED] YVU420 Different buffer offsets/pitches
[07:46:24] [PASSED] YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS
[07:46:24] [PASSED] YVU420 Valid modifier
[07:46:24] [PASSED] YVU420 Different modifiers per plane
[07:46:24] [PASSED] YVU420 Modifier for inexistent plane
[07:46:24] [PASSED] YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)
[07:46:24] [PASSED] X0L2 Normal sizes
[07:46:24] [PASSED] X0L2 Max sizes
[07:46:24] [PASSED] X0L2 Invalid pitch
[07:46:24] [PASSED] X0L2 Pitch greater than minimum required
[07:46:24] [PASSED] X0L2 Handle for inexistent plane
[07:46:24] [PASSED] X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set
[07:46:24] [PASSED] X0L2 Modifier without DRM_MODE_FB_MODIFIERS set
[07:46:24] [PASSED] X0L2 Valid modifier
[07:46:24] [PASSED] X0L2 Modifier for inexistent plane
[07:46:24] =========== [PASSED] drm_test_framebuffer_create ===========
[07:46:24] [PASSED] drm_test_framebuffer_free
[07:46:24] [PASSED] drm_test_framebuffer_init
[07:46:24] [PASSED] drm_test_framebuffer_init_bad_format
[07:46:24] [PASSED] drm_test_framebuffer_init_dev_mismatch
[07:46:24] [PASSED] drm_test_framebuffer_lookup
[07:46:24] [PASSED] drm_test_framebuffer_lookup_inexistent
[07:46:24] [PASSED] drm_test_framebuffer_modifiers_not_supported
[07:46:24] ================= [PASSED] drm_framebuffer =================
[07:46:24] ================ drm_gem_shmem (8 subtests) ================
[07:46:24] [PASSED] drm_gem_shmem_test_obj_create
[07:46:24] [PASSED] drm_gem_shmem_test_obj_create_private
[07:46:24] [PASSED] drm_gem_shmem_test_pin_pages
[07:46:24] [PASSED] drm_gem_shmem_test_vmap
[07:46:24] [PASSED] drm_gem_shmem_test_get_pages_sgt
[07:46:24] [PASSED] drm_gem_shmem_test_get_sg_table
[07:46:24] [PASSED] drm_gem_shmem_test_madvise
[07:46:24] [PASSED] drm_gem_shmem_test_purge
[07:46:24] ================== [PASSED] drm_gem_shmem ==================
[07:46:24] === drm_atomic_helper_connector_hdmi_check (23 subtests) ===
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_auto_cea_mode_vic_1
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_full_cea_mode_vic_1
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_limited_cea_mode_vic_1
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_changed
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_crtc_mode_not_changed
[07:46:24] [PASSED] drm_test_check_disable_connector
[07:46:24] [PASSED] drm_test_check_hdmi_funcs_reject_rate
[07:46:24] [PASSED] drm_test_check_max_tmds_rate_bpc_fallback
[07:46:24] [PASSED] drm_test_check_max_tmds_rate_format_fallback
[07:46:24] [PASSED] drm_test_check_output_bpc_crtc_mode_changed
[07:46:24] [PASSED] drm_test_check_output_bpc_crtc_mode_not_changed
[07:46:24] [PASSED] drm_test_check_output_bpc_dvi
[07:46:24] [PASSED] drm_test_check_output_bpc_format_vic_1
[07:46:24] [PASSED] drm_test_check_output_bpc_format_display_8bpc_only
[07:46:24] [PASSED] drm_test_check_output_bpc_format_display_rgb_only
[07:46:24] [PASSED] drm_test_check_output_bpc_format_driver_8bpc_only
[07:46:24] [PASSED] drm_test_check_output_bpc_format_driver_rgb_only
[07:46:24] [PASSED] drm_test_check_tmds_char_rate_rgb_8bpc
[07:46:24] [PASSED] drm_test_check_tmds_char_rate_rgb_10bpc
[07:46:24] [PASSED] drm_test_check_tmds_char_rate_rgb_12bpc
[07:46:24] ===== [PASSED] drm_atomic_helper_connector_hdmi_check ======
[07:46:24] === drm_atomic_helper_connector_hdmi_reset (6 subtests) ====
[07:46:24] [PASSED] drm_test_check_broadcast_rgb_value
[07:46:24] [PASSED] drm_test_check_bpc_8_value
[07:46:24] [PASSED] drm_test_check_bpc_10_value
[07:46:24] [PASSED] drm_test_check_bpc_12_value
[07:46:24] [PASSED] drm_test_check_format_value
[07:46:24] [PASSED] drm_test_check_tmds_char_value
[07:46:24] ===== [PASSED] drm_atomic_helper_connector_hdmi_reset ======
[07:46:24] = drm_atomic_helper_connector_hdmi_mode_valid (4 subtests) =
[07:46:24] [PASSED] drm_test_check_mode_valid
[07:46:24] [PASSED] drm_test_check_mode_valid_reject
[07:46:24] [PASSED] drm_test_check_mode_valid_reject_rate
[07:46:24] [PASSED] drm_test_check_mode_valid_reject_max_clock
[07:46:24] === [PASSED] drm_atomic_helper_connector_hdmi_mode_valid ===
[07:46:24] ================= drm_managed (2 subtests) =================
[07:46:24] [PASSED] drm_test_managed_release_action
[07:46:24] [PASSED] drm_test_managed_run_action
[07:46:24] =================== [PASSED] drm_managed ===================
[07:46:24] =================== drm_mm (6 subtests) ====================
[07:46:24] [PASSED] drm_test_mm_init
[07:46:24] [PASSED] drm_test_mm_debug
[07:46:24] [PASSED] drm_test_mm_align32
[07:46:24] [PASSED] drm_test_mm_align64
[07:46:24] [PASSED] drm_test_mm_lowest
[07:46:24] [PASSED] drm_test_mm_highest
[07:46:24] ===================== [PASSED] drm_mm ======================
[07:46:24] ============= drm_modes_analog_tv (5 subtests) =============
[07:46:24] [PASSED] drm_test_modes_analog_tv_mono_576i
[07:46:24] [PASSED] drm_test_modes_analog_tv_ntsc_480i
[07:46:24] [PASSED] drm_test_modes_analog_tv_ntsc_480i_inlined
[07:46:24] [PASSED] drm_test_modes_analog_tv_pal_576i
[07:46:24] [PASSED] drm_test_modes_analog_tv_pal_576i_inlined
[07:46:24] =============== [PASSED] drm_modes_analog_tv ===============
[07:46:24] ============== drm_plane_helper (2 subtests) ===============
[07:46:24] =============== drm_test_check_plane_state ================
[07:46:24] [PASSED] clipping_simple
[07:46:24] [PASSED] clipping_rotate_reflect
[07:46:24] [PASSED] positioning_simple
[07:46:24] [PASSED] upscaling
[07:46:24] [PASSED] downscaling
[07:46:24] [PASSED] rounding1
[07:46:24] [PASSED] rounding2
[07:46:24] [PASSED] rounding3
[07:46:24] [PASSED] rounding4
[07:46:24] =========== [PASSED] drm_test_check_plane_state ============
[07:46:24] =========== drm_test_check_invalid_plane_state ============
[07:46:24] [PASSED] positioning_invalid
[07:46:24] [PASSED] upscaling_invalid
[07:46:24] [PASSED] downscaling_invalid
[07:46:24] ======= [PASSED] drm_test_check_invalid_plane_state ========
[07:46:24] ================ [PASSED] drm_plane_helper =================
[07:46:24] ====== drm_connector_helper_tv_get_modes (1 subtest) =======
[07:46:24] ====== drm_test_connector_helper_tv_get_modes_check =======
[07:46:24] [PASSED] None
[07:46:24] [PASSED] PAL
[07:46:24] [PASSED] NTSC
[07:46:24] [PASSED] Both, NTSC Default
[07:46:24] [PASSED] Both, PAL Default
[07:46:24] [PASSED] Both, NTSC Default, with PAL on command-line
[07:46:24] [PASSED] Both, PAL Default, with NTSC on command-line
[07:46:24] == [PASSED] drm_test_connector_helper_tv_get_modes_check ===
[07:46:24] ======== [PASSED] drm_connector_helper_tv_get_modes ========
[07:46:24] ================== drm_rect (9 subtests) ===================
[07:46:24] [PASSED] drm_test_rect_clip_scaled_div_by_zero
[07:46:24] [PASSED] drm_test_rect_clip_scaled_not_clipped
[07:46:24] [PASSED] drm_test_rect_clip_scaled_clipped
[07:46:24] [PASSED] drm_test_rect_clip_scaled_signed_vs_unsigned
[07:46:24] ================= drm_test_rect_intersect =================
[07:46:24] [PASSED] top-left x bottom-right: 2x2+1+1 x 2x2+0+0
[07:46:24] [PASSED] top-right x bottom-left: 2x2+0+0 x 2x2+1-1
[07:46:24] [PASSED] bottom-left x top-right: 2x2+1-1 x 2x2+0+0
[07:46:24] [PASSED] bottom-right x top-left: 2x2+0+0 x 2x2+1+1
[07:46:24] [PASSED] right x left: 2x1+0+0 x 3x1+1+0
[07:46:24] [PASSED] left x right: 3x1+1+0 x 2x1+0+0
[07:46:24] [PASSED] up x bottom: 1x2+0+0 x 1x3+0-1
[07:46:24] [PASSED] bottom x up: 1x3+0-1 x 1x2+0+0
[07:46:24] [PASSED] touching corner: 1x1+0+0 x 2x2+1+1
[07:46:24] [PASSED] touching side: 1x1+0+0 x 1x1+1+0
[07:46:24] [PASSED] equal rects: 2x2+0+0 x 2x2+0+0
[07:46:24] [PASSED] inside another: 2x2+0+0 x 1x1+1+1
[07:46:24] [PASSED] far away: 1x1+0+0 x 1x1+3+6
[07:46:24] [PASSED] points intersecting: 0x0+5+10 x 0x0+5+10
[07:46:24] [PASSED] points not intersecting: 0x0+0+0 x 0x0+5+10
[07:46:24] ============= [PASSED] drm_test_rect_intersect =============
[07:46:24] ================ drm_test_rect_calc_hscale ================
[07:46:24] [PASSED] normal use
[07:46:24] [PASSED] out of max range
[07:46:24] [PASSED] out of min range
[07:46:24] [PASSED] zero dst
[07:46:24] [PASSED] negative src
[07:46:24] [PASSED] negative dst
[07:46:24] ============ [PASSED] drm_test_rect_calc_hscale ============
[07:46:24] ================ drm_test_rect_calc_vscale ================
[07:46:24] [PASSED] normal use
[07:46:24] [PASSED] out of max range
[07:46:24] [PASSED] out of min range
[07:46:24] [PASSED] zero dst
[07:46:24] [PASSED] negative src
[07:46:24] [PASSED] negative dst
[07:46:24] ============ [PASSED] drm_test_rect_calc_vscale ============
[07:46:24] ================== drm_test_rect_rotate ===================
[07:46:24] [PASSED] reflect-x
[07:46:24] [PASSED] reflect-y
[07:46:24] [PASSED] rotate-0
[07:46:24] [PASSED] rotate-90
[07:46:24] [PASSED] rotate-180
[07:46:24] [PASSED] rotate-270
[07:46:24] ============== [PASSED] drm_test_rect_rotate ===============
[07:46:24] ================ drm_test_rect_rotate_inv =================
[07:46:24] [PASSED] reflect-x
[07:46:24] [PASSED] reflect-y
[07:46:24] [PASSED] rotate-0
[07:46:24] [PASSED] rotate-90
[07:46:24] [PASSED] rotate-180
[07:46:24] [PASSED] rotate-270
[07:46:24] ============ [PASSED] drm_test_rect_rotate_inv =============
stty: 'standard input': Inappropriate ioctl for device
[07:46:24] ==================== [PASSED] drm_rect =====================
[07:46:24] ============================================================
[07:46:24] Testing complete. Ran 608 tests: passed: 608
[07:46:24] Elapsed time: 22.836s total, 1.702s configuring, 20.964s building, 0.153s running
+ /kernel/tools/testing/kunit/kunit.py run --kunitconfig /kernel/drivers/gpu/drm/ttm/tests/.kunitconfig
[07:46:24] Configuring KUnit Kernel ...
Regenerating .config ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
[07:46:25] Building KUnit Kernel ...
Populating config with:
$ make ARCH=um O=.kunit olddefconfig
Building with:
$ make all compile_commands.json scripts_gdb ARCH=um O=.kunit --jobs=48
[07:46:33] Starting KUnit Kernel (1/1)...
[07:46:33] ============================================================
Running tests with:
$ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
[07:46:33] ================= ttm_device (5 subtests) ==================
[07:46:33] [PASSED] ttm_device_init_basic
[07:46:33] [PASSED] ttm_device_init_multiple
[07:46:33] [PASSED] ttm_device_fini_basic
[07:46:33] [PASSED] ttm_device_init_no_vma_man
[07:46:33] ================== ttm_device_init_pools ==================
[07:46:33] [PASSED] No DMA allocations, no DMA32 required
[07:46:33] [PASSED] DMA allocations, DMA32 required
[07:46:33] [PASSED] No DMA allocations, DMA32 required
[07:46:33] [PASSED] DMA allocations, no DMA32 required
[07:46:33] ============== [PASSED] ttm_device_init_pools ==============
[07:46:33] =================== [PASSED] ttm_device ====================
[07:46:33] ================== ttm_pool (8 subtests) ===================
[07:46:33] ================== ttm_pool_alloc_basic ===================
[07:46:33] [PASSED] One page
[07:46:33] [PASSED] More than one page
[07:46:33] [PASSED] Above the allocation limit
[07:46:33] [PASSED] One page, with coherent DMA mappings enabled
[07:46:33] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[07:46:33] ============== [PASSED] ttm_pool_alloc_basic ===============
[07:46:33] ============== ttm_pool_alloc_basic_dma_addr ==============
[07:46:33] [PASSED] One page
[07:46:33] [PASSED] More than one page
[07:46:33] [PASSED] Above the allocation limit
[07:46:33] [PASSED] One page, with coherent DMA mappings enabled
[07:46:33] [PASSED] Above the allocation limit, with coherent DMA mappings enabled
[07:46:33] ========== [PASSED] ttm_pool_alloc_basic_dma_addr ==========
[07:46:33] [PASSED] ttm_pool_alloc_order_caching_match
[07:46:33] [PASSED] ttm_pool_alloc_caching_mismatch
[07:46:33] [PASSED] ttm_pool_alloc_order_mismatch
[07:46:33] [PASSED] ttm_pool_free_dma_alloc
[07:46:33] [PASSED] ttm_pool_free_no_dma_alloc
[07:46:33] [PASSED] ttm_pool_fini_basic
[07:46:33] ==================== [PASSED] ttm_pool =====================
[07:46:33] ================ ttm_resource (8 subtests) =================
[07:46:33] ================= ttm_resource_init_basic =================
[07:46:33] [PASSED] Init resource in TTM_PL_SYSTEM
[07:46:33] [PASSED] Init resource in TTM_PL_VRAM
[07:46:33] [PASSED] Init resource in a private placement
[07:46:33] [PASSED] Init resource in TTM_PL_SYSTEM, set placement flags
[07:46:33] ============= [PASSED] ttm_resource_init_basic =============
[07:46:33] [PASSED] ttm_resource_init_pinned
[07:46:33] [PASSED] ttm_resource_fini_basic
[07:46:33] [PASSED] ttm_resource_manager_init_basic
[07:46:33] [PASSED] ttm_resource_manager_usage_basic
[07:46:33] [PASSED] ttm_resource_manager_set_used_basic
[07:46:33] [PASSED] ttm_sys_man_alloc_basic
[07:46:33] [PASSED] ttm_sys_man_free_basic
[07:46:33] ================== [PASSED] ttm_resource ===================
[07:46:33] =================== ttm_tt (15 subtests) ===================
[07:46:33] ==================== ttm_tt_init_basic ====================
[07:46:33] [PASSED] Page-aligned size
[07:46:33] [PASSED] Extra pages requested
[07:46:33] ================ [PASSED] ttm_tt_init_basic ================
[07:46:33] [PASSED] ttm_tt_init_misaligned
[07:46:33] [PASSED] ttm_tt_fini_basic
[07:46:33] [PASSED] ttm_tt_fini_sg
[07:46:33] [PASSED] ttm_tt_fini_shmem
[07:46:33] [PASSED] ttm_tt_create_basic
[07:46:33] [PASSED] ttm_tt_create_invalid_bo_type
[07:46:33] [PASSED] ttm_tt_create_ttm_exists
[07:46:33] [PASSED] ttm_tt_create_failed
[07:46:33] [PASSED] ttm_tt_destroy_basic
[07:46:33] [PASSED] ttm_tt_populate_null_ttm
[07:46:33] [PASSED] ttm_tt_populate_populated_ttm
[07:46:33] [PASSED] ttm_tt_unpopulate_basic
[07:46:33] [PASSED] ttm_tt_unpopulate_empty_ttm
[07:46:33] [PASSED] ttm_tt_swapin_basic
[07:46:33] ===================== [PASSED] ttm_tt ======================
[07:46:33] =================== ttm_bo (14 subtests) ===================
[07:46:33] =========== ttm_bo_reserve_optimistic_no_ticket ===========
[07:46:33] [PASSED] Cannot be interrupted and sleeps
[07:46:33] [PASSED] Cannot be interrupted, locks straight away
[07:46:33] [PASSED] Can be interrupted, sleeps
[07:46:33] ======= [PASSED] ttm_bo_reserve_optimistic_no_ticket =======
[07:46:33] [PASSED] ttm_bo_reserve_locked_no_sleep
[07:46:33] [PASSED] ttm_bo_reserve_no_wait_ticket
[07:46:33] [PASSED] ttm_bo_reserve_double_resv
[07:46:33] [PASSED] ttm_bo_reserve_interrupted
[07:46:33] [PASSED] ttm_bo_reserve_deadlock
[07:46:33] [PASSED] ttm_bo_unreserve_basic
[07:46:33] [PASSED] ttm_bo_unreserve_pinned
[07:46:33] [PASSED] ttm_bo_unreserve_bulk
[07:46:33] [PASSED] ttm_bo_put_basic
[07:46:33] [PASSED] ttm_bo_put_shared_resv
[07:46:33] [PASSED] ttm_bo_pin_basic
[07:46:33] [PASSED] ttm_bo_pin_unpin_resource
[07:46:33] [PASSED] ttm_bo_multiple_pin_one_unpin
[07:46:33] ===================== [PASSED] ttm_bo ======================
[07:46:33] ============== ttm_bo_validate (22 subtests) ===============
[07:46:33] ============== ttm_bo_init_reserved_sys_man ===============
[07:46:33] [PASSED] Buffer object for userspace
[07:46:33] [PASSED] Kernel buffer object
[07:46:33] [PASSED] Shared buffer object
[07:46:33] ========== [PASSED] ttm_bo_init_reserved_sys_man ===========
[07:46:33] ============== ttm_bo_init_reserved_mock_man ==============
[07:46:33] [PASSED] Buffer object for userspace
[07:46:33] [PASSED] Kernel buffer object
[07:46:33] [PASSED] Shared buffer object
[07:46:33] ========== [PASSED] ttm_bo_init_reserved_mock_man ==========
[07:46:33] [PASSED] ttm_bo_init_reserved_resv
[07:46:33] ================== ttm_bo_validate_basic ==================
[07:46:33] [PASSED] Buffer object for userspace
[07:46:33] [PASSED] Kernel buffer object
[07:46:33] [PASSED] Shared buffer object
[07:46:33] ============== [PASSED] ttm_bo_validate_basic ==============
[07:46:33] [PASSED] ttm_bo_validate_invalid_placement
[07:46:33] ============= ttm_bo_validate_same_placement ==============
[07:46:33] [PASSED] System manager
[07:46:33] [PASSED] VRAM manager
[07:46:33] ========= [PASSED] ttm_bo_validate_same_placement ==========
[07:46:33] [PASSED] ttm_bo_validate_failed_alloc
[07:46:33] [PASSED] ttm_bo_validate_pinned
[07:46:33] [PASSED] ttm_bo_validate_busy_placement
[07:46:33] ================ ttm_bo_validate_multihop =================
[07:46:33] [PASSED] Buffer object for userspace
[07:46:33] [PASSED] Kernel buffer object
[07:46:33] [PASSED] Shared buffer object
[07:46:33] ============ [PASSED] ttm_bo_validate_multihop =============
[07:46:33] ========== ttm_bo_validate_no_placement_signaled ==========
[07:46:33] [PASSED] Buffer object in system domain, no page vector
[07:46:33] [PASSED] Buffer object in system domain with an existing page vector
[07:46:33] ====== [PASSED] ttm_bo_validate_no_placement_signaled ======
[07:46:33] ======== ttm_bo_validate_no_placement_not_signaled ========
[07:46:33] [PASSED] Buffer object for userspace
[07:46:33] [PASSED] Kernel buffer object
[07:46:33] [PASSED] Shared buffer object
[07:46:33] ==== [PASSED] ttm_bo_validate_no_placement_not_signaled ====
[07:46:33] [PASSED] ttm_bo_validate_move_fence_signaled
[07:46:33] ========= ttm_bo_validate_move_fence_not_signaled =========
[07:46:33] [PASSED] Waits for GPU
[07:46:33] [PASSED] Tries to lock straight away
[07:46:34] ===== [PASSED] ttm_bo_validate_move_fence_not_signaled =====
[07:46:34] [PASSED] ttm_bo_validate_swapout
[07:46:34] [PASSED] ttm_bo_validate_happy_evict
[07:46:34] [PASSED] ttm_bo_validate_all_pinned_evict
[07:46:34] [PASSED] ttm_bo_validate_allowed_only_evict
[07:46:34] [PASSED] ttm_bo_validate_deleted_evict
[07:46:34] [PASSED] ttm_bo_validate_busy_domain_evict
[07:46:34] [PASSED] ttm_bo_validate_evict_gutting
[07:46:34] [PASSED] ttm_bo_validate_recrusive_evict
stty: 'standard input': Inappropriate ioctl for device
[07:46:34] ================= [PASSED] ttm_bo_validate =================
[07:46:34] ============================================================
[07:46:34] Testing complete. Ran 102 tests: passed: 102
[07:46:34] Elapsed time: 9.879s total, 1.656s configuring, 7.607s building, 0.503s running
+ cleanup
++ stat -c %u:%g /kernel
+ chown -R 1003:1003 /kernel
^ permalink raw reply [flat|nested] 72+ messages in thread* ✗ CI.Build: failure for MADVISE FOR XE
2025-05-27 16:39 [PATCH v3 00/19] MADVISE FOR XE Himal Prasad Ghimiray
` (25 preceding siblings ...)
2025-05-28 7:46 ` ✓ CI.KUnit: success " Patchwork
@ 2025-05-28 7:50 ` Patchwork
26 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2025-05-28 7:50 UTC (permalink / raw)
To: Ghimiray, Himal Prasad; +Cc: intel-xe
== Series Details ==
Series: MADVISE FOR XE
URL : https://patchwork.freedesktop.org/series/149550/
State : failure
== Summary ==
CC drivers/dma/hsu/hsu.o
CC net/ncsi/ncsi-rsp.o
CC lib/decompress_bunzip2.o
CC [M] arch/x86/kvm/mtrr.o
CC kernel/cgroup/cpuset.o
CC net/core/flow_offload.o
CC [M] fs/nls/mac-turkish.o
CC [M] net/ipv4/netfilter/ipt_REJECT.o
CC [M] net/netfilter/nf_conntrack_proto_udp.o
CC fs/ext4/symlink.o
CC [M] drivers/leds/leds-lm3532.o
CC drivers/video/fbdev/core/cfbcopyarea.o
CC [M] net/ipv6/netfilter/ip6t_rpfilter.o
AR drivers/clk/x86/built-in.a
CC drivers/clk/clk-devres.o
CC kernel/events/ring_buffer.o
AR net/devlink/built-in.a
CC [M] sound/pci/hda/hda_hwdep.o
CC drivers/acpi/acpica/exoparg1.o
CC drivers/acpi/nvs.o
CC drivers/acpi/acpica/exoparg2.o
CC drivers/video/fbdev/core/cfbimgblt.o
CC drivers/acpi/acpica/exoparg3.o
CC drivers/acpi/wakeup.o
CC [M] drivers/video/backlight/ktd2801-backlight.o
CC net/core/gro.o
CC [M] fs/nls/nls_ucs2_utils.o
CC [M] drivers/gpio/gpio-pcf857x.o
CC block/bio-integrity-auto.o
CC fs/tracefs/inode.o
CC kernel/trace/ring_buffer.o
CC [M] drivers/gpio/gpio-pcie-idio-24.o
CC lib/decompress_inflate.o
CC arch/x86/kernel/ldt.o
CC kernel/trace/trace.o
CC crypto/drbg.o
CC net/core/netdev-genl.o
CC net/core/netdev-genl-gen.o
CC drivers/pci/irq.o
CC [M] arch/x86/kvm/debugfs.o
CC crypto/jitterentropy.o
CC drivers/acpi/acpica/exoparg6.o
CC kernel/trace/trace_output.o
CC [M] arch/x86/kvm/mmu/mmu.o
CC [M] arch/x86/kvm/mmu/page_track.o
CC [M] sound/pci/hda/hda_beep.o
CC net/core/gso.o
CC net/mctp/route.o
CC [M] net/ipv4/netfilter/ipt_SYNPROXY.o
AR drivers/dma/idxd/built-in.a
CC [M] drivers/leds/leds-lm3533.o
CC [M] drivers/dma/idxd/bus.o
CC net/core/net-sysfs.o
AR drivers/dma/hsu/built-in.a
CC kernel/trace/trace_seq.o
CC net/ncsi/ncsi-aen.o
CC [M] drivers/video/backlight/ktz8866.o
CC lib/decompress_unlz4.o
CC drivers/clk/clk-bulk.o
CC kernel/time/posix-timers.o
CC [M] net/ipv6/netfilter/ip6t_rt.o
CC [M] net/netfilter/nf_conntrack_proto_icmp.o
CC drivers/acpi/acpica/exprep.o
CC kernel/bpf/tnum.o
CC crypto/jitterentropy-kcapi.o
CC [M] drivers/gpio/gpio-pci-idio-16.o
CC kernel/cgroup/misc.o
CC fs/tracefs/event_inode.o
AR drivers/soc/apple/built-in.a
CC [M] arch/x86/kvm/mmu/spte.o
AR drivers/soc/bcm/built-in.a
AR drivers/soc/aspeed/built-in.a
CC [M] arch/x86/kvm/mmu/tdp_iter.o
AR fs/nls/built-in.a
CC [M] net/sched/em_cmp.o
AR drivers/soc/fsl/built-in.a
CC arch/x86/kernel/setup.o
CC drivers/video/fbdev/core/fb_io_fops.o
AR drivers/soc/fujitsu/built-in.a
CC drivers/acpi/acpica/exregion.o
AR drivers/soc/hisilicon/built-in.a
CC net/ipv6/ip6_fib.o
CC net/ncsi/ncsi-manage.o
AR drivers/soc/imx/built-in.a
CC [M] drivers/leds/leds-lm355x.o
CC block/blk-zoned.o
AR drivers/soc/ixp4xx/built-in.a
CC lib/decompress_unlzma.o
AR drivers/soc/loongson/built-in.a
CC fs/ext4/sysfs.o
AR drivers/soc/mediatek/built-in.a
AR drivers/soc/microchip/built-in.a
CC block/blk-wbt.o
AR drivers/soc/nuvoton/built-in.a
CC [M] drivers/soc/nuvoton/wpcm450-soc.o
CC block/blk-mq-debugfs.o
CC drivers/pci/vpd.o
CC [M] drivers/leds/leds-lm36274.o
CC [M] drivers/video/backlight/lm3509_bl.o
CC kernel/bpf/log.o
CC crypto/ghash-generic.o
AR drivers/pmdomain/actions/built-in.a
AR drivers/pmdomain/amlogic/built-in.a
CC [M] drivers/gpio/gpio-pisosr.o
AR drivers/pmdomain/apple/built-in.a
AR drivers/pmdomain/arm/built-in.a
AR drivers/pmdomain/bcm/built-in.a
AR drivers/pmdomain/imx/built-in.a
CC kernel/trace/trace_stat.o
AR drivers/pmdomain/mediatek/built-in.a
CC [M] sound/pci/hda/hda_generic.o
AR drivers/pmdomain/qcom/built-in.a
CC drivers/acpi/acpica/exresnte.o
AR drivers/pmdomain/renesas/built-in.a
CC [M] net/ipv4/netfilter/arp_tables.o
CC drivers/clk/clkdev.o
AR drivers/pmdomain/rockchip/built-in.a
AR drivers/pmdomain/samsung/built-in.a
AR drivers/pmdomain/st/built-in.a
AR drivers/pmdomain/starfive/built-in.a
AR drivers/pmdomain/sunxi/built-in.a
AR drivers/pmdomain/tegra/built-in.a
AR drivers/pmdomain/thead/built-in.a
AR drivers/pmdomain/ti/built-in.a
CC [M] drivers/dma/idxd/init.o
AR drivers/pmdomain/xilinx/built-in.a
CC drivers/pmdomain/core.o
AR drivers/soc/pxa/built-in.a
CC [M] net/netfilter/nf_conntrack_extend.o
CC [M] net/ipv6/netfilter/ip6t_srh.o
CC mm/mlock.o
AR drivers/soc/amlogic/built-in.a
AR drivers/soc/qcom/built-in.a
CC [M] drivers/soc/qcom/pmic_pdcharger_ulog.o
CC net/mctp/neigh.o
AR fs/tracefs/built-in.a
CC mm/mmap.o
CC kernel/fork.o
CC kernel/bpf/token.o
CC lib/decompress_unlzo.o
CC net/handshake/alert.o
CC crypto/ecc.o
CC [M] net/sched/em_nbyte.o
CC net/handshake/genl.o
CC [M] drivers/leds/leds-lm3642.o
CC drivers/acpi/acpica/exresolv.o
CC kernel/bpf/bpf_iter.o
CC kernel/time/posix-cpu-timers.o
CC [M] drivers/video/backlight/lm3533_bl.o
CC [M] drivers/gpio/gpio-rdc321x.o
CC [M] arch/x86/kvm/mmu/tdp_mmu.o
CC [M] net/ipv4/netfilter/arpt_mangle.o
CC kernel/events/callchain.o
CC drivers/video/fbdev/core/sysfillrect.o
CC drivers/video/screen_info_pci.o
CC drivers/acpi/acpica/exresop.o
CC drivers/pci/setup-bus.o
CC net/ipv6/ipv6_sockglue.o
CC lib/decompress_unxz.o
CC drivers/acpi/sleep.o
CC drivers/clk/clk.o
AR kernel/cgroup/built-in.a
CC [M] drivers/soc/qcom/qcom-pbs.o
CC drivers/acpi/device_sysfs.o
CC arch/x86/kernel/x86_init.o
CC drivers/acpi/device_pm.o
CC [M] drivers/leds/leds-lp3944.o
CC kernel/exec_domain.o
CC [M] net/ipv4/netfilter/arptable_filter.o
CC net/ipv6/ndisc.o
CC [M] net/sched/em_u32.o
CC [M] drivers/gpio/gpio-sch311x.o
AR net/mctp/built-in.a
CC [M] net/ipv6/netfilter/ip6t_NPT.o
CC [M] drivers/gpio/gpio-sch.o
CC net/core/hotdata.o
CC drivers/acpi/acpica/exserial.o
CC [M] drivers/video/backlight/lm3630a_bl.o
CC [M] drivers/dma/idxd/irq.o
CC block/sed-opal.o
CC net/ipv6/udp.o
CC [M] net/ipv4/netfilter/nf_dup_ipv4.o
CC block/blk-pm.o
CC kernel/time/posix-clock.o
CC lib/decompress_unzstd.o
CC drivers/acpi/acpica/exstore.o
CC net/ipv6/udplite.o
CC block/blk-crypto.o
CC net/ncsi/ncsi-netlink.o
CC [M] net/netfilter/nf_conntrack_acct.o
CC net/core/netdev_rx_queue.o
CC [M] drivers/leds/leds-lp3952.o
CC block/blk-crypto-profile.o
CC [M] net/ipv6/netfilter/ip6t_REJECT.o
CC drivers/acpi/acpica/exstoren.o
CC kernel/events/hw_breakpoint.o
CC drivers/video/fbdev/core/syscopyarea.o
CC lib/dump_stack.o
CC fs/btrfs/super.o
CC drivers/pmdomain/governor.o
AR drivers/soc/renesas/built-in.a
CC [M] drivers/gpio/gpio-sim.o
CC kernel/events/uprobes.o
AR drivers/soc/rockchip/built-in.a
AR drivers/soc/sunxi/built-in.a
AR drivers/soc/ti/built-in.a
CC [M] net/sched/em_meta.o
AR drivers/soc/versatile/built-in.a
AR drivers/soc/xilinx/built-in.a
AR drivers/soc/built-in.a
CC crypto/ecdh.o
CC drivers/dma/lgm/lgm-dma.o
CC net/handshake/netlink.o
CC [M] net/ipv6/netfilter/ip6t_SYNPROXY.o
CC drivers/acpi/acpica/exstorob.o
CC drivers/video/cmdline.o
CC arch/x86/kernel/i8259.o
CC mm/mmu_gather.o
CC [M] drivers/video/backlight/lm3639_bl.o
CC drivers/video/nomodeset.o
CC [M] drivers/dma/idxd/device.o
CC drivers/acpi/acpica/exsystem.o
CC drivers/clk/clk-divider.o
CC net/ipv6/raw.o
CC [M] sound/pci/hda/patch_realtek.o
CC mm/mprotect.o
CC drivers/pci/vc.o
CC drivers/video/hdmi.o
AR drivers/pmdomain/built-in.a
CC [M] drivers/leds/leds-lp50xx.o
CC arch/x86/kernel/irqinit.o
CC kernel/time/itimer.o
CC [M] net/netfilter/nf_conntrack_seqadj.o
CC net/ipv6/icmp.o
CC crypto/ecdh_helper.o
CC drivers/video/display_timing.o
CC drivers/acpi/acpica/extrace.o
CC [M] drivers/dma/idxd/sysfs.o
CC kernel/trace/trace_printk.o
AR net/ncsi/built-in.a
CC fs/ext4/xattr.o
CC [M] drivers/video/backlight/lp855x_bl.o
CC fs/ext4/xattr_hurd.o
CC lib/earlycpio.o
CC [M] drivers/gpio/gpio-siox.o
CC fs/ext4/xattr_trusted.o
CC fs/ext4/xattr_user.o
CC fs/ext4/fast_commit.o
CC drivers/acpi/proc.o
CC lib/extable.o
CC [M] net/ipv4/netfilter/nf_nat_snmp_basic.asn1.o
CC fs/btrfs/ctree.o
CC block/blk-crypto-sysfs.o
CC [M] net/ipv4/netfilter/nf_nat_snmp_basic_main.o
CC net/core/page_pool.o
CC drivers/pci/mmap.o
CC lib/flex_proportions.o
CC [M] drivers/video/backlight/lp8788_bl.o
CC drivers/acpi/acpica/exutils.o
CC [M] net/sched/em_text.o
AR drivers/dma/lgm/built-in.a
CC drivers/video/videomode.o
CC drivers/video/fbdev/core/sysimgblt.o
LD [M] net/ipv6/netfilter/nf_defrag_ipv6.o
CC kernel/time/clockevents.o
CC kernel/time/tick-common.o
CC drivers/pci/devres.o
CC [M] arch/x86/kvm/hyperv.o
CC [M] drivers/leds/leds-lp8788.o
CC crypto/xor.o
CC arch/x86/kernel/jump_label.o
CC [M] sound/pci/hda/patch_senarytech.o
CC kernel/panic.o
CC kernel/bpf/map_iter.o
CC fs/btrfs/extent-tree.o
CC [M] drivers/gpio/gpio-sloppy-logic-analyzer.o
CC drivers/acpi/acpica/hwacpi.o
CC crypto/hash_info.o
CC mm/mremap.o
CC arch/x86/kernel/irq_work.o
CC mm/msync.o
CC kernel/time/tick-broadcast.o
CC net/handshake/request.o
CC lib/idr.o
CC drivers/virtio/virtio.o
CC net/handshake/tlshd.o
CC drivers/xen/events/events_base.o
CC [M] net/netfilter/nf_conntrack_proto_icmpv6.o
CC block/blk-crypto-fallback.o
CC fs/btrfs/print-tree.o
CC fs/ext4/orphan.o
CC [M] drivers/video/backlight/lv5207lp.o
CC drivers/xen/events/events_2l.o
CC [M] drivers/leds/leds-lt3593.o
CC net/core/page_pool_user.o
CC drivers/acpi/acpica/hwesleep.o
CC [M] drivers/dma/idxd/submit.o
CC kernel/trace/pid_list.o
AR kernel/events/built-in.a
CC [M] net/sched/em_ipt.o
CC drivers/pci/proc.o
CC [M] drivers/dma/idxd/dma.o
CC drivers/pci/pci-sysfs.o
CC [M] drivers/gpio/gpio-tangier.o
LD [M] net/ipv4/netfilter/nf_nat_snmp_basic.o
CC net/ipv4/ip_forward.o
CC [M] drivers/gpio/gpio-tpic2810.o
CC drivers/clk/clk-fixed-factor.o
CC crypto/kdf_sp800108.o
CC drivers/pci/slot.o
CC kernel/cpu.o
CC [M] crypto/crypto_engine.o
CC drivers/acpi/acpica/hwgpe.o
CC arch/x86/kernel/probe_roms.o
CC drivers/acpi/bus.o
CC kernel/exit.o
CC lib/iomem_copy.o
CC [M] drivers/leds/leds-max8997.o
CC drivers/video/fbdev/core/fb_sys_fops.o
CC [M] drivers/video/backlight/max8925_bl.o
CC net/core/net-procfs.o
CC block/holder.o
CC fs/btrfs/root-tree.o
CC lib/irq_regs.o
CC [M] crypto/echainiv.o
CC net/ipv6/mcast.o
CC drivers/xen/xenbus/xenbus_client.o
AR drivers/xen/xen-pciback/built-in.a
CC [M] drivers/xen/xen-pciback/pci_stub.o
CC fs/ext4/acl.o
CC [M] drivers/xen/xen-pciback/pciback_ops.o
CC drivers/virtio/virtio_ring.o
CC [M] drivers/dma/amd/ptdma/ptdma-dev.o
CC kernel/time/tick-broadcast-hrtimer.o
CC [M] sound/pci/hda/patch_hdmi.o
CC drivers/acpi/acpica/hwregs.o
CC [M] drivers/dma/amd/ptdma/ptdma-dmaengine.o
CC [M] net/netfilter/nf_conntrack_timeout.o
CC kernel/trace/tracing_map.o
CC mm/page_vma_mapped.o
CC [M] drivers/xen/xenfs/super.o
CC drivers/virtio/virtio_anchor.o
CC lib/is_single_threaded.o
CC [M] drivers/dma/amd/ptdma/ptdma-debugfs.o
CC drivers/regulator/core.o
CC [M] drivers/dma/amd/ptdma/ptdma-pci.o
CC [M] block/kyber-iosched.o
CC [M] drivers/leds/leds-mc13783.o
CC [M] drivers/gpio/gpio-tps65086.o
CC drivers/clk/clk-fixed-rate.o
CC [M] drivers/leds/leds-menf21bmc.o
CC kernel/time/tick-oneshot.o
CC [M] drivers/dma/idxd/cdev.o
CC [M] net/llc/llc_core.o
CC [M] drivers/video/backlight/mp3309c.o
AR drivers/reset/amlogic/built-in.a
AR drivers/reset/hisilicon/built-in.a
AR drivers/reset/starfive/built-in.a
CC drivers/pci/pci-acpi.o
AR drivers/reset/sti/built-in.a
AR net/sched/built-in.a
CC [M] drivers/video/backlight/mt6370-backlight.o
CC net/ipv4/ip_options.o
CC drivers/acpi/acpica/hwsleep.o
AR drivers/reset/tegra/built-in.a
CC net/handshake/trace.o
CC drivers/reset/core.o
CC [M] drivers/video/backlight/pandora_bl.o
CC [M] crypto/ecdsa.o
CC arch/x86/kernel/sys_ia32.o
CC lib/klist.o
CC fs/ext4/xattr_security.o
CC drivers/tty/vt/vt_ioctl.o
CC kernel/bpf/task_iter.o
CC kernel/bpf/prog_iter.o
CC [M] drivers/xen/xenfs/xenstored.o
CC net/core/netpoll.o
CC [M] net/tls/tls_main.o
CC [M] drivers/video/fbdev/core/fb_ddc.o
CC [M] net/key/af_key.o
CC [M] drivers/gpio/gpio-tps65912.o
CC drivers/acpi/acpica/hwvalid.o
CC drivers/xen/events/events_fifo.o
CC drivers/iommu/amd/iommu.o
CC drivers/char/hw_random/core.o
CC kernel/time/tick-sched.o
CC mm/pagewalk.o
CC lib/kobject.o
CC [M] drivers/xen/xen-pciback/xenbus.o
CC [M] arch/x86/kvm/xen.o
CC drivers/xen/xenbus/xenbus_comms.o
CC kernel/trace/trace_sched_switch.o
CC [M] drivers/leds/leds-mlxcpld.o
AR drivers/dma/amd/built-in.a
CC drivers/iommu/amd/init.o
CC [M] drivers/char/hw_random/timeriomem-rng.o
ASN.1 crypto/ecdsasignature.asn1.[ch]
CC [M] crypto/ecdsa-p1363.o
CC [M] crypto/ecdsasignature.asn1.o
CC [M] crypto/crypto_user.o
LD [M] drivers/dma/amd/ptdma/ptdma.o
CC drivers/clk/clk-gate.o
CC [M] crypto/cmac.o
CC [M] drivers/video/backlight/pwm_bl.o
CC [M] drivers/char/hw_random/intel-rng.o
CC [M] drivers/char/hw_random/amd-rng.o
CC [M] drivers/xen/xenfs/xensyms.o
CC drivers/acpi/acpica/hwxface.o
CC fs/ext4/verity.o
CC [M] drivers/gpio/gpio-tps68470.o
CC arch/x86/kernel/signal_32.o
CC [M] net/netfilter/nf_conntrack_timestamp.o
CC drivers/acpi/glue.o
CC drivers/reset/reset-simple.o
CC [M] sound/pci/hda/hda_eld.o
CC [M] net/llc/llc_input.o
CC [M] drivers/leds/leds-mlxreg.o
CC [M] block/bfq-iosched.o
CC [M] block/bfq-wf2q.o
CC lib/kobject_uevent.o
AR net/handshake/built-in.a
AR drivers/xen/events/built-in.a
CC [M] drivers/leds/leds-mt6323.o
CC drivers/virtio/virtio_pci_modern_dev.o
CC drivers/tty/vt/vc_screen.o
CC drivers/tty/hvc/hvc_console.o
CC drivers/pci/iomap.o
CC [M] drivers/dma/idxd/debugfs.o
CC drivers/clk/clk-multiplier.o
CC [M] sound/pci/rme9652/hdsp.o
CC kernel/bpf/link_iter.o
CC drivers/clk/clk-mux.o
CC [M] drivers/xen/xen-pciback/conf_space.o
CC drivers/xen/xenbus/xenbus_xs.o
CC drivers/clk/clk-composite.o
CC [M] drivers/dma/idxd/defaults.o
CC drivers/tty/vt/selection.o
AR drivers/video/fbdev/core/built-in.a
AR drivers/video/fbdev/built-in.a
LD [M] drivers/xen/xenfs/xenfs.o
CC drivers/acpi/acpica/hwxfsleep.o
CC drivers/tty/serial/8250/8250_core.o
CC [M] drivers/video/backlight/qcom-wled.o
CC [M] drivers/tty/serial/jsm/jsm_driver.o
CC [M] drivers/char/hw_random/ba431-rng.o
CC [M] drivers/gpio/gpio-tqmx86.o
CC net/core/fib_rules.o
CC kernel/trace/trace_functions.o
CC fs/btrfs/dir-item.o
CC mm/pgtable-generic.o
CC fs/ext4/crypto.o
CC drivers/char/agp/backend.o
CC kernel/softirq.o
CC [M] crypto/xcbc.o
CC arch/x86/kernel/sys_x86_64.o
CC net/ipv4/ip_output.o
CC [M] sound/pci/hda/hda_component.o
CC [M] drivers/leds/leds-nic78bx.o
CC [M] net/tls/tls_sw.o
CC kernel/time/timer_migration.o
CC [M] drivers/reset/reset-gpio.o
CC [M] net/netfilter/nf_conntrack_ecache.o
CC drivers/clk/clk-fractional-divider.o
CC [M] arch/x86/kvm/smm.o
CC drivers/xen/xenbus/xenbus_probe.o
CC fs/btrfs/file-item.o
CC [M] drivers/char/hw_random/via-rng.o
CC drivers/acpi/acpica/hwpci.o
CC [M] drivers/xen/xen-pciback/conf_space_header.o
CC [M] drivers/dma/idxd/perfmon.o
CC [M] drivers/gpio/gpio-twl4030.o
CC [M] drivers/tty/serial/jsm/jsm_neo.o
CC [M] drivers/gpio/gpio-twl6040.o
CC kernel/bpf/hashtab.o
CC [M] drivers/reset/reset-ti-syscon.o
CC drivers/acpi/scan.o
CC [M] drivers/video/backlight/rt4831-backlight.o
CC drivers/pci/quirks.o
CC drivers/tty/serial/serial_core.o
CC [M] crypto/md4.o
CC drivers/char/agp/generic.o
CC [M] drivers/leds/leds-pca9532.o
CC drivers/virtio/virtio_pci_legacy_dev.o
CC kernel/time/vsyscall.o
AR fs/ext4/built-in.a
CC drivers/iommu/amd/quirks.o
CC [M] net/llc/llc_output.o
CC drivers/tty/hvc/hvc_irq.o
CC mm/rmap.o
CC drivers/tty/vt/keyboard.o
CC arch/x86/kernel/espfix_64.o
CC [M] drivers/gpio/gpio-viperboard.o
CC kernel/trace/trace_preemptirq.o
CC drivers/acpi/acpica/nsaccess.o
AR drivers/gpu/host1x/built-in.a
CC drivers/gpu/vga/vga_switcheroo.o
CC [M] drivers/char/hw_random/virtio-rng.o
AR drivers/gpu/drm/tests/built-in.a
CC [M] drivers/gpu/drm/tests/drm_kunit_helpers.o
CC drivers/tty/serial/8250/8250_platform.o
CC net/ipv6/reassembly.o
CC [M] drivers/reset/reset-tps380x.o
CC fs/pstore/inode.o
CC [M] drivers/gpu/drm/tests/drm_atomic_test.o
CC [M] drivers/gpu/drm/tests/drm_atomic_state_test.o
CC lib/logic_pio.o
CC net/ipv6/tcp_ipv6.o
CC [M] arch/x86/kvm/kvm_onhyperv.o
CC [M] sound/pci/hda/hda_intel.o
CC drivers/tty/hvc/hvc_xen.o
CC [M] crypto/rmd160.o
CC [M] drivers/xen/xen-pciback/conf_space_capability.o
CC drivers/clk/clk-gpio.o
CC [M] drivers/video/backlight/kb3886_bl.o
CC drivers/tty/vt/vt.o
CC drivers/acpi/acpica/nsalloc.o
LD [M] drivers/dma/idxd/idxd_bus.o
LD [M] drivers/dma/idxd/idxd.o
CC [M] drivers/tty/serial/jsm/jsm_tty.o
AR drivers/dma/mediatek/built-in.a
AR drivers/dma/qcom/built-in.a
CC drivers/regulator/dummy.o
CC [M] drivers/dma/qcom/hidma_mgmt.o
CC drivers/iommu/amd/io_pgtable.o
CC [M] drivers/gpio/gpio-virtuser.o
CC [M] drivers/leds/leds-pca955x.o
CC [M] sound/pci/rme9652/hdspm.o
CC [M] drivers/char/hw_random/xiphera-trng.o
CC arch/x86/kernel/ksysfs.o
AR drivers/reset/built-in.a
CC drivers/char/agp/isoch.o
CC drivers/virtio/virtio_mmio.o
CC fs/btrfs/inode-item.o
CC drivers/xen/xenbus/xenbus_probe_backend.o
CC kernel/time/timekeeping_debug.o
CC [M] drivers/gpio/gpio-virtio.o
CC lib/maple_tree.o
CC drivers/acpi/acpica/nsarguments.o
CC [M] net/netfilter/nf_conntrack_labels.o
CC net/core/net-traces.o
CC [M] drivers/dma/qcom/hidma_mgmt_sys.o
CC fs/pstore/platform.o
CC [M] drivers/xen/xen-pciback/conf_space_quirks.o
CC [M] drivers/clk/clk_kunit_helpers.o
CC [M] drivers/video/backlight/sky81452-backlight.o
CC [M] net/llc/llc_if.o
CC [M] crypto/sm3.o
CC drivers/xen/xenbus/xenbus_dev_frontend.o
CC [M] drivers/gpu/drm/tests/drm_bridge_test.o
CC drivers/regulator/fixed-helper.o
CC drivers/tty/serial/serial_base_bus.o
CC kernel/bpf/arraymap.o
AR drivers/tty/hvc/built-in.a
CC [M] net/netfilter/nf_conntrack_ovs.o
CC drivers/tty/serial/8250/8250_pnp.o
CC kernel/trace/trace_sched_wakeup.o
CC drivers/acpi/acpica/nsconvert.o
AR drivers/char/hw_random/built-in.a
CC drivers/xen/cpu_hotplug.o
CC mm/vmalloc.o
CC arch/x86/kernel/bootflag.o
AR drivers/gpu/vga/built-in.a
CC drivers/tty/serial/8250/8250_rsa.o
CC arch/x86/kernel/e820.o
CC [M] net/llc/llc_c_ev.o
CC drivers/char/agp/amd64-agp.o
CC [M] drivers/tty/serial/jsm/jsm_cls.o
CC [M] drivers/leds/leds-pca963x.o
CC drivers/iommu/amd/io_pgtable_v2.o
CC [M] net/tls/tls_proc.o
CC kernel/time/namespace.o
CC drivers/virtio/virtio_pci_modern.o
CC drivers/xen/xenbus/xenbus_dev_backend.o
CC drivers/iommu/amd/ppr.o
CC drivers/iommu/amd/pasid.o
CC [M] arch/x86/kvm/vmx/vmx.o
CC [M] drivers/gpio/gpio-vx855.o
CC drivers/acpi/acpica/nsdump.o
LD [M] sound/pci/hda/snd-hda-codec.o
LD [M] sound/pci/hda/snd-hda-codec-generic.o
LD [M] sound/pci/hda/snd-hda-codec-realtek.o
LD [M] sound/pci/hda/snd-hda-codec-senarytech.o
LD [M] sound/pci/hda/snd-hda-codec-hdmi.o
LD [M] sound/pci/hda/snd-hda-scodec-component.o
CC [M] drivers/xen/xen-pciback/vpci.o
LD [M] sound/pci/hda/snd-hda-intel.o
CC [M] block/bfq-cgroup.o
CC drivers/iommu/intel/dmar.o
CC [M] drivers/video/backlight/wm831x_bl.o
CC [M] net/llc/llc_c_ac.o
CC net/ipv4/ip_sockglue.o
CC [M] drivers/gpu/drm/tests/drm_buddy_test.o
CC drivers/regulator/helpers.o
CC drivers/pci/ats.o
AR drivers/dma/stm32/built-in.a
CC drivers/connector/cn_queue.o
CC [M] drivers/dma/qcom/hidma_ll.o
CC kernel/bpf/percpu_freelist.o
CC drivers/regulator/devres.o
CC fs/btrfs/disk-io.o
CC drivers/base/power/sysfs.o
CC [M] drivers/clk/clk-cdce706.o
CC drivers/base/firmware_loader/builtin/main.o
CC drivers/tty/serial/8250/8250_port.o
CC drivers/base/regmap/regmap.o
CC fs/pstore/pmsg.o
CC drivers/acpi/acpica/nseval.o
CC drivers/char/agp/intel-agp.o
AR kernel/time/built-in.a
CC net/core/drop_monitor.o
CC [M] drivers/leds/leds-pca995x.o
LD [M] drivers/tty/serial/jsm/jsm.o
CC drivers/base/regmap/regcache.o
CC [M] net/tls/trace.o
CC drivers/base/regmap/regcache-rbtree.o
CC drivers/base/regmap/regcache-flat.o
CC [M] drivers/gpio/gpio-wcove.o
CC kernel/trace/trace_hwlat.o
AR drivers/dma/ti/built-in.a
AR drivers/iommu/amd/built-in.a
CC [M] net/netfilter/nf_conntrack_proto_dccp.o
CC drivers/xen/xenbus/xenbus_probe_frontend.o
AR drivers/dma/xilinx/built-in.a
CC [M] crypto/sm3_generic.o
CC [M] drivers/dma/xilinx/xilinx_dma.o
CC net/ipv6/ping.o
CC [M] drivers/xen/xen-pciback/passthrough.o
LD [M] sound/pci/rme9652/snd-hdsp.o
CC drivers/acpi/mipi-disco-img.o
CC [M] drivers/video/backlight/arcxcnn_bl.o
LD [M] sound/pci/rme9652/snd-hdspm.o
CC [M] sound/pci/ad1889.o
AR drivers/base/firmware_loader/builtin/built-in.a
CC kernel/bpf/bpf_lru_list.o
CC drivers/acpi/acpica/nsinit.o
CC fs/efivarfs/inode.o
CC drivers/base/firmware_loader/fallback_table.o
COPY drivers/tty/vt/defkeymap.c
CC [M] drivers/gpio/gpio-winbond.o
AR block/built-in.a
LD [M] block/bfq.o
CC drivers/regulator/irq_helpers.o
CC [M] fs/pstore/ram.o
CC [M] drivers/dma/qcom/hidma.o
CC [M] drivers/gpu/drm/tests/drm_cmdline_parser_test.o
CC drivers/connector/connector.o
CC mm/vma.o
CC drivers/virtio/virtio_pci_common.o
CC [M] drivers/clk/clk-cs2000-cp.o
CC drivers/tty/vt/consolemap.o
CC drivers/char/agp/intel-gtt.o
CC arch/x86/kernel/pci-dma.o
CC drivers/base/power/generic_ops.o
CC drivers/pci/iov.o
HOSTCC drivers/tty/vt/conmakehash
CC drivers/base/power/common.o
CC kernel/trace/trace_osnoise.o
CC [M] drivers/leds/leds-pwm.o
CC drivers/acpi/acpica/nsload.o
CC [M] net/llc/llc_conn.o
CC kernel/bpf/lpm_trie.o
CC fs/efivarfs/file.o
CC kernel/trace/trace_nop.o
CC drivers/virtio/virtio_pci_legacy.o
CC drivers/virtio/virtio_pci_admin_legacy_io.o
CC drivers/virtio/virtio_balloon.o
CC [M] net/tls/tls_strp.o
LD [M] drivers/xen/xen-pciback/xen-pciback.o
CC [M] drivers/video/backlight/rave-sp-backlight.o
AR drivers/iommu/arm/arm-smmu/built-in.a
AR drivers/iommu/arm/arm-smmu-v3/built-in.a
CC [M] crypto/streebog_generic.o
AR drivers/iommu/arm/built-in.a
CC [M] drivers/gpio/gpio-wm831x.o
CC [M] net/netfilter/nf_conntrack_proto_sctp.o
CC net/core/selftests.o
CC [M] drivers/video/vgastate.o
CC [M] sound/pci/als300.o
CC drivers/acpi/acpica/nsnames.o
CC [M] fs/pstore/ram_core.o
CC drivers/tty/serial/8250/8250_dma.o
CC [M] drivers/block/xen-blkback/blkback.o
CC [M] drivers/leds/leds-regulator.o
AR drivers/xen/xenbus/built-in.a
CC drivers/xen/grant-table.o
CC [M] drivers/dma/xilinx/xdma.o
CC [M] drivers/dma/qcom/hidma_dbg.o
CC [M] drivers/clk/clk-lmk04832.o
CC drivers/base/firmware_loader/main.o
CC fs/efivarfs/super.o
CC drivers/base/power/qos.o
CC drivers/pci/pci-label.o
CC net/ipv6/exthdrs.o
CC drivers/regulator/event.o
CC [M] drivers/gpio/gpio-wm8350.o
CC drivers/pci/p2pdma.o
CC [M] crypto/wp512.o
AR drivers/misc/eeprom/built-in.a
CC [M] drivers/misc/eeprom/at24.o
CC drivers/acpi/acpica/nsobject.o
CC [M] drivers/misc/eeprom/at25.o
CC drivers/iommu/intel/iommu.o
CC arch/x86/kernel/quirks.o
CC [M] crypto/pcbc.o
CC [M] sound/pci/als4000.o
CC net/ipv4/inet_hashtables.o
CC [M] crypto/lrw.o
CC drivers/tty/vt/defkeymap.o
CC drivers/connector/cn_proc.o
AR drivers/video/backlight/built-in.a
CC [M] drivers/leds/leds-tca6507.o
CC [M] crypto/xctr.o
CC [M] net/llc/llc_c_st.o
CC kernel/bpf/map_in_map.o
CC net/ipv4/inet_timewait_sock.o
CC drivers/char/agp/via-agp.o
CC [M] drivers/misc/eeprom/max6875.o
CC [M] fs/pstore/zone.o
CC [M] drivers/virtio/virtio_input.o
LD [M] drivers/dma/qcom/hdma_mgmt.o
LD [M] drivers/dma/qcom/hdma.o
CC [M] drivers/misc/eeprom/eeprom_93cx6.o
CC fs/btrfs/transaction.o
CC drivers/acpi/acpica/nsparse.o
CONMK drivers/tty/vt/consolemap_deftbl.c
CC drivers/tty/vt/consolemap_deftbl.o
CC drivers/base/regmap/regcache-maple.o
AR drivers/tty/vt/built-in.a
CC [M] net/tls/tls_device.o
CC drivers/iommu/iommufd/iova_bitmap.o
CC [M] net/netfilter/nf_conntrack_proto_gre.o
CC drivers/base/regmap/regmap-debugfs.o
CC drivers/tty/serial/8250/8250_dwlib.o
CC fs/efivarfs/vars.o
CC [M] drivers/gpio/gpio-wm8994.o
CC [M] drivers/gpu/drm/tests/drm_connector_test.o
CC drivers/iommu/iommufd/driver.o
CC [M] drivers/clk/clk-max9485.o
CC [M] drivers/regulator/fixed.o
CC drivers/base/regmap/regmap-i2c.o
CC drivers/base/regmap/regmap-spi.o
CC [M] sound/soc/codecs/ac97.o
CC mm/process_vm_access.o
CC [M] drivers/block/xen-blkback/xenbus.o
AR drivers/video/built-in.a
CC [M] drivers/misc/eeprom/eeprom_93xx46.o
CC arch/x86/kernel/kdebugfs.o
CC drivers/acpi/acpica/nspredef.o
CC [M] drivers/char/agp/sis-agp.o
CC [M] crypto/hctr2.o
CC [M] sound/pci/cs4281.o
CC kernel/bpf/bloom_filter.o
CC [M] drivers/virtio/virtio_vdpa.o
CC [M] drivers/clk/clk-palmas.o
CC kernel/trace/trace_stack.o
CC [M] net/llc/llc_pdu.o
CC [M] drivers/leds/leds-ti-lmu-common.o
CC [M] drivers/dma/dw/core.o
CC [M] net/llc/llc_sap.o
CC kernel/trace/trace_mmiotrace.o
CC [M] sound/soc/codecs/ak4619.o
CC [M] drivers/clk/clk-pwm.o
CC [M] drivers/gpio/gpio-ws16c48.o
CC drivers/acpi/acpica/nsprepkg.o
CC drivers/base/firmware_loader/fallback.o
CC drivers/xen/features.o
AR drivers/connector/built-in.a
CC [M] drivers/gpio/gpio-xra1403.o
CC [M] drivers/clk/clk-si5341.o
CC [M] drivers/clk/clk-si5351.o
AR fs/efivarfs/built-in.a
CC drivers/pci/vgaarb.o
CC drivers/base/power/runtime.o
CC drivers/tty/serial/8250/8250_fintek.o
CC [M] crypto/adiantum.o
CC lib/memcat_p.o
CC drivers/base/firmware_loader/fallback_platform.o
CC [M] drivers/regulator/virtual.o
CC [M] net/tls/tls_device_fallback.o
CC mm/mseal.o
CC [M] drivers/misc/eeprom/idt_89hpesx.o
AR drivers/char/agp/built-in.a
CC drivers/char/tpm/tpm-chip.o
CC [M] drivers/char/tpm/st33zp24/st33zp24.o
CC [M] fs/pstore/blk.o
CC [M] drivers/virtio/virtio_mem.o
CC kernel/bpf/local_storage.o
CC [M] drivers/iommu/iommufd/device.o
CC arch/x86/kernel/alternative.o
LD [M] sound/pci/snd-ad1889.o
CC [M] drivers/regulator/userspace-consumer.o
CC drivers/acpi/acpica/nsrepair.o
CC [M] drivers/regulator/88pg86x.o
CC [M] drivers/dma/dw/dw.o
CC drivers/xen/balloon.o
LD [M] sound/pci/snd-als300.o
LD [M] sound/pci/snd-als4000.o
LD [M] sound/pci/snd-cs4281.o
CC drivers/xen/manage.o
CC [M] drivers/leds/leds-tlc591xx.o
CC drivers/base/regmap/regmap-mmio.o
CC [M] fs/netfs/buffered_read.o
CC [M] drivers/regulator/88pm800-regulator.o
CC [M] sound/soc/codecs/cs40l50-codec.o
CC lib/min_heap.o
CC mm/page_alloc.o
CC kernel/trace/trace_functions_graph.o
CC [M] drivers/gpu/drm/tests/drm_damage_helper_test.o
CC mm/shuffle.o
LD [M] drivers/block/xen-blkback/xen-blkback.o
CC drivers/tty/serial/8250/8250_pcilib.o
CC [M] drivers/block/drbd/drbd_buildtag.o
CC fs/btrfs/inode.o
CC kernel/trace/blktrace.o
CC [M] crypto/nhpoly1305.o
CC drivers/base/firmware_loader/sysfs.o
CC mm/page_frag_cache.o
CC net/ipv6/datagram.o
CC drivers/acpi/acpica/nsrepair2.o
CC [M] net/llc/llc_s_ac.o
CC [M] net/netfilter/nf_conntrack_netlink.o
CC mm/init-mm.o
AR drivers/gpio/built-in.a
CC drivers/acpi/resource.o
CC drivers/tty/serial/8250/8250_early.o
CC [M] drivers/clk/clk-si544.o
CC [M] drivers/dma/dw/idma32.o
CC drivers/tty/serial/8250/8250_dw.o
CC lib/nmi_backtrace.o
CC [M] drivers/misc/eeprom/ee1004.o
CC drivers/base/power/wakeirq.o
CC [M] drivers/block/drbd/drbd_bitmap.o
CC net/ipv6/ip6_flowlabel.o
CC [M] drivers/leds/leds-tps6105x.o
CC net/ipv4/inet_connection_sock.o
CC [M] drivers/regulator/88pm8607.o
CC [M] sound/soc/codecs/cs530x.o
CC drivers/acpi/acpica/nssearch.o
CC net/core/timestamping.o
CC drivers/pci/doe.o
LD [M] fs/pstore/ramoops.o
CC drivers/tty/serial/8250/8250_mid.o
LD [M] fs/pstore/pstore_zone.o
LD [M] fs/pstore/pstore_blk.o
AR fs/pstore/built-in.a
CC [M] crypto/ccm.o
CC [M] drivers/regulator/aat2870-regulator.o
CC net/ipv4/tcp.o
CC [M] drivers/char/tpm/st33zp24/i2c.o
CC drivers/xen/time.o
CC [M] drivers/gpu/drm/tests/drm_dp_mst_helper_test.o
CC [M] drivers/iommu/iommufd/eventq.o
AR drivers/iommu/riscv/built-in.a
CC kernel/bpf/queue_stack_maps.o
CC drivers/base/firmware_loader/sysfs_upload.o
CC [M] fs/nfs/filelayout/filelayout.o
CC drivers/base/regmap/regmap-irq.o
CC [M] fs/nfs/blocklayout/blocklayout.o
CC drivers/iommu/intel/pasid.o
CC [M] fs/nfs/blocklayout/dev.o
CC [M] fs/nfs/flexfilelayout/flexfilelayout.o
CC [M] net/llc/llc_s_ev.o
AR drivers/gpu/drm/arm/built-in.a
CC drivers/acpi/acpica/nsutils.o
CC [M] fs/nfs/blocklayout/extent_tree.o
CC lib/objpool.o
CC [M] drivers/clk/clk-tps68470.o
CC [M] drivers/char/mwave/mwavedd.o
CC [M] drivers/char/xillybus/xillybus_class.o
CC [M] drivers/char/mwave/smapi.o
CC [M] drivers/virtio/virtio_dma_buf.o
CC kernel/resource.o
CC [M] drivers/leds/leds-wm831x-status.o
CC kernel/sysctl.o
CC drivers/char/mem.o
AR drivers/misc/cb710/built-in.a
CC [M] drivers/misc/cb710/core.o
CC drivers/char/random.o
CC [M] fs/netfs/buffered_write.o
CC drivers/tty/serial/8250/8250_pci.o
LD [M] net/tls/tls.o
CC [M] crypto/chacha20poly1305.o
CC [M] drivers/dma/dw/acpi.o
CC drivers/base/power/main.o
CC [M] drivers/regulator/act8865-regulator.o
CC drivers/xen/mem-reservation.o
CC [M] drivers/char/tpm/st33zp24/spi.o
CC arch/x86/kernel/i8253.o
CC arch/x86/kernel/hw_breakpoint.o
CC drivers/acpi/acpica/nswalk.o
CC kernel/bpf/ringbuf.o
CC [M] drivers/iommu/iommufd/hw_pagetable.o
CC [M] drivers/gpu/drm/tests/drm_exec_test.o
CC [M] drivers/clk/clk-twl6040.o
CC lib/plist.o
CC [M] drivers/regulator/ad5398.o
AR drivers/base/firmware_loader/built-in.a
CC [M] fs/netfs/direct_read.o
CC [M] drivers/leds/leds-wm8350.o
CC [M] drivers/pci/pci-stub.o
CC lib/radix-tree.o
CC [M] sound/soc/codecs/cs530x-i2c.o
CC [M] drivers/char/mwave/tp3780i.o
CC [M] net/llc/llc_s_st.o
CC drivers/acpi/acpica/nsxfeval.o
CC kernel/trace/fgraph.o
CC [M] drivers/char/xillybus/xillybus_core.o
CC [M] drivers/char/xillybus/xillybus_pcie.o
CC drivers/iommu/intel/nested.o
AR drivers/virtio/built-in.a
CC net/ipv6/inet6_connection_sock.o
CC kernel/trace/trace_events.o
CC [M] drivers/base/regmap/regmap-slimbus.o
CC [M] net/llc/af_llc.o
LD [M] drivers/char/tpm/st33zp24/tpm_st33zp24.o
CC [M] crypto/aegis128-core.o
LD [M] drivers/char/tpm/st33zp24/tpm_st33zp24_i2c.o
LD [M] drivers/char/tpm/st33zp24/tpm_st33zp24_spi.o
CC [M] drivers/base/regmap/regmap-spmi.o
CC net/core/ptp_classifier.o
CC drivers/char/tpm/tpm-dev-common.o
CC arch/x86/kernel/tsc.o
CC drivers/gpu/drm/clients/drm_client_setup.o
CC [M] drivers/block/drbd/drbd_proc.o
CC [M] fs/nfs/blocklayout/rpc_pipefs.o
CC [M] fs/nfs/filelayout/filelayoutdev.o
CC [M] drivers/gpu/drm/tests/drm_format_helper_test.o
CC [M] net/netfilter/nfnetlink_cttimeout.o
CC [M] drivers/dma/dw/platform.o
CC [M] drivers/dma/dw-edma/dw-edma-core.o
CC [M] drivers/iommu/iommufd/io_pagetable.o
CC [M] drivers/clk/clk-twl.o
CC [M] drivers/pci/pci-pf-stub.o
CC [M] drivers/misc/cb710/sgbuf2.o
CC drivers/xen/pci.o
CC [M] drivers/regulator/arizona-ldo1.o
CC [M] drivers/leds/leds-dac124s085.o
CC [M] sound/soc/codecs/es8311.o
CC [M] drivers/char/mwave/3780i.o
CC kernel/bpf/bpf_local_storage.o
CC [M] fs/lockd/clntlock.o
CC [M] fs/smb/common/cifs_arc4.o
CC drivers/acpi/acpica/nsxfname.o
CC [M] fs/nfs/client.o
CC arch/x86/kvm/kvm-asm-offsets.s
CC net/ipv4/tcp_input.o
CC drivers/tty/serial/8250/8250_rt288x.o
AR drivers/misc/lis3lv02d/built-in.a
CC [M] drivers/misc/lis3lv02d/lis3lv02d.o
CC drivers/char/tpm/tpm-dev.o
CC [M] crypto/pcrypt.o
CC [M] drivers/clk/clk-wm831x.o
CC [M] fs/nfs/flexfilelayout/flexfilelayoutdev.o
CC [M] drivers/pci/xen-pcifront.o
CC [M] fs/netfs/direct_write.o
CC drivers/iommu/intel/cache.o
CC [M] crypto/cryptd.o
CC [M] drivers/dma/dw/pci.o
CC [M] crypto/des_generic.o
CC [M] drivers/base/regmap/regmap-w1.o
CC drivers/gpu/drm/clients/drm_fbdev_client.o
CC drivers/char/tpm/tpm-interface.o
CC [M] drivers/leds/leds-spi-byte.o
CC [M] crypto/fcrypt.o
CC lib/ratelimit.o
LD [M] drivers/char/mwave/mwave.o
CC [M] drivers/iommu/iommufd/ioas.o
CC [M] drivers/regulator/arizona-micsupp.o
CC [M] arch/x86/kvm/vmx/pmu_intel.o
LD [M] fs/nfs/blocklayout/blocklayoutdriver.o
CC [M] drivers/dma/dw-edma/dw-edma-v0-core.o
CC drivers/base/power/wakeup.o
CC [M] net/llc/llc_station.o
CC [M] sound/soc/codecs/hda.o
CC drivers/acpi/acpica/nsxfobj.o
CC [M] sound/soc/codecs/hda-dai.o
LD [M] drivers/misc/cb710/cb710.o
CC [M] net/llc/llc_proc.o
CC [M] drivers/dma/dw-edma/dw-hdma-v0-core.o
CC [M] drivers/block/drbd/drbd_worker.o
CC [M] fs/smb/common/cifs_md4.o
CC [M] net/netfilter/nfnetlink_cthelper.o
LD [M] fs/nfs/filelayout/nfs_layout_nfsv41_files.o
CC drivers/char/tpm/tpm1-cmd.o
CC drivers/acpi/acpica/psargs.o
CC [M] drivers/char/xillybus/xillyusb.o
CC arch/x86/kernel/tsc_msr.o
CC [M] drivers/dma/dw-edma/dw-edma-v0-debugfs.o
CC [M] drivers/dma/dw-edma/dw-hdma-v0-debugfs.o
AR drivers/clk/built-in.a
CC drivers/acpi/acpica/psloop.o
CC net/core/netprio_cgroup.o
CC drivers/xen/dbgp.o
CC lib/rbtree.o
CC [M] drivers/gpu/drm/tests/drm_format_test.o
LD [M] drivers/dma/dw/dw_dmac_core.o
LD [M] drivers/dma/dw/dw_dmac.o
LD [M] drivers/dma/dw/dw_dmac_pci.o
CC mm/memblock.o
CC kernel/capability.o
CC [M] drivers/leds/uleds.o
CC drivers/char/tpm/tpm2-cmd.o
CC net/ipv6/udp_offload.o
AR drivers/gpu/drm/clients/built-in.a
CC [M] drivers/tty/serial/8250/8250_exar.o
CC [M] drivers/dma/dw-edma/dw-edma-pcie.o
CC kernel/bpf/bpf_task_storage.o
CC [M] drivers/base/regmap/regmap-i3c.o
AR drivers/pci/built-in.a
CC [M] drivers/regulator/as3711-regulator.o
CC fs/btrfs/file.o
CC [M] drivers/dma/ioat/init.o
CC [M] fs/nfs/dir.o
CC drivers/iommu/intel/prq.o
CC [M] drivers/iommu/iommufd/main.o
CC [M] net/bridge/netfilter/nft_meta_bridge.o
CC drivers/acpi/acpica/psobject.o
CC arch/x86/kernel/io_delay.o
CC lib/seq_buf.o
LD [M] fs/nfs/flexfilelayout/nfs_layout_flexfiles.o
AR drivers/gpu/drm/display/built-in.a
CC [M] drivers/gpu/drm/display/drm_display_helper_mod.o
CC [M] fs/netfs/iterator.o
CC [M] drivers/dma/sf-pdma/sf-pdma.o
CC [M] drivers/misc/lis3lv02d/lis3lv02d_i2c.o
CC [M] net/bridge/netfilter/nft_reject_bridge.o
CC [M] fs/lockd/clntproc.o
CC drivers/tty/serial/serial_ctrl.o
CC [M] crypto/blowfish_generic.o
CC drivers/xen/acpi.o
CC [M] drivers/iommu/iommufd/pages.o
AR drivers/misc/cardreader/built-in.a
CC [M] drivers/misc/cardreader/alcor_pci.o
CC kernel/bpf/bpf_inode_storage.o
CC [M] net/sunrpc/auth_gss/auth_gss.o
CC [M] net/llc/sysctl_net_llc.o
CC [M] fs/smb/client/trace.o
CC [M] net/kcm/kcmsock.o
CC [M] arch/x86/kvm/vmx/vmcs12.o
CC [M] sound/soc/codecs/hdmi-codec.o
CC [M] drivers/gpu/drm/tests/drm_framebuffer_test.o
AR drivers/leds/built-in.a
LD [M] net/llc/llc.o
CC [M] fs/netfs/locking.o
CC kernel/ptrace.o
CC drivers/acpi/acpica/psopcode.o
CC [M] drivers/regulator/atc260x-regulator.o
CC net/core/netclassid_cgroup.o
LD [M] drivers/dma/dw-edma/dw-edma.o
CC [M] drivers/base/regmap/regmap-spi-avmm.o
CC [M] drivers/gpu/drm/display/drm_bridge_connector.o
CC drivers/iommu/intel/trace.o
CC drivers/xen/xen-acpi-pad.o
CC [M] net/netfilter/nf_conntrack_amanda.o
CC arch/x86/kernel/rtc.o
CC [M] drivers/tty/serial/8250/8250_lpss.o
CC drivers/xen/pcpu.o
CC kernel/trace/trace_export.o
AR drivers/gpu/drm/renesas/rcar-du/built-in.a
CC net/ipv6/seg6.o
AR drivers/gpu/drm/renesas/rz-du/built-in.a
AR drivers/gpu/drm/renesas/built-in.a
CC [M] drivers/iommu/iommufd/vfio_compat.o
CC drivers/base/power/wakeup_stats.o
AR drivers/gpu/drm/omapdrm/built-in.a
CC [M] net/bridge/netfilter/nf_conntrack_bridge.o
CC [M] crypto/blowfish_common.o
CC [M] arch/x86/kvm/vmx/nested.o
CC drivers/char/tpm/tpmrm-dev.o
CC drivers/acpi/acpica/psopinfo.o
CC [M] drivers/dma/ioat/dma.o
CC kernel/trace/trace_syscalls.o
CC drivers/char/tpm/tpm2-space.o
CC drivers/base/power/trace.o
CC lib/siphash.o
LD [M] net/llc/llc2.o
AR drivers/gpu/drm/tilcdc/built-in.a
CC arch/x86/kernel/resource.o
AR drivers/gpu/drm/imx/built-in.a
CC drivers/acpi/acpi_processor.o
CC drivers/acpi/processor_core.o
CC drivers/acpi/acpica/psparse.o
CC [M] drivers/gpu/drm/tests/drm_gem_shmem_test.o
CC [M] drivers/regulator/aw37503-regulator.o
CC [M] drivers/misc/cardreader/rtsx_pcr.o
CC mm/memory_hotplug.o
CC kernel/bpf/disasm.o
CC [M] drivers/gpu/drm/tests/drm_hdmi_state_helper_test.o
CC drivers/acpi/acpica/psscope.o
CC [M] arch/x86/kvm/vmx/posted_intr.o
AR drivers/misc/pvpanic/built-in.a
CC [M] drivers/misc/pvpanic/pvpanic.o
CC drivers/mfd/88pm860x-core.o
AR drivers/misc/keba/built-in.a
CC [M] drivers/tty/serial/8250/8250_men_mcb.o
CC [M] drivers/misc/keba/cp500.o
CC [M] sound/soc/codecs/pcm6240.o
LD [M] sound/soc/codecs/snd-soc-ac97.o
CC drivers/acpi/processor_pdc.o
CC [M] drivers/regulator/axp20x-regulator.o
CC drivers/xen/biomerge.o
CC kernel/bpf/mprog.o
CC [M] fs/nfs/file.o
AR drivers/nfc/built-in.a
CC [M] fs/netfs/main.o
CC [M] drivers/misc/ibmasm/module.o
AS arch/x86/kernel/irqflags.o
CC kernel/trace/trace_event_perf.o
CC lib/string.o
CC [M] drivers/gpu/drm/display/drm_dp_dual_mode_helper.o
CC drivers/acpi/acpica/pstree.o
AR drivers/base/regmap/built-in.a
CC drivers/iommu/intel/svm.o
CC drivers/char/tpm/tpm-sysfs.o
CC arch/x86/kernel/static_call.o
CC [M] drivers/block/drbd/drbd_receiver.o
CC [M] crypto/twofish_generic.o
CC net/ipv6/fib6_notifier.o
CC [M] drivers/dma/ioat/prep.o
CC [M] drivers/gpu/drm/display/drm_dp_helper.o
CC lib/timerqueue.o
CC [M] drivers/tty/serial/8250/8250_pci1xxxx.o
CC drivers/base/power/clock_ops.o
CC [M] drivers/iommu/iommufd/viommu.o
CC [M] fs/lockd/clntxdr.o
CC drivers/acpi/ec.o
CC [M] net/netfilter/nf_conntrack_ftp.o
CC fs/btrfs/defrag.o
CC net/core/lwtunnel.o
CC drivers/xen/xen-balloon.o
CC [M] drivers/dma/ioat/dca.o
CC [M] net/kcm/kcmproc.o
CC [M] fs/lockd/host.o
CC [M] crypto/twofish_common.o
CC kernel/trace/trace_events_filter.o
CC [M] drivers/tty/serial/8250/8250_pericom.o
CC drivers/acpi/acpica/psutils.o
CC [M] net/bridge/netfilter/ebtables.o
CC [M] net/sunrpc/auth_gss/gss_mech_switch.o
CC [M] drivers/regulator/bcm590xx-regulator.o
CC [M] drivers/misc/ibmasm/ibmasmfs.o
CC arch/x86/kernel/process.o
CC drivers/tty/serdev/core.o
CC lib/union_find.o
CC drivers/char/ttyprintk.o
CC [M] drivers/misc/pvpanic/pvpanic-mmio.o
CC kernel/bpf/trampoline.o
CC [M] net/bridge/br.o
CC lib/vsprintf.o
LD [M] sound/soc/codecs/snd-soc-ak4619.o
LD [M] sound/soc/codecs/snd-soc-cs40l50.o
LD [M] sound/soc/codecs/snd-soc-cs530x.o
LD [M] sound/soc/codecs/snd-soc-cs530x-i2c.o
LD [M] sound/soc/codecs/snd-soc-es8311.o
LD [M] sound/soc/codecs/snd-soc-hda-codec.o
CC drivers/acpi/acpica/pswalk.o
LD [M] sound/soc/codecs/snd-soc-hdmi-codec.o
LD [M] sound/soc/codecs/snd-soc-pcm6240.o
CC mm/slub.o
CC drivers/char/tpm/eventlog/common.o
CC [M] sound/soc/amd/acp-config.o
CC drivers/xen/sys-hypervisor.o
CC drivers/iommu/intel/irq_remapping.o
AR drivers/iommu/iommufd/built-in.a
LD [M] drivers/iommu/iommufd/iommufd.o
CC net/ipv4/tcp_output.o
CC drivers/mfd/88pm860x-i2c.o
CC kernel/user.o
CC net/ipv6/rpl.o
CC drivers/mfd/wm8400-core.o
AR drivers/tty/serial/8250/built-in.a
CC drivers/tty/serial/serial_port.o
CC [M] drivers/dma/ioat/sysfs.o
CC [M] drivers/regulator/bd9571mwv-regulator.o
CC drivers/acpi/acpica/psxface.o
CC drivers/iommu/intel/perfmon.o
CC [M] drivers/gpu/drm/tests/drm_managed_test.o
CC [M] drivers/misc/pvpanic/pvpanic-pci.o
CC [M] drivers/regulator/da903x-regulator.o
AR drivers/base/power/built-in.a
CC [M] drivers/regulator/da9052-regulator.o
CC [M] drivers/misc/cardreader/rts5209.o
AR drivers/base/test/built-in.a
CC [M] drivers/misc/ibmasm/event.o
CC [M] sound/hda/ext/hdac_ext_bus.o
CC drivers/base/component.o
CC drivers/base/core.o
CC [M] drivers/block/mtip32xx/mtip32xx.o
LD [M] net/kcm/kcm.o
CC kernel/bpf/btf.o
CC [M] fs/lockd/svc.o
CC fs/btrfs/extent_map.o
CC drivers/acpi/acpica/rsaddr.o
CC [M] net/netfilter/nf_conntrack_h323_main.o
CC drivers/char/tpm/eventlog/tpm1.o
CC drivers/xen/platform-pci.o
CC drivers/tty/serdev/serdev-ttyport.o
CC [M] drivers/gpu/drm/display/drm_dp_mst_topology.o
CC [M] fs/smb/client/cifsfs.o
CC [M] drivers/misc/ibmasm/command.o
CC [M] drivers/gpu/drm/tests/drm_mm_test.o
CC [M] drivers/gpu/drm/tests/drm_modes_test.o
LD [M] drivers/dma/ioat/ioatdma.o
CC drivers/dma/dmaengine.o
CC [M] fs/netfs/misc.o
CC arch/x86/kernel/ptrace.o
CC [M] drivers/gpu/drm/tests/drm_plane_helper_test.o
CC net/core/lwt_bpf.o
CC [M] drivers/misc/cardreader/rts5229.o
AR drivers/gpu/drm/panel/built-in.a
CC [M] drivers/gpu/drm/panel/panel-auo-a030jtn01.o
AR drivers/gpu/drm/bridge/analogix/built-in.a
CC [M] drivers/gpu/drm/bridge/analogix/analogix-anx78xx.o
LD [M] sound/soc/amd/snd-acp-config.o
CC net/ipv6/ioam6.o
CC drivers/acpi/acpica/rscalc.o
CC [M] drivers/regulator/da9055-regulator.o
CC [M] fs/lockd/svclock.o
CC [M] fs/nfs/getroot.o
CC [M] sound/soc/dwc/dwc-i2s.o
AR drivers/gpu/drm/bridge/cadence/built-in.a
AR drivers/gpu/drm/hisilicon/built-in.a
CC [M] sound/soc/dwc/dwc-pcm.o
CC lib/win_minmax.o
CC drivers/tty/serial/earlycon.o
CC [M] net/sunrpc/auth_gss/svcauth_gss.o
CC drivers/nvdimm/core.o
CC [M] crypto/serpent_generic.o
CC [M] net/l2tp/l2tp_core.o
CC drivers/mfd/wm831x-core.o
AR drivers/iommu/intel/built-in.a
CC drivers/iommu/iommu.o
CC [M] sound/hda/ext/hdac_ext_controller.o
AR drivers/tty/serdev/built-in.a
CC [M] fs/romfs/storage.o
CC kernel/trace/trace_events_trigger.o
CC [M] drivers/misc/ibmasm/remote.o
CC kernel/bpf/memalloc.o
CC drivers/char/tpm/eventlog/tpm2.o
CC kernel/trace/trace_eprobe.o
CC drivers/xen/swiotlb-xen.o
CC [M] net/sunrpc/auth_gss/gss_rpc_upcall.o
CC [M] fs/autofs/init.o
CC drivers/acpi/acpica/rscreate.o
AR drivers/gpu/drm/mxsfb/built-in.a
CC [M] drivers/gpu/drm/tests/drm_probe_helper_test.o
CC drivers/dax/hmem/device.o
CC [M] drivers/misc/cardreader/rtl8411.o
CC [M] drivers/gpu/drm/panel/panel-ilitek-ili9341.o
CC [M] drivers/regulator/da9062-regulator.o
CC [M] drivers/gpu/drm/display/drm_dp_tunnel.o
CC drivers/tty/serial/max310x.o
CC fs/btrfs/sysfs.o
CC arch/x86/kernel/tls.o
LD [M] sound/soc/dwc/designware_i2s.o
CC [M] drivers/gpu/drm/bridge/analogix/analogix_dp_core.o
CC [M] fs/romfs/super.o
CC [M] drivers/misc/cardreader/rts5227.o
CC [M] fs/nfs/inode.o
CC [M] drivers/misc/ibmasm/heartbeat.o
CC [M] fs/netfs/objects.o
CC [M] sound/soc/intel/common/soc-acpi-intel-byt-match.o
CC [M] net/bridge/netfilter/ebtable_broute.o
CC drivers/char/tpm/tpm-buf.o
CC [M] fs/autofs/inode.o
CC net/core/dst_cache.o
CC [M] drivers/block/drbd/drbd_req.o
CC drivers/mfd/wm831x-irq.o
CC drivers/acpi/acpica/rsdumpinfo.o
CC [M] drivers/gpu/drm/tests/drm_rect_test.o
CC drivers/dma/virt-dma.o
CC drivers/tty/serial/sccnxp.o
CC [M] fs/lockd/svcshare.o
CC drivers/xen/mcelog.o
CC arch/x86/kernel/step.o
CC [M] sound/hda/ext/hdac_ext_stream.o
CC [M] sound/soc/intel/common/soc-acpi-intel-cht-match.o
CC [M] drivers/dax/hmem/hmem.o
CC [M] net/l2tp/l2tp_ip.o
CC [M] drivers/regulator/da9210-regulator.o
CC [M] drivers/gpu/drm/panel/panel-orisetech-ota5601a.o
CC [M] fs/smb/client/cifs_debug.o
CC net/ipv6/sysctl_net_ipv6.o
CC [M] drivers/misc/ibmasm/r_heartbeat.o
CC drivers/nvdimm/bus.o
CC drivers/acpi/acpica/rsinfo.o
CC lib/xarray.o
../drivers/gpu/drm/bridge/analogix/analogix_dp_core.c: In function ‘analogix_dp_probe’:
../drivers/gpu/drm/bridge/analogix/analogix_dp_core.c:1589:17: error: label ‘err_disable_clk’ used but not defined
1589 | goto err_disable_clk;
| ^~~~
CC drivers/base/bus.o
make[8]: *** [../scripts/Makefile.build:203: drivers/gpu/drm/bridge/analogix/analogix_dp_core.o] Error 1
make[7]: *** [../scripts/Makefile.build:461: drivers/gpu/drm/bridge/analogix] Error 2
make[6]: *** [../scripts/Makefile.build:461: drivers/gpu/drm/bridge] Error 2
make[6]: *** Waiting for unfinished jobs....
CC drivers/acpi/acpica/rsio.o
CC [M] drivers/misc/cardreader/rts5249.o
CC [M] sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.o
CC [M] crypto/sm4.o
LD [M] fs/romfs/romfs.o
CC [M] drivers/misc/ibmasm/dot_command.o
CC [M] net/netfilter/nf_conntrack_h323_asn1.o
CC [M] fs/netfs/read_collect.o
CC [M] fs/autofs/root.o
CC [M] drivers/misc/cardreader/rts5260.o
CC kernel/trace/trace_events_inject.o
CC [M] drivers/misc/ibmasm/lowlevel.o
CC arch/x86/kernel/tboot.o
CC drivers/acpi/acpica/rsirq.o
CC drivers/xen/xen-acpi-processor.o
CC drivers/mfd/wm831x-otp.o
AR drivers/dax/hmem/built-in.a
CC drivers/char/tpm/tpm2-sessions.o
CC [M] drivers/misc/sgi-xp/xp_main.o
LD [M] drivers/dax/hmem/dax_hmem.o
CC drivers/dax/super.o
CC [M] net/bridge/netfilter/ebtable_filter.o
CC arch/x86/kernel/i8237.o
CC [M] drivers/misc/sgi-xp/xp_uv.o
CC drivers/tty/serial/serial_mctrl_gpio.o
CC [M] drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.o
CC [M] drivers/regulator/da9211-regulator.o
CC [M] drivers/misc/ibmasm/uart.o
CC [M] fs/lockd/svcproc.o
CC drivers/dma/acpi-dma.o
CC [M] net/netfilter/nf_conntrack_irc.o
CC drivers/acpi/acpica/rslist.o
CC [M] drivers/block/zram/zcomp.o
CC drivers/iommu/iommu-traces.o
CC fs/btrfs/accessors.o
CC [M] fs/lockd/svcsubs.o
CC [M] drivers/block/null_blk/main.o
CC [M] net/sunrpc/auth_gss/gss_rpc_xdr.o
CC drivers/block/loop.o
LD [M] sound/hda/ext/snd-hda-ext-core.o
CC [M] sound/hda/hda_bus_type.o
CC net/ipv4/tcp_timer.o
CC [M] drivers/misc/sgi-xp/xpc_main.o
CC net/ipv6/ip6mr.o
CC [M] drivers/misc/cardreader/rts5261.o
CC net/core/gro_cells.o
CC net/core/failover.o
CC [M] crypto/sm4_generic.o
CC [M] sound/soc/intel/common/soc-acpi-intel-skl-match.o
CC drivers/mfd/wm831x-auxadc.o
CC [M] arch/x86/kvm/vmx/main.o
CC drivers/base/dd.o
CC drivers/acpi/acpica/rsmemory.o
CC [M] drivers/misc/sgi-gru/grufile.o
CC [M] drivers/block/zram/zram_drv.o
CC [M] fs/autofs/symlink.o
CC drivers/block/nbd.o
CC arch/x86/kernel/stacktrace.o
LD [M] drivers/misc/ibmasm/ibmasm.o
CC kernel/trace/trace_events_synth.o
CC [M] drivers/misc/sgi-gru/grumain.o
CC drivers/xen/efi.o
CC [M] drivers/gpu/drm/display/drm_dsc_helper.o
CC [M] net/bridge/netfilter/ebtable_nat.o
CC drivers/dax/bus.o
CC [M] fs/smb/client/connect.o
CC mm/madvise.o
CC [M] drivers/gpu/drm/panel/panel-widechips-ws2401.o
CC [M] drivers/misc/sgi-gru/grufault.o
CC [M] drivers/regulator/fan53555.o
CC drivers/nvdimm/dimm_devs.o
CC drivers/acpi/acpica/rsmisc.o
CC drivers/char/tpm/tpm_ppi.o
CC [M] crypto/aes_ti.o
CC lib/lockref.o
CC [M] drivers/block/drbd/drbd_actlog.o
CC drivers/tty/serial/kgdboc.o
CC [M] net/l2tp/l2tp_netlink.o
CC [M] drivers/misc/cardreader/rts5228.o
CC [M] drivers/dma/altera-msgdma.o
CC drivers/block/virtio_blk.o
CC lib/bcd.o
CC [M] sound/hda/hdac_bus.o
CC arch/x86/kernel/reboot.o
CC [M] net/sunrpc/auth_gss/trace.o
CC drivers/char/tpm/eventlog/acpi.o
CC [M] fs/autofs/waitq.o
CC [M] arch/x86/kvm/vmx/sgx.o
CC [M] drivers/misc/sgi-xp/xpc_channel.o
CC fs/btrfs/xattr.o
CC lib/sort.o
CC drivers/xen/xlate_mmu.o
CC [M] fs/netfs/read_pgpriv2.o
CC [M] sound/soc/intel/common/soc-acpi-intel-kbl-match.o
CC drivers/acpi/acpica/rsserial.o
CC drivers/mfd/wm831x-i2c.o
CC [M] crypto/camellia_generic.o
CC [M] crypto/cast_common.o
CC [M] drivers/regulator/gpio-regulator.o
CC [M] fs/nfs/super.o
CC [M] net/bridge/netfilter/ebt_802_3.o
CC drivers/iommu/iommu-sysfs.o
CC drivers/mfd/wm831x-spi.o
CC net/ipv4/tcp_ipv4.o
CC [M] fs/lockd/mon.o
CC [M] net/netfilter/nf_conntrack_broadcast.o
CC [M] drivers/block/null_blk/trace.o
CC drivers/acpi/acpica/rsutils.o
CC [M] drivers/misc/sgi-xp/xpc_partition.o
CC [M] fs/autofs/expire.o
CC drivers/base/syscore.o
CC [M] drivers/tty/serial/altera_jtaguart.o
CC fs/btrfs/ordered-data.o
CC [M] drivers/block/zram/backend_lzorle.o
CC [M] fs/fuse/dev.o
CC net/core/skmsg.o
CC lib/parser.o
CC [M] drivers/misc/sgi-gru/grutlbpurge.o
CC [M] drivers/misc/cardreader/rts5264.o
CC [M] drivers/gpu/drm/display/drm_hdcp_helper.o
CC [M] drivers/dma/idma64.o
CC drivers/char/tpm/eventlog/efi.o
CC [M] crypto/cast5_generic.o
CC [M] drivers/dax/device.o
CC [M] fs/nfs/io.o
CC net/ipv6/xfrm6_policy.o
CC [M] fs/netfs/read_retry.o
CC [M] drivers/block/zram/backend_lzo.o
CC drivers/acpi/acpica/rsxface.o
CC kernel/bpf/rqspinlock.o
CC [M] drivers/regulator/isl6271a-regulator.o
CC [M] sound/soc/intel/common/soc-acpi-intel-bxt-match.o
CC [M] sound/hda/hdac_device.o
CC [M] drivers/block/null_blk/zoned.o
CC [M] arch/x86/kvm/vmx/hyperv.o
CC [M] fs/fuse/dir.o
CC drivers/mfd/wm8350-core.o
CC [M] drivers/regulator/isl9305.o
CC drivers/nvdimm/nd_perf.o
CC [M] fs/fuse/file.o
CC mm/page_io.o
CC [M] drivers/misc/sgi-xp/xpc_uv.o
CC [M] drivers/tty/serial/altera_uart.o
CC [M] net/bridge/netfilter/ebt_among.o
CC drivers/xen/unpopulated-alloc.o
CC arch/x86/kernel/early-quirks.o
CC lib/debug_locks.o
LD [M] drivers/block/zram/zram.o
CC [M] fs/autofs/dev-ioctl.o
CC drivers/iommu/dma-iommu.o
CC [M] fs/9p/vfs_super.o
CC [M] fs/overlayfs/super.o
CC net/ipv6/xfrm6_state.o
CC kernel/trace/trace_events_hist.o
CC [M] drivers/misc/sgi-gru/gruprocfs.o
CC drivers/char/tpm/tpm_tis_core.o
CC [M] drivers/misc/cardreader/rtsx_usb.o
CC [M] drivers/block/drbd/drbd_main.o
CC [M] fs/lockd/trace.o
CC [M] drivers/dma/plx_dma.o
CC [M] net/l2tp/l2tp_eth.o
CC [M] drivers/dax/kmem.o
CC drivers/acpi/acpica/tbdata.o
CC lib/random32.o
CC [M] fs/9p/vfs_inode.o
CC [M] drivers/gpu/drm/display/drm_hdmi_audio_helper.o
CC [M] drivers/regulator/lm363x-regulator.o
CC [M] fs/9p/vfs_inode_dotl.o
CC [M] fs/netfs/read_single.o
CC [M] crypto/cast6_generic.o
CC drivers/base/driver.o
CC [M] net/sunrpc/auth_gss/gss_krb5_mech.o
CC [M] sound/soc/intel/common/soc-acpi-intel-glk-match.o
CC [M] drivers/misc/sgi-gru/grukservices.o
CC [M] drivers/tty/serial/arc_uart.o
CC [M] net/netfilter/nf_conntrack_netbios_ns.o
CC drivers/iommu/io-pgtable.o
CC fs/btrfs/extent_io.o
CC drivers/iommu/iova.o
CC drivers/block/xen-blkfront.o
CC [M] arch/x86/kvm/vmx/hyperv_evmcs.o
LD [M] fs/autofs/autofs4.o
CC fs/btrfs/volumes.o
CC [M] net/bridge/netfilter/ebt_arp.o
CC drivers/xen/grant-dma-ops.o
LD [M] drivers/block/null_blk/null_blk.o
CC fs/btrfs/async-thread.o
CC [M] sound/hda/hdac_sysfs.o
CC drivers/mfd/wm8350-regmap.o
CC [M] fs/nfs/direct.o
CC [M] drivers/dax/pmem.o
CC mm/swap_state.o
CC lib/bust_spinlocks.o
CC net/ipv6/xfrm6_input.o
CC [M] drivers/misc/sgi-xp/xpnet.o
CC drivers/nvdimm/dimm.o
AR drivers/dma/built-in.a
CC kernel/bpf/arena.o
CC [M] fs/nfs/pagelist.o
CC drivers/acpi/acpica/tbfadt.o
CC [M] fs/nfs/read.o
CC arch/x86/kernel/smp.o
CC [M] fs/lockd/xdr.o
CC [M] drivers/regulator/lp3971.o
CC [M] fs/overlayfs/namei.o
CC [M] drivers/tty/serial/fsl_linflexuart.o
LD [M] drivers/misc/cardreader/rtsx_pci.o
CC drivers/mfd/wm8350-gpio.o
CC [M] net/l2tp/l2tp_debugfs.o
CC drivers/base/class.o
CC drivers/mfd/wm8350-irq.o
CC [M] fs/9p/vfs_addr.o
CC [M] crypto/aria_generic.o
CC drivers/acpi/acpica/tbfind.o
CC [M] sound/soc/intel/common/soc-acpi-intel-cnl-match.o
CC drivers/char/tpm/tpm_tis.o
CC [M] fs/netfs/rolling_buffer.o
CC [M] drivers/gpu/drm/display/drm_hdmi_cec_helper.o
CC lib/kasprintf.o
CC [M] fs/smb/client/dir.o
CC [M] arch/x86/kvm/vmx/vmx_onhyperv.o
CC [M] fs/nfs/symlink.o
CC drivers/base/platform.o
CC [M] net/netfilter/nf_conntrack_snmp.o
CC [M] drivers/xen/evtchn.o
CC [M] fs/nfs/unlink.o
CC [M] sound/hda/hdac_regmap.o
CC [M] drivers/dax/cxl.o
CC drivers/nvdimm/region_devs.o
CC net/core/sock_map.o
CC net/core/bpf_sk_storage.o
CC [M] net/bridge/netfilter/ebt_ip.o
CC [M] drivers/tty/serial/fsl_lpuart.o
CC [M] fs/nfs/write.o
CC drivers/acpi/acpica/tbinstal.o
CC [M] drivers/regulator/lp3972.o
CC drivers/iommu/irq_remapping.o
LD [M] drivers/misc/sgi-xp/xp.o
LD [M] drivers/misc/sgi-xp/xpc.o
CC [M] net/dccp/ccid.o
CC [M] fs/fuse/inode.o
CC kernel/bpf/range_tree.o
CC [M] drivers/misc/sgi-gru/gruhandles.o
CC [M] net/sunrpc/auth_gss/gss_krb5_seal.o
CC [M] sound/soc/intel/common/soc-acpi-intel-cfl-match.o
CC [M] fs/9p/vfs_file.o
CC mm/swapfile.o
CC lib/bitmap.o
CC [M] fs/lockd/netlink.o
CC [M] fs/overlayfs/util.o
CC drivers/char/tpm/tpm_crb.o
CC drivers/acpi/acpica/tbprint.o
LD [M] drivers/dax/device_dax.o
LD [M] drivers/dax/dax_pmem.o
LD [M] drivers/dax/dax_cxl.o
AR drivers/dax/built-in.a
CC net/ipv6/xfrm6_output.o
CC drivers/mfd/wm8350-i2c.o
CC [M] net/l2tp/l2tp_ip6.o
CC [M] arch/x86/kvm/svm/svm.o
CC [M] fs/cachefiles/cache.o
CC [M] drivers/xen/gntdev.o
CC [M] crypto/chacha_generic.o
CC [M] fs/smb/client/file.o
CC [M] fs/vboxsf/dir.o
CC net/ipv4/tcp_minisocks.o
CC arch/x86/kernel/smpboot.o
CC [M] drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.o
CC [M] drivers/regulator/lp872x.o
CC drivers/acpi/acpica/tbutils.o
CC [M] drivers/misc/sgi-gru/grukdump.o
CC [M] sound/hda/hdac_controller.o
CC [M] fs/overlayfs/inode.o
CC [M] net/bridge/netfilter/ebt_ip6.o
CC [M] fs/netfs/write_collect.o
CC [M] fs/smb/client/inode.o
CC kernel/trace/trace_events_user.o
CC [M] fs/9p/vfs_dir.o
CC kernel/bpf/dispatcher.o
CC drivers/iommu/hyperv-iommu.o
CC [M] fs/lockd/clnt4xdr.o
CC [M] fs/fuse/control.o
CC [M] net/sunrpc/auth_gss/gss_krb5_unseal.o
CC drivers/base/cpu.o
CC [M] crypto/poly1305_generic.o
CC [M] sound/soc/intel/common/soc-acpi-intel-cml-match.o
CC drivers/mfd/tps65910.o
CC [M] net/dccp/feat.o
CC [M] drivers/char/tpm/tpm_tis_spi_main.o
CC [M] crypto/michael_mic.o
CC [M] net/netfilter/nf_conntrack_pptp.o
CC [M] sound/soc/intel/atom/sst/sst.o
CC drivers/acpi/acpica/tbxface.o
CC kernel/bpf/devmap.o
CC drivers/base/firmware.o
CC mm/zswap.o
CC [M] fs/cachefiles/daemon.o
CC [M] fs/vboxsf/file.o
CC [M] net/bridge/br_device.o
CC [M] drivers/tty/serial/lantiq.o
CC lib/scatterlist.o
CC drivers/nvdimm/region.o
CC [M] drivers/regulator/lp8788-buck.o
CC [M] drivers/xen/gntdev-dmabuf.o
CC [M] crypto/crc32_generic.o
CC net/ipv6/xfrm6_protocol.o
LD [M] drivers/misc/sgi-gru/gru.o
CC [M] fs/9p/vfs_dentry.o
CC [M] sound/soc/intel/common/soc-acpi-intel-icl-match.o
CC [M] drivers/misc/c2port/core.o
CC [M] drivers/block/drbd/drbd_strings.o
CC drivers/iommu/virtio-iommu.o
CC [M] drivers/gpu/drm/display/drm_hdmi_helper.o
CC net/core/devmem.o
CC [M] drivers/block/drbd/drbd_nl.o
CC drivers/iommu/iommu-sva.o
CC [M] drivers/misc/altera-stapl/altera-jtag.o
CC [M] net/bridge/netfilter/ebt_limit.o
CC [M] fs/fuse/xattr.o
CC [M] fs/nfs/namespace.o
CC [M] fs/overlayfs/file.o
CC drivers/acpi/acpica/tbxfload.o
CC drivers/mfd/tps65912-core.o
CC [M] drivers/char/tpm/tpm_tis_spi_cr50.o
CC drivers/base/init.o
CC [M] fs/lockd/xdr4.o
CC [M] net/sunrpc/auth_gss/gss_krb5_wrap.o
CC [M] sound/hda/hdac_stream.o
CC [M] crypto/authenc.o
CC [M] fs/lockd/svc4proc.o
CC [M] drivers/tty/serial/max3100.o
CC [M] fs/vboxsf/utils.o
CC [M] drivers/regulator/lp8788-ldo.o
CC drivers/nvdimm/namespace_devs.o
CC [M] fs/cachefiles/interface.o
CC arch/x86/kernel/tsc_sync.o
CC [M] fs/lockd/procfs.o
CC [M] fs/9p/v9fs.o
CC [M] drivers/misc/altera-stapl/altera-comp.o
CC [M] drivers/misc/mei/hdcp/mei_hdcp.o
CC [M] drivers/xen/gntalloc.o
CC [M] fs/netfs/write_issue.o
CC fs/btrfs/ioctl.o
CC [M] drivers/misc/altera-stapl/altera.o
CC kernel/trace/bpf_trace.o
CC [M] fs/fuse/acl.o
CC [M] sound/soc/intel/atom/sst/sst_ipc.o
CC drivers/base/map.o
CC [M] sound/soc/intel/common/soc-acpi-intel-tgl-match.o
CC kernel/bpf/cpumap.o
CC [M] fs/fuse/readdir.o
CC lib/list_sort.o
CC drivers/acpi/acpica/tbxfroot.o
CC [M] drivers/char/tpm/tpm_tis_i2c_cr50.o
CC [M] drivers/gpu/drm/display/drm_scdc_helper.o
CC [M] fs/fuse/ioctl.o
CC [M] net/bridge/netfilter/ebt_mark_m.o
CC [M] fs/overlayfs/dir.o
CC drivers/acpi/acpica/utaddress.o
CC mm/dmapool.o
CC [M] drivers/misc/c2port/c2port-duramar2150.o
CC [M] net/core/pktgen.o
CC arch/x86/kernel/setup_percpu.o
CC lib/uuid.o
CC drivers/mfd/tps65912-i2c.o
CC net/ipv4/tcp_cong.o
CC drivers/iommu/io-pgfault.o
CC [M] drivers/regulator/lp8755.o
CC [M] fs/vboxsf/vboxsf_wrappers.o
CC [M] net/sunrpc/auth_gss/gss_krb5_crypto.o
CC [M] net/netfilter/nf_conntrack_sane.o
AR drivers/tty/ipwireless/built-in.a
CC [M] drivers/tty/serial/men_z135_uart.o
CC [M] fs/nfs/mount_clnt.o
CC [M] net/sunrpc/auth_gss/gss_krb5_keys.o
CC [M] fs/9p/fid.o
CC lib/iov_iter.o
CC drivers/base/devres.o
CC [M] fs/nfs/nfstrace.o
CC drivers/acpi/acpica/utalloc.o
LD [M] fs/lockd/lockd.o
CC [M] net/dccp/input.o
CC net/ipv4/tcp_metrics.o
CC [M] drivers/tty/serial/rp2.o
CC [M] fs/cachefiles/io.o
CC net/ipv6/netfilter.o
CC [M] drivers/xen/privcmd.o
CC [M] sound/soc/intel/atom/sst/sst_stream.o
CC [M] fs/netfs/write_retry.o
CC [M] sound/hda/array.o
CC [M] drivers/misc/mei/pxp/mei_pxp.o
CC [M] fs/netfs/stats.o
CC [M] fs/netfs/fscache_cache.o
UPD arch/x86/kvm/kvm-asm-offsets.h
CC [M] arch/x86/kvm/svm/pmu.o
CC [M] drivers/char/tpm/tpm_tis_i2c.o
CC [M] fs/smb/client/link.o
CC [M] sound/soc/intel/atom/sst/sst_drv_interface.o
CC [M] sound/soc/intel/common/soc-acpi-intel-ehl-match.o
CC [M] crypto/authencesn.o
CC [M] fs/cachefiles/key.o
CC drivers/mfd/tps65912-spi.o
CC drivers/acpi/acpica/utascii.o
CC [M] net/bridge/netfilter/ebt_pkttype.o
CC [M] fs/vboxsf/super.o
CC mm/hugetlb.o
CC [M] fs/cachefiles/main.o
CC [M] drivers/regulator/ltc3589.o
CC [M] fs/9p/xattr.o
CC [M] fs/overlayfs/readdir.o
CC arch/x86/kernel/mpparse.o
CC [M] drivers/gpu/drm/display/drm_hdmi_state_helper.o
CC [M] fs/fuse/iomode.o
CC kernel/bpf/offload.o
AR drivers/iommu/built-in.a
CC [M] fs/cachefiles/namei.o
CC [M] net/netfilter/nf_conntrack_sip.o
CC drivers/acpi/acpica/utbuffer.o
CC [M] drivers/xen/privcmd-buf.o
CC [M] drivers/misc/altera-stapl/altera-lpt.o
CC [M] drivers/misc/mei/gsc_proxy/mei_gsc_proxy.o
CC [M] drivers/tty/serial/sc16is7xx.o
CC drivers/dma-buf/heaps/system_heap.o
CC drivers/cxl/core/suspend.o
CC drivers/mfd/twl-core.o
AR drivers/macintosh/built-in.a
CC drivers/dma-buf/dma-buf.o
CC [M] drivers/char/tpm/tpm_i2c_atmel.o
CC drivers/nvdimm/label.o
CC [M] sound/soc/intel/atom/sst/sst_loader.o
LD [M] drivers/misc/altera-stapl/altera-stapl.o
CC drivers/dma-buf/dma-fence.o
AR drivers/scsi/device_handler/built-in.a
CC [M] drivers/scsi/device_handler/scsi_dh_rdac.o
CC [M] sound/hda/hdmi_chmap.o
CC drivers/base/attribute_container.o
CC [M] sound/hda/trace.o
CC [M] sound/soc/intel/common/soc-acpi-intel-jsl-match.o
CC [M] arch/x86/kvm/svm/nested.o
CC [M] crypto/lz4.o
CC drivers/acpi/acpica/utcksum.o
CC [M] fs/9p/cache.o
CC [M] net/bridge/netfilter/ebt_stp.o
LD [M] fs/vboxsf/vboxsf.o
CC [M] drivers/regulator/ltc3676.o
CC [M] fs/fuse/dax.o
AR drivers/scsi/megaraid/built-in.a
CC [M] drivers/scsi/megaraid/megaraid_mm.o
CC [M] net/bridge/br_fdb.o
CC [M] drivers/xen/xen-scsiback.o
LD [M] net/sunrpc/auth_gss/auth_rpcgss.o
LD [M] net/sunrpc/auth_gss/rpcsec_gss_krb5.o
CC arch/x86/kernel/ftrace.o
CC [M] net/sunrpc/clnt.o
CC [M] drivers/misc/mei/init.o
CC [M] fs/netfs/fscache_cookie.o
CC [M] fs/smb/client/misc.o
CC drivers/acpi/acpica/utcopy.o
CC [M] crypto/lz4hc.o
CC [M] fs/overlayfs/copy_up.o
AR drivers/dma-buf/heaps/built-in.a
CC [M] fs/fuse/passthrough.o
CC [M] drivers/char/tpm/tpm_i2c_infineon.o
CC [M] sound/soc/intel/common/soc-acpi-intel-adl-match.o
CC drivers/acpi/acpica/utexcep.o
CC net/ipv4/tcp_fastopen.o
CC [M] sound/soc/intel/atom/sst/sst_pvt.o
CC [M] drivers/gpu/drm/display/drm_dp_aux_dev.o
CC [M] net/dccp/minisocks.o
CC lib/clz_ctz.o
CC [M] drivers/scsi/device_handler/scsi_dh_hp_sw.o
CC [M] fs/9p/acl.o
CC [M] sound/x86/intel_hdmi_audio.o
CC net/ipv6/fib6_rules.o
CC [M] drivers/cxl/core/port.o
CC fs/btrfs/locking.o
CC net/ipv4/tcp_rate.o
CC drivers/base/transport_class.o
CC [M] drivers/block/floppy.o
CC [M] drivers/regulator/max14577-regulator.o
CC drivers/nvme/common/auth.o
AR net/core/built-in.a
CC drivers/nvme/host/core.o
CC [M] net/bridge/netfilter/ebt_vlan.o
AR drivers/nvme/target/built-in.a
CC drivers/mfd/twl4030-irq.o
CC [M] drivers/nvme/target/core.o
CC [M] crypto/842.o
CC [M] drivers/tty/serial/sc16is7xx_spi.o
CC lib/bsearch.o
CC [M] fs/cachefiles/security.o
CC drivers/nvdimm/badrange.o
AS arch/x86/kernel/ftrace_64.o
CC [M] sound/hda/hdac_component.o
CC kernel/trace/trace_kprobe.o
CC [M] drivers/block/drbd/drbd_interval.o
CC drivers/acpi/acpica/utdebug.o
CC [M] drivers/xen/pvcalls-front.o
CC [M] fs/netfs/fscache_io.o
CC [M] drivers/misc/mei/hbm.o
CC [M] drivers/scsi/device_handler/scsi_dh_emc.o
CC [M] drivers/char/tpm/tpm_i2c_nuvoton.o
CC kernel/bpf/net_namespace.o
CC [M] drivers/scsi/megaraid/megaraid_mbox.o
LD [M] fs/9p/9p.o
CC [M] sound/soc/intel/common/soc-acpi-intel-rpl-match.o
CC [M] drivers/gpu/drm/display/drm_dp_cec.o
CC drivers/dma-buf/dma-fence-array.o
CC [M] drivers/nvme/target/configfs.o
CC [M] drivers/block/drbd/drbd_state.o
CC [M] drivers/scsi/device_handler/scsi_dh_alua.o
CC arch/x86/kernel/trace_clock.o
CC [M] fs/overlayfs/export.o
CC [M] arch/x86/kvm/svm/avic.o
CC [M] crypto/ansi_cprng.o
CC [M] drivers/regulator/max1586.o
CC [M] fs/fuse/sysctl.o
CC drivers/mfd/twl6030-irq.o
CC [M] drivers/tty/serial/sc16is7xx_i2c.o
CC drivers/base/topology.o
LD [M] sound/x86/snd-hdmi-lpe-audio.o
CC [M] drivers/nvme/target/admin-cmd.o
CC fs/btrfs/orphan.o
CC [M] sound/soc/intel/atom/sst/sst_pci.o
CC arch/x86/kernel/trace.o
CC [M] fs/cachefiles/volume.o
CC [M] net/netfilter/nf_conntrack_tftp.o
CC [M] net/bridge/netfilter/ebt_arpreply.o
CC drivers/acpi/acpica/utdecode.o
CC [M] fs/nfs/export.o
CC net/ipv4/tcp_recovery.o
CC lib/find_bit.o
CC drivers/nvme/common/keyring.o
CC [M] fs/smb/client/netmisc.o
CC [M] drivers/nvme/target/fabrics-cmd.o
CC [M] sound/hda/hdac_i915.o
CC [M] crypto/tcrypt.o
CC [M] fs/fuse/dev_uring.o
CC drivers/dma-buf/dma-fence-chain.o
CC [M] drivers/char/tpm/tpm_nsc.o
CC drivers/nvdimm/claim.o
CC drivers/acpi/acpica/utdelete.o
CC [M] fs/overlayfs/params.o
CC [M] drivers/regulator/max77503-regulator.o
CC [M] sound/soc/intel/common/soc-acpi-intel-mtl-match.o
CC arch/x86/kernel/rethook.o
CC [M] net/dccp/options.o
CC lib/llist.o
CC [M] drivers/tty/serial/sprd_serial.o
CC [M] drivers/cxl/core/pmem.o
CC [M] sound/soc/intel/atom/sst/sst_acpi.o
CC fs/btrfs/export.o
CC kernel/bpf/tcx.o
CC [M] drivers/misc/vmw_vmci/vmci_context.o
CC drivers/base/container.o
CC net/ipv6/proc.o
CC [M] fs/netfs/fscache_main.o
CC [M] drivers/misc/mei/interrupt.o
CC [M] fs/cachefiles/xattr.o
CC lib/lwq.o
CC [M] drivers/nvme/target/discovery.o
CC [M] drivers/misc/genwqe/card_base.o
CC [M] drivers/misc/echo/echo.o
CC [M] net/bridge/netfilter/ebt_mark.o
CC drivers/mfd/twl4030-audio.o
LD [M] drivers/gpu/drm/display/drm_display_helper.o
CC [M] arch/x86/kvm/svm/sev.o
make[5]: *** [../scripts/Makefile.build:461: drivers/gpu/drm] Error 2
make[4]: *** [../scripts/Makefile.build:461: drivers/gpu] Error 2
make[4]: *** Waiting for unfinished jobs....
CC drivers/acpi/acpica/uterror.o
CC drivers/acpi/acpica/uteval.o
CC drivers/acpi/dock.o
CC [M] drivers/scsi/megaraid/megaraid_sas_base.o
CC [M] net/bridge/br_forward.o
CC arch/x86/kernel/vmcore_info_64.o
AR drivers/nvme/common/built-in.a
CC drivers/base/property.o
CC drivers/char/misc.o
CC [M] sound/soc/intel/common/soc-acpi-intel-arl-match.o
CC [M] drivers/char/tpm/tpm_atmel.o
CC lib/memweight.o
CC [M] sound/soc/intel/atom/sst-mfld-platform-pcm.o
CC [M] drivers/regulator/max77541-regulator.o
CC net/ipv4/tcp_ulp.o
CC [M] fs/nfs/sysfs.o
CC drivers/dma-buf/dma-fence-unwrap.o
CC drivers/mfd/twl6040.o
CC [M] drivers/tty/serial/uartlite.o
CC [M] drivers/xen/xen-front-pgdir-shbuf.o
CC [M] sound/hda/intel-dsp-config.o
LD [M] sound/soc/intel/atom/sst/snd-intel-sst-core.o
CC [M] fs/smb/client/smbencrypt.o
LD [M] sound/soc/intel/atom/sst/snd-intel-sst-pci.o
LD [M] sound/soc/intel/atom/sst/snd-intel-sst-acpi.o
CC drivers/dma-buf/dma-resv.o
CC drivers/acpi/acpica/utglobal.o
CC fs/btrfs/tree-log.o
CC [M] fs/fuse/cuse.o
CC kernel/signal.o
CC lib/kfifo.o
CC [M] fs/overlayfs/xattrs.o
CC [M] drivers/nvme/target/io-cmd-file.o
CC kernel/bpf/stackmap.o
CC [M] net/netfilter/nf_log_syslog.o
CC [M] drivers/block/drbd/drbd_nla.o
CC kernel/trace/error_report-traces.o
CC arch/x86/kernel/machine_kexec_64.o
CC [M] drivers/cxl/core/regs.o
CC [M] fs/cachefiles/error_inject.o
CC [M] drivers/misc/genwqe/card_dev.o
CC drivers/nvdimm/btt_devs.o
CC mm/hugetlb_vmemmap.o
CC [M] net/sunrpc/xprt.o
CC [M] drivers/misc/mei/client.o
CC [M] net/bridge/netfilter/ebt_dnat.o
CC net/ipv6/syncookies.o
CC [M] drivers/block/drbd/drbd_debugfs.o
CC [M] drivers/cxl/core/memdev.o
CC [M] drivers/misc/vmw_vmci/vmci_datagram.o
CC [M] drivers/char/tpm/tpm_infineon.o
CC [M] drivers/regulator/max8649.o
CC drivers/nvme/host/ioctl.o
CC [M] drivers/cxl/core/mbox.o
CC drivers/acpi/acpica/uthex.o
CC [M] sound/soc/intel/common/soc-acpi-intel-lnl-match.o
AS arch/x86/kernel/relocate_kernel_64.o
LD [M] fs/overlayfs/overlay.o
CC [M] net/bridge/netfilter/ebt_redirect.o
AR drivers/tty/serial/built-in.a
CC mm/mempolicy.o
CC drivers/tty/tty_io.o
CC drivers/nvme/host/sysfs.o
CC [M] fs/fuse/virtio_fs.o
CC [M] crypto/polyval-generic.o
CC [M] drivers/nvme/target/io-cmd-bdev.o
CC [M] fs/netfs/fscache_volume.o
CC [M] sound/soc/intel/atom/sst-mfld-platform-compress.o
CC net/ipv4/tcp_offload.o
CC [M] net/dccp/output.o
LD [M] fs/cachefiles/cachefiles.o
CC [M] sound/hda/intel-nhlt.o
CC drivers/acpi/acpica/utids.o
CC [M] drivers/nvme/target/pr.o
LD [M] drivers/xen/xen-evtchn.o
CC [M] fs/smb/client/transport.o
LD [M] drivers/xen/xen-gntdev.o
LD [M] drivers/xen/xen-gntalloc.o
LD [M] drivers/xen/xen-privcmd.o
AR drivers/xen/built-in.a
CC drivers/char/virtio_console.o
CC drivers/mfd/mfd-core.o
CC arch/x86/kernel/kexec-bzimage64.o
CC drivers/dma-buf/dma-heap.o
CC [M] drivers/regulator/max8660.o
CC lib/percpu-refcount.o
CC [M] drivers/misc/genwqe/card_ddcb.o
CC kernel/bpf/cgroup_iter.o
CC [M] drivers/char/tpm/xen-tpmfront.o
CC drivers/base/cacheinfo.o
CC [M] net/bridge/br_if.o
CC [M] drivers/misc/vmw_vmci/vmci_doorbell.o
CC kernel/trace/power-traces.o
CC kernel/bpf/bpf_cgrp_storage.o
CC drivers/acpi/acpica/utinit.o
CC [M] net/netfilter/nf_nat_core.o
CC drivers/char/hpet.o
CC drivers/dma-buf/sync_file.o
CC kernel/trace/rpm-traces.o
CC [M] fs/nfs/fs_context.o
CC kernel/sys.o
CC [M] sound/soc/intel/common/soc-acpi-intel-ptl-match.o
CC [M] fs/netfs/fscache_proc.o
CC drivers/nvdimm/pfn_devs.o
CC [M] sound/soc/intel/atom/sst-atom-controls.o
CC [M] crypto/af_alg.o
CC [M] drivers/misc/bcm-vk/bcm_vk_dev.o
CC [M] net/bridge/netfilter/ebt_snat.o
CC [M] sound/hda/intel-sdw-acpi.o
CC drivers/acpi/acpica/utlock.o
LD [M] drivers/block/drbd/drbd.o
CC [M] drivers/block/brd.o
CC arch/x86/kernel/crash_dump_64.o
CC [M] fs/smb/client/cached_dir.o
CC [M] drivers/regulator/max8893.o
CC [M] drivers/cxl/core/pci.o
CC [M] fs/nfs/sysctl.o
CC [M] drivers/char/tpm/tpm_vtpm_proxy.o
LD [M] fs/fuse/fuse.o
CC [M] drivers/misc/genwqe/card_sysfs.o
LD [M] fs/fuse/virtiofs.o
AR sound/built-in.a
CC [M] arch/x86/kvm/svm/hyperv.o
CC [M] drivers/misc/mei/main.o
CC [M] net/bridge/netfilter/ebt_log.o
CC drivers/base/swnode.o
CC [M] drivers/nvme/target/passthru.o
CC net/ipv6/calipso.o
CC lib/rhashtable.o
CC drivers/nvme/host/pr.o
CC drivers/acpi/acpica/utmath.o
CC drivers/mfd/ezx-pcap.o
CC [M] net/sunrpc/socklib.o
CC [M] drivers/misc/vmw_vmci/vmci_driver.o
CC drivers/acpi/pci_root.o
CC [M] fs/netfs/fscache_stats.o
CC drivers/acpi/pci_link.o
CC kernel/bpf/cgroup.o
CC drivers/dma-buf/sw_sync.o
CC [M] drivers/scsi/megaraid/megaraid_sas_fusion.o
CC kernel/trace/trace_kdb.o
CC [M] sound/soc/intel/common/soc-acpi-intel-hda-match.o
CC drivers/tty/n_tty.o
CC arch/x86/kernel/crash.o
LD [M] sound/soc/intel/atom/snd-soc-sst-atom-hifi2-platform.o
CC [M] drivers/scsi/megaraid/megaraid_sas_fp.o
CC drivers/acpi/acpica/utmisc.o
CC arch/x86/kernel/module.o
CC fs/btrfs/free-space-cache.o
CC [M] drivers/block/ublk_drv.o
CC [M] drivers/regulator/max8907-regulator.o
CC [M] drivers/misc/genwqe/card_debugfs.o
LD [M] sound/hda/snd-hda-core.o
LD [M] sound/hda/snd-intel-dspcfg.o
LD [M] sound/hda/snd-intel-sdw-acpi.o
LD [M] drivers/char/tpm/tpm_tis_spi.o
CC [M] drivers/misc/bcm-vk/bcm_vk_msg.o
AR drivers/char/tpm/built-in.a
CC net/ipv4/tcp_plb.o
CC [M] drivers/char/uv_mmtimer.o
CC [M] net/sctp/sm_statetable.o
CC [M] net/dccp/proto.o
CC [M] net/sunrpc/xprtsock.o
CC drivers/acpi/pci_irq.o
CC [M] arch/x86/kvm/svm/svm_onhyperv.o
CC kernel/umh.o
CC drivers/tty/tty_ioctl.o
CC drivers/nvdimm/dax_devs.o
CC drivers/acpi/acpica/utmutex.o
CC drivers/nvme/host/trace.o
CC [M] drivers/nvme/target/zns.o
CC [M] drivers/cxl/core/hdm.o
CC [M] drivers/misc/vmw_vmci/vmci_event.o
CC [M] drivers/nvme/target/fabrics-cmd-auth.o
CC [M] net/bridge/netfilter/ebt_nflog.o
CC [M] net/sunrpc/sched.o
CC [M] crypto/algif_hash.o
CC [M] crypto/algif_skcipher.o
CC [M] drivers/misc/mei/dma-ring.o
CC arch/x86/kernel/kgdb.o
CC drivers/dma-buf/sync_debug.o
CC [M] sound/soc/intel/common/soc-acpi-intel-sdw-mockup-match.o
CC [M] fs/smb/client/cifs_unicode.o
CC mm/sparse.o
CC drivers/base/faux.o
CC [M] drivers/regulator/max8925-regulator.o
CC [M] drivers/misc/genwqe/card_utils.o
CC [M] net/netfilter/nf_nat_proto.o
CC drivers/acpi/acpica/utnonansi.o
CC drivers/mfd/da903x.o
LD [M] fs/netfs/netfs.o
CC [M] drivers/char/lp.o
CC [M] net/rds/af_rds.o
CC arch/x86/kernel/early_printk.o
CC kernel/trace/trace_dynevent.o
CC drivers/dma-buf/udmabuf.o
CC net/ipv6/seg6_iptunnel.o
CC [M] net/rds/bind.o
CC [M] net/sunrpc/auth.o
LD [M] arch/x86/kvm/kvm.o
CC drivers/tty/tty_ldisc.o
CC [M] net/rds/cong.o
CC [M] fs/nfs/fscache.o
CC lib/base64.o
AS [M] arch/x86/kvm/vmx/vmenter.o
AS [M] arch/x86/kvm/svm/vmenter.o
CC [M] drivers/misc/bcm-vk/bcm_vk_sg.o
CC [M] net/sctp/sm_statefuns.o
CC kernel/bpf/reuseport_array.o
CC drivers/acpi/acpica/utobject.o
CC [M] sound/soc/intel/common/soc-acpi-intel-ssp-common.o
LD [M] arch/x86/kvm/kvm-intel.o
CC [M] drivers/misc/bcm-vk/bcm_vk_tty.o
CC [M] drivers/misc/mei/bus.o
LD [M] arch/x86/kvm/kvm-amd.o
CC [M] drivers/nvme/target/auth.o
CC [M] drivers/misc/mei/bus-fixup.o
CC lib/once.o
CC [M] drivers/misc/vmw_vmci/vmci_guest.o
CC net/ipv4/datagram.o
CC [M] net/bridge/br_input.o
CC mm/sparse-vmemmap.o
CC [M] crypto/algif_rng.o
CC [M] drivers/regulator/max8952.o
CC [M] drivers/misc/vmw_vmci/vmci_handle_array.o
CC [M] drivers/nvme/target/trace.o
CC drivers/nvdimm/security.o
CC [M] drivers/scsi/libsas/sas_init.o
CC arch/x86/kernel/hpet.o
CC drivers/nvme/host/multipath.o
CC drivers/tty/tty_buffer.o
CC drivers/base/auxiliary.o
AR drivers/block/built-in.a
CC fs/btrfs/zlib.o
CC [M] drivers/char/applicom.o
CC drivers/acpi/acpica/utosi.o
CC [M] drivers/scsi/megaraid/megaraid_sas_debugfs.o
CC [M] fs/smb/client/nterr.o
LD [M] drivers/misc/genwqe/genwqe_card.o
CC drivers/nvdimm/e820.o
CC lib/refcount.o
CC [M] drivers/cxl/core/pmu.o
CC [M] drivers/dma-buf/selftest.o
CC lib/rcuref.o
CC [M] net/9p/mod.o
LD [M] drivers/misc/bcm-vk/bcm_vk.o
CC [M] net/9p/client.o
CC drivers/mfd/da9052-irq.o
CC [M] net/netfilter/nf_nat_helper.o
CC net/ipv4/raw.o
CC [M] net/9p/error.o
CC [M] drivers/misc/vmw_vmci/vmci_host.o
CC [M] net/sunrpc/auth_null.o
CC drivers/acpi/acpica/utownerid.o
CC [M] net/netfilter/nf_nat_redirect.o
CC kernel/trace/trace_probe.o
CC [M] sound/soc/intel/common/soc-acpi-intel-sdca-quirks.o
CC [M] fs/smb/client/cifsencrypt.o
CC [M] net/sunrpc/auth_tls.o
CC [M] drivers/regulator/max8997-regulator.o
CC [M] drivers/nvme/target/loop.o
CC [M] drivers/misc/uacce/uacce.o
CC kernel/bpf/bpf_struct_ops.o
CC [M] drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.o
CC [M] net/dccp/timer.o
CC lib/usercopy.o
CC [M] crypto/algif_aead.o
CC [M] drivers/dma-buf/st-dma-fence.o
CC [M] drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.o
CC mm/mmu_notifier.o
LD [M] drivers/scsi/megaraid/megaraid_sas.o
CC mm/ksm.o
CC drivers/misc/sram.o
CC fs/open.o
CC drivers/mfd/da9052-core.o
CC drivers/base/devtmpfs.o
CC [M] drivers/nvdimm/pmem.o
CC drivers/acpi/acpica/utpredef.o
CC [M] drivers/char/nvram.o
CC [M] drivers/misc/mei/debugfs.o
CC drivers/tty/tty_port.o
CC fs/btrfs/lzo.o
CC [M] net/rds/connection.o
CC kernel/bpf/cpumask.o
CC arch/x86/kernel/amd_nb.o
CC [M] drivers/misc/vmw_vmci/vmci_queue_pair.o
CC [M] fs/nfs/nfs2super.o
CC [M] drivers/cxl/core/cdat.o
CC lib/errseq.o
CC net/ipv4/udp.o
CC drivers/acpi/acpica/utresdecode.o
CC net/ipv6/seg6_local.o
CC [M] drivers/char/ppdev.o
CC [M] drivers/scsi/libsas/sas_phy.o
CC [M] drivers/regulator/max8998.o
CC [M] drivers/dma-buf/st-dma-fence-chain.o
CC [M] fs/nfs/proc.o
CC arch/x86/kernel/amd_node.o
CC lib/bucket_locks.o
LD [M] sound/soc/intel/common/snd-soc-acpi-intel-match.o
CC [M] drivers/misc/mei/mei-trace.o
LD [M] sound/soc/intel/common/snd-soc-acpi-intel-sdca-quirks.o
CC [M] drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.o
CC [M] net/sunrpc/auth_unix.o
CC arch/x86/kernel/kvm.o
CC [M] net/psample/psample.o
CC arch/x86/kernel/kvmclock.o
CC [M] drivers/nvme/target/fc.o
CC fs/read_write.o
CC drivers/nvme/host/zns.o
CC [M] sound/soc/sdca/sdca_functions.o
CC drivers/acpi/acpica/utresrc.o
CC [M] net/bridge/br_ioctl.o
CC drivers/tty/tty_mutex.o
CC [M] crypto/zstd.o
CC drivers/base/isa.o
CC [M] drivers/nvdimm/btt.o
CC [M] drivers/char/tlclk.o
CC drivers/mfd/da9052-spi.o
CC [M] sound/soc/sdca/sdca_device.o
CC kernel/trace/trace_uprobe.o
CC fs/btrfs/zstd.o
CC kernel/bpf/bpf_lsm.o
CC [M] drivers/scsi/libsas/sas_port.o
CC drivers/mfd/da9052-i2c.o
CC [M] fs/smb/client/readdir.o
CC [M] net/vmw_vsock/af_vsock.o
CC drivers/acpi/acpica/utstate.o
CC drivers/mfd/lp8788.o
CC [M] net/sctp/sm_sideeffect.o
CC [M] net/vmw_vsock/af_vsock_tap.o
CC arch/x86/kernel/paravirt.o
CC [M] drivers/regulator/max20086-regulator.o
CC [M] drivers/dma-buf/st-dma-fence-unwrap.o
CC [M] drivers/misc/vmw_vmci/vmci_resource.o
CC [M] net/sctp/protocol.o
CC [M] net/netfilter/nf_nat_masquerade.o
CC [M] crypto/essiv.o
CC lib/generic-radix-tree.o
CC [M] net/dccp/qpolicy.o
CC drivers/nvme/host/fault_inject.o
CC drivers/acpi/acpica/utstring.o
CC net/ipv4/udplite.o
CC [M] drivers/scsi/libsas/sas_event.o
CC [M] net/9p/protocol.o
CC net/ipv4/udp_offload.o
CC [M] drivers/scsi/libsas/sas_discover.o
CC [M] fs/nfs/nfs2xdr.o
CC drivers/tty/tty_ldsem.o
CC [M] drivers/char/hangcheck-timer.o
CC drivers/base/node.o
CC [M] drivers/cxl/core/ras.o
CC drivers/base/memory.o
CC mm/page_poison.o
CC [M] net/bridge/br_stp.o
CC [M] drivers/cxl/port.o
CC [M] net/vmw_vsock/vsock_addr.o
CC fs/btrfs/compression.o
CC [M] net/vmw_vsock/vsock_bpf.o
CC drivers/acpi/acpica/utstrsuppt.o
CC [M] drivers/misc/mei/pci-me.o
CC [M] drivers/dma-buf/st-dma-resv.o
CC [M] drivers/regulator/max20411-regulator.o
CC [M] drivers/misc/vmw_vmci/vmci_route.o
CC [M] net/rds/info.o
CC lib/bitmap-str.o
CC [M] sound/soc/sdca/sdca_regmap.o
CC [M] crypto/curve25519-generic.o
CC arch/x86/kernel/paravirt-spinlocks.o
CC drivers/acpi/acpi_apd.o
AR drivers/char/built-in.a
CC net/ipv6/seg6_hmac.o
COPY drivers/scsi/aic7xxx/aic7xxx_seq.h
COPY drivers/scsi/aic7xxx/aic7xxx_reg.h
COPY drivers/scsi/aic7xxx/aic7xxx_reg_print.c
CC [M] drivers/scsi/aic7xxx/aic7xxx_osm.o
CC kernel/bpf/crypto.o
CC drivers/acpi/acpica/utstrtoul64.o
LD [M] drivers/misc/vmw_vmci/vmw_vmci.o
CC drivers/base/module.o
CC drivers/tty/tty_baudrate.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_proc.o
AR drivers/dma-buf/built-in.a
LD [M] drivers/dma-buf/dmabuf_selftests.o
CC arch/x86/kernel/pvclock.o
CC drivers/mfd/lp8788-irq.o
CC [M] net/sunrpc/svc.o
CC [M] drivers/cxl/core/acpi.o
CC [M] net/dccp/ccids/ccid2.o
CC [M] sound/sound_core.o
CC [M] drivers/nvme/target/tcp.o
CC [M] drivers/misc/mei/hw-me.o
CC drivers/tty/tty_jobctrl.o
CC [M] net/dccp/ackvec.o
CC drivers/nvme/host/hwmon.o
CC kernel/trace/trace_boot.o
CC drivers/tty/n_null.o
CC [M] fs/smb/client/ioctl.o
CC drivers/tty/pty.o
CC [M] drivers/nvdimm/virtio_pmem.o
CC kernel/workqueue.o
CC drivers/tty/tty_audit.o
CC drivers/acpi/acpica/utxface.o
CC [M] net/9p/trans_common.o
CC [M] drivers/regulator/max77693-regulator.o
CC mm/memtest.o
ASN.1 crypto/ecrdsa_params.asn1.[ch]
ASN.1 crypto/ecrdsa_pub_key.asn1.[ch]
CC [M] drivers/scsi/libsas/sas_expander.o
CC [M] crypto/ecrdsa_params.asn1.o
CC [M] net/sctp/endpointola.o
CC [M] crypto/ecrdsa_pub_key.asn1.o
CC drivers/base/auxiliary_sysfs.o
CC drivers/tty/sysrq.o
CC [M] crypto/simd.o
CC [M] fs/nfs/nfs3super.o
CC [M] drivers/tty/n_hdlc.o
LD [M] sound/soc/sdca/snd-soc-sdca.o
CC drivers/mfd/da9055-core.o
CC [M] net/bridge/br_stp_bpdu.o
CC [M] drivers/tty/n_gsm.o
CC [M] drivers/cxl/core/trace.o
CC [M] net/dccp/sysctl.o
CC lib/string_helpers.o
CC kernel/trace/fprobe.o
CC [M] sound/soc/soc-acpi.o
CC fs/btrfs/delayed-ref.o
CC drivers/acpi/acpica/utxfinit.o
CC [M] drivers/regulator/max77826-regulator.o
CC [M] net/netfilter/nf_nat_ovs.o
CC net/ipv6/ioam6_iptunnel.o
CC mm/migrate.o
CC [M] net/vmw_vsock/diag.o
CC arch/x86/kernel/pmem.o
CC drivers/nvme/host/auth.o
CC [M] drivers/tty/moxa.o
CC drivers/nvme/host/pci.o
CC [M] net/sunrpc/svcsock.o
CC [M] drivers/regulator/max77857-regulator.o
CC [M] net/sunrpc/svcauth.o
CC [M] drivers/nvdimm/nd_virtio.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_osm_pci.o
CC [M] net/sctp/associola.o
CC [M] net/sunrpc/svcauth_unix.o
CC [M] net/9p/trans_fd.o
CC [M] drivers/cxl/acpi.o
CC drivers/acpi/acpica/utxferror.o
CC kernel/bpf/relo_core.o
CC drivers/base/hypervisor.o
CC drivers/acpi/acpica/utxfmutex.o
CC [M] net/rds/message.o
CC [M] net/sunrpc/addr.o
CC [M] drivers/tty/mxser.o
CC arch/x86/kernel/jailhouse.o
CC lib/hexdump.o
CC [M] fs/nfs/nfs3client.o
CC [M] drivers/tty/nozomi.o
CC drivers/mfd/da9055-i2c.o
CC net/ipv4/arp.o
CC [M] fs/smb/client/sess.o
CC [M] crypto/ecdsa-x962.o
CC drivers/acpi/acpica/dbcmds.o
CC [M] net/bridge/br_stp_if.o
CC [M] drivers/tty/ttynull.o
CC [M] sound/soc/soc-core.o
CC [M] net/vmw_vsock/vmci_transport.o
COPY drivers/scsi/aic7xxx/aic79xx_seq.h
COPY drivers/scsi/aic7xxx/aic79xx_reg.h
CC [M] drivers/regulator/mc13783-regulator.o
COPY drivers/scsi/aic7xxx/aic79xx_reg_print.c
CC [M] drivers/scsi/aic7xxx/aic79xx_osm.o
LD [M] drivers/nvme/target/nvmet.o
CC net/devres.o
CC lib/kstrtox.o
LD [M] drivers/nvme/target/nvme-loop.o
CC kernel/trace/rethook.o
LD [M] drivers/nvme/target/nvmet-fc.o
LD [M] drivers/nvme/target/nvmet-tcp.o
CC [M] fs/smb/client/export.o
CC [M] net/vmw_vsock/vmci_transport_notify.o
CC [M] net/dccp/ipv4.o
CC [M] drivers/misc/mei/gsc-me.o
CC net/ipv6/addrconf_core.o
CC fs/file_table.o
CC drivers/mfd/da9063-core.o
CC [M] drivers/scsi/aic7xxx/aic79xx_proc.o
CC [M] drivers/scsi/libsas/sas_scsi_host.o
CC drivers/base/soc.o
CC fs/btrfs/relocation.o
CC arch/x86/kernel/pcspeaker.o
LD [M] crypto/aegis128.o
CC [M] drivers/scsi/aic7xxx/aic79xx_osm_pci.o
CC [M] crypto/ecrdsa.o
CC drivers/acpi/acpica/dbconvert.o
LD [M] drivers/nvdimm/nd_pmem.o
LD [M] drivers/nvdimm/nd_btt.o
CC drivers/base/pinctrl.o
AR drivers/nvdimm/built-in.a
CC net/socket.o
CC [M] net/sunrpc/rpcb_clnt.o
CC mm/memory-tiers.o
CC [M] net/9p/trans_xen.o
CC [M] net/netfilter/nf_nat_amanda.o
CC [M] net/dccp/ipv6.o
CC [M] net/sctp/transport.o
CC [M] drivers/regulator/mc13892-regulator.o
CC [M] fs/smb/client/unc.o
CC [M] drivers/cxl/core/region.o
CC drivers/mfd/da9063-irq.o
CC lib/debug_info.o
CC kernel/bpf/btf_iter.o
CC lib/iomap.o
CC kernel/trace/trace_fprobe.o
CC [M] drivers/misc/mei/pci-txe.o
CC net/ipv4/icmp.o
CC drivers/acpi/acpica/dbdisply.o
CC arch/x86/kernel/check.o
CC [M] drivers/tty/rpmsg_tty.o
CC [M] drivers/nvme/host/fabrics.o
CC fs/btrfs/delayed-inode.o
CC lib/iomap_copy.o
LD [M] crypto/crypto_simd.o
CC [M] net/bridge/br_stp_timer.o
CC crypto/rsapubkey.asn1.o
CC [M] drivers/regulator/mc13xxx-regulator-core.o
CC net/compat.o
CC crypto/rsaprivkey.asn1.o
CC [M] drivers/cxl/core/mce.o
LD [M] crypto/ecdsa_generic.o
CC [M] sound/ac97_bus.o
LD [M] crypto/ecrdsa_generic.o
AR crypto/built-in.a
CC [M] fs/nfs/nfs3proc.o
CC arch/x86/kernel/uprobes.o
CC [M] net/rds/recv.o
CC [M] net/vmw_vsock/vmci_transport_notify_qstate.o
CC drivers/base/devcoredump.o
CC drivers/mfd/da9063-i2c.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_core.o
CC drivers/acpi/acpi_platform.o
CC drivers/mfd/max14577.o
CC drivers/acpi/acpica/dbexec.o
CC [M] drivers/nvme/host/fc.o
CC arch/x86/kernel/perf_regs.o
CC net/ipv6/exthdrs_core.o
CC [M] net/netfilter/nf_nat_ftp.o
CC [M] drivers/misc/mei/hw-txe.o
CC kernel/bpf/btf_relocate.o
CC [M] net/9p/trans_virtio.o
CC [M] net/netfilter/nf_nat_irc.o
AR drivers/tty/built-in.a
CC [M] drivers/scsi/isci/init.o
CC net/ipv4/devinet.o
CC [M] fs/smb/client/winucase.o
CC [M] net/bridge/br_netlink.o
CC kernel/bpf/kmem_cache_iter.o
CC drivers/mfd/max77693.o
LD [M] net/9p/9pnet.o
CC [M] fs/smb/client/smb2ops.o
CC [M] drivers/scsi/libsas/sas_task.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_93cx6.o
CC net/sysctl_net.o
CC lib/devres.o
CC [M] net/sctp/chunk.o
CC drivers/acpi/acpica/dbhistry.o
CC mm/migrate_device.o
CC lib/check_signature.o
CC mm/huge_memory.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_pci.o
CC lib/interval_tree.o
CC [M] sound/soc/soc-dapm.o
CC [M] net/vmw_vsock/virtio_transport.o
CC [M] drivers/regulator/mp8859.o
CC fs/btrfs/scrub.o
AR kernel/trace/built-in.a
CC arch/x86/kernel/tracepoint.o
CC [M] drivers/scsi/isci/phy.o
CC drivers/acpi/acpi_pnp.o
CC drivers/base/platform-msi.o
CC [M] net/dccp/diag.o
CC drivers/base/physical_location.o
CC [M] net/bridge/br_netlink_tunnel.o
CC [M] drivers/misc/mei/vsc-tp.o
CC drivers/acpi/acpica/dbinput.o
CC net/ipv4/af_inet.o
CC [M] net/netfilter/nf_nat_sip.o
CC mm/khugepaged.o
CC lib/assoc_array.o
CC [M] net/sunrpc/timer.o
AR kernel/bpf/built-in.a
CC kernel/pid.o
CC [M] net/bridge/br_arp_nd_proxy.o
CC [M] drivers/scsi/libsas/sas_ata.o
CC [M] net/rds/send.o
CC [M] fs/nfs/nfs3xdr.o
CC drivers/mfd/max77843.o
CC [M] net/netfilter/nf_nat_tftp.o
CC net/ipv4/igmp.o
CC lib/list_debug.o
CC [M] drivers/scsi/aic7xxx/aic7xxx_reg_print.o
CC net/ipv4/fib_frontend.o
LD [M] net/9p/9pnet_fd.o
LD [M] net/9p/9pnet_xen.o
LD [M] net/9p/9pnet_virtio.o
CC [M] net/sctp/sm_make_chunk.o
CC arch/x86/kernel/itmt.o
CC drivers/acpi/acpica/dbmethod.o
CC [M] fs/nfs/nfs3acl.o
CC [M] drivers/nvme/host/tcp.o
CC drivers/mfd/max8925-core.o
CC [M] drivers/cxl/pmem.o
CC [M] drivers/regulator/mt6311-regulator.o
CC net/ipv4/fib_semantics.o
CC net/ipv6/ip6_checksum.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_base.o
CC [M] net/bridge/br_sysfs_if.o
AR drivers/cxl/core/built-in.a
LD [M] drivers/cxl/core/cxl_core.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_config.o
CC drivers/acpi/acpica/dbnames.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_scsih.o
CC drivers/base/trace.o
CC [M] drivers/scsi/isci/request.o
CC lib/debugobjects.o
CC fs/super.o
CC net/ipv6/ip6_icmp.o
CC [M] drivers/cxl/security.o
CC [M] net/vmw_vsock/virtio_transport_common.o
CC [M] sound/soc/soc-jack.o
LD [M] net/dccp/dccp.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_transport.o
CC arch/x86/kernel/umip.o
CC [M] drivers/regulator/mt6315-regulator.o
LD [M] net/dccp/dccp_ipv4.o
LD [M] net/dccp/dccp_ipv6.o
LD [M] net/dccp/dccp_diag.o
CC drivers/acpi/power.o
CC [M] net/netfilter/nf_synproxy_core.o
CC drivers/acpi/acpica/dbobject.o
CC [M] drivers/misc/mei/vsc-fw-loader.o
CC arch/x86/kernel/unwind_frame.o
CC fs/btrfs/backref.o
CC fs/btrfs/ulist.o
CC [M] drivers/scsi/aic7xxx/aic79xx_core.o
CC [M] drivers/scsi/aic7xxx/aic79xx_pci.o
CC [M] net/rds/stats.o
CC drivers/mfd/max8925-i2c.o
CC kernel/task_work.o
CC [M] drivers/misc/mei/platform-vsc.o
CC drivers/acpi/event.o
CC drivers/acpi/acpica/dbstats.o
CC kernel/extable.o
CC [M] drivers/scsi/libsas/sas_host_smp.o
CC [M] drivers/cxl/mem.o
CC [M] net/sunrpc/xdr.o
AR drivers/base/built-in.a
CC lib/bitrev.o
CC [M] fs/nfs/nfs4proc.o
CC [M] drivers/misc/ad525x_dpot.o
CC [M] drivers/regulator/mt6323-regulator.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_ctl.o
CC [M] drivers/scsi/isci/remote_device.o
CC [M] net/bridge/br_sysfs_br.o
CC [M] net/sctp/ulpevent.o
LD [M] sound/soundcore.o
LD [M] drivers/nvme/host/nvme-fabrics.o
CC drivers/acpi/acpica/dbutils.o
CC [M] drivers/misc/ad525x_dpot-i2c.o
CC [M] net/sctp/inqueue.o
CC [M] drivers/scsi/aic7xxx/aic79xx_reg_print.o
CC [M] net/sunrpc/sunrpc_syms.o
CC arch/x86/kernel/callthunks.o
CC [M] drivers/scsi/isci/port.o
CC fs/btrfs/qgroup.o
CC [M] net/bridge/br_nf_core.o
LD [M] drivers/misc/mei/mei.o
LD [M] drivers/misc/mei/mei-me.o
LD [M] drivers/nvme/host/nvme-fc.o
LD [M] drivers/misc/mei/mei-gsc.o
CC [M] net/sunrpc/cache.o
CC [M] drivers/regulator/mt6331-regulator.o
LD [M] drivers/misc/mei/mei-txe.o
LD [M] drivers/misc/mei/mei-vsc-hw.o
CC [M] fs/nfs/nfs4xdr.o
LD [M] drivers/misc/mei/mei-vsc.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_trigger_diag.o
CC drivers/mfd/max8997.o
CC drivers/acpi/evged.o
AR drivers/nvme/host/built-in.a
CC [M] fs/nfs/nfs4state.o
CC [M] drivers/cxl/pci.o
CC drivers/acpi/acpica/dbxface.o
CC kernel/params.o
CC net/ipv6/output_core.o
CC [M] net/rds/sysctl.o
CC lib/linear_ranges.o
CC [M] sound/soc/soc-utils.o
CC drivers/acpi/sysfs.o
CC [M] sound/soc/soc-dai.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_warpdrive.o
CC net/ipv4/fib_trie.o
CC [M] fs/smb/client/smb2maperror.o
LD [M] drivers/nvme/host/nvme-tcp.o
AR drivers/nvme/built-in.a
CC fs/btrfs/send.o
CC net/ipv6/protocol.o
CC fs/btrfs/dev-replace.o
CC arch/x86/kernel/cet.o
LD [M] drivers/scsi/libsas/libsas.o
CC drivers/acpi/acpica/rsdump.o
CC [M] drivers/misc/ad525x_dpot-spi.o
CC net/ipv4/fib_notifier.o
CC net/ipv4/inet_fragment.o
CC [M] drivers/regulator/mt6332-regulator.o
CC [M] drivers/regulator/mt6357-regulator.o
CC net/ipv4/ping.o
CC [M] net/netfilter/nf_conncount.o
CC [M] net/rds/threads.o
CC [M] net/vmw_vsock/hyperv_transport.o
CC [M] sound/soc/soc-component.o
CC fs/btrfs/raid56.o
CC [M] net/sctp/outqueue.o
CC [M] net/netfilter/nf_dup_netdev.o
CC drivers/mfd/max8997-irq.o
CC [M] net/bridge/br_multicast.o
CC [M] drivers/scsi/isci/host.o
CC [M] net/netfilter/nf_tables_core.o
LD [M] drivers/cxl/cxl_port.o
LD [M] drivers/cxl/cxl_acpi.o
LD [M] drivers/cxl/cxl_pmem.o
LD [M] drivers/cxl/cxl_mem.o
CC [M] net/bridge/br_mdb.o
AR drivers/acpi/acpica/built-in.a
LD [M] drivers/cxl/cxl_pci.o
AR drivers/cxl/built-in.a
CC [M] sound/soc/soc-pcm.o
CC mm/page_counter.o
CC [M] drivers/scsi/mpt3sas/mpt3sas_debugfs.o
CC lib/packing.o
LD [M] drivers/scsi/aic7xxx/aic7xxx.o
CC [M] net/bridge/br_multicast_eht.o
CC drivers/acpi/property.o
CC [M] drivers/regulator/mt6358-regulator.o
CC fs/btrfs/uuid-tree.o
CC [M] net/vmw_vsock/vsock_loopback.o
CC [M] drivers/misc/dummy-irq.o
CC [M] net/rds/transport.o
CC arch/x86/kernel/shstk.o
CC [M] net/sctp/ulpqueue.o
CC fs/btrfs/props.o
CC [M] sound/soc/soc-devres.o
CC drivers/mfd/max8998.o
CC mm/memcontrol.o
CC [M] net/bridge/br_vlan.o
CC lib/crc-ccitt.o
CC [M] fs/smb/client/smb2transport.o
CC kernel/kthread.o
CC [M] net/rds/loop.o
CC [M] net/bridge/br_vlan_tunnel.o
CC [M] drivers/misc/ics932s401.o
CC [M] drivers/scsi/isci/task.o
CC [M] fs/nfs/nfs4renewd.o
CC fs/char_dev.o
CC net/ipv6/ip6_offload.o
CC drivers/scsi/scsi.o
CC [M] drivers/regulator/mt6359-regulator.o
CC drivers/acpi/debugfs.o
CC kernel/sys_ni.o
LD [M] net/vmw_vsock/vsock.o
LD [M] net/vmw_vsock/vsock_diag.o
LD [M] net/vmw_vsock/vmw_vsock_vmci_transport.o
LD [M] net/vmw_vsock/vmw_vsock_virtio_transport.o
LD [M] net/vmw_vsock/vmw_vsock_virtio_transport_common.o
LD [M] net/vmw_vsock/hv_sock.o
CC mm/vmpressure.o
CC arch/x86/kernel/audit_64.o
CC [M] net/rds/page.o
LD [M] drivers/scsi/aic7xxx/aic79xx.o
CC [M] fs/nfs/nfs4super.o
CC drivers/mfd/max8998-irq.o
CC [M] net/bridge/br_vlan_options.o
CC drivers/mfd/adp5520.o
CC drivers/acpi/acpi_lpat.o
CC [M] fs/nfs/nfs4file.o
CC lib/crc16.o
CC fs/btrfs/free-space-tree.o
CC lib/crc-t10dif.o
CC net/ipv6/tcpv6_offload.o
CC arch/x86/kernel/amd_gart_64.o
CC [M] drivers/misc/tifm_core.o
CC [M] sound/soc/soc-ops.o
CC [M] net/netfilter/nf_tables_api.o
CC arch/x86/kernel/aperture_64.o
LD [M] drivers/scsi/mpt3sas/mpt3sas.o
CC [M] net/sctp/tsnmap.o
CC drivers/scsi/hosts.o
CC [M] sound/soc/soc-link.o
CC [M] net/sunrpc/rpc_pipe.o
CC arch/x86/kernel/mmconf-fam10h_64.o
CC [M] net/sunrpc/sysfs.o
CC arch/x86/kernel/vsmp_64.o
CC [M] drivers/scsi/isci/probe_roms.o
CC [M] drivers/regulator/mt6360-regulator.o
CC drivers/scsi/scsi_ioctl.o
CC fs/btrfs/tree-checker.o
CC [M] fs/smb/client/smb2misc.o
CC [M] drivers/scsi/isci/remote_node_context.o
CC fs/btrfs/space-info.o
CC net/ipv6/exthdrs_offload.o
CC drivers/scsi/scsicam.o
CC [M] net/sctp/bind_addr.o
CC [M] net/netfilter/nft_chain_filter.o
CC fs/stat.o
CC [M] drivers/scsi/isci/remote_node_table.o
CC mm/swap_cgroup.o
CC [M] net/sunrpc/svc_xprt.o
CC [M] arch/x86/kernel/msr.o
CC kernel/nsproxy.o
CC lib/crc-itu-t.o
CC net/ipv4/ip_tunnel_core.o
CC [M] arch/x86/kernel/cpuid.o
HOSTCC lib/gen_crc32table
HOSTCC lib/gen_crc64table
CC drivers/acpi/acpi_fpdt.o
CC [M] net/netfilter/nf_tables_trace.o
CC drivers/scsi/scsi_error.o
CC [M] net/rds/rdma.o
CC [M] drivers/misc/tifm_7xx1.o
CC net/ipv6/inet6_hashtables.o
CC [M] net/netfilter/nft_immediate.o
CC [M] drivers/regulator/mt6370-regulator.o
CC [M] net/sunrpc/xprtmultipath.o
CC [M] drivers/misc/phantom.o
CC drivers/mfd/tps6586x.o
CC [M] net/sunrpc/debugfs.o
CC fs/btrfs/block-rsv.o
CC kernel/notifier.o
CC net/ipv4/gre_offload.o
CC drivers/acpi/acpi_lpit.o
CC [M] drivers/scsi/isci/unsolicited_frame_control.o
CC [M] sound/soc/soc-card.o
CC net/ipv4/metrics.o
CC [M] net/rds/tcp.o
CC [M] net/bridge/br_mst.o
CC [M] fs/smb/client/smb2pdu.o
CC drivers/acpi/acpi_watchdog.o
CC [M] drivers/regulator/mt6397-regulator.o
CC kernel/ksysfs.o
CC drivers/acpi/prmt.o
CC [M] net/bridge/br_switchdev.o
CC [M] drivers/regulator/qcom-labibb-regulator.o
AR arch/x86/kernel/built-in.a
AR arch/x86/built-in.a
CC fs/btrfs/delalloc-space.o
CC [M] drivers/regulator/qcom_spmi-regulator.o
CC [M] fs/nfs/delegation.o
CC net/ipv4/netlink.o
CC kernel/cred.o
CC lib/xxhash.o
CC [M] net/sctp/socket.o
CC [M] net/sunrpc/backchannel_rqst.o
CC net/ipv4/nexthop.o
CC kernel/reboot.o
CC net/ipv4/udp_tunnel_stub.o
CC drivers/acpi/acpi_pcc.o
CC fs/btrfs/block-group.o
CC [M] net/sunrpc/stats.o
CC [M] drivers/misc/bh1770glc.o
CC [M] drivers/scsi/isci/port_config.o
CC mm/hugetlb_cgroup.o
CC drivers/scsi/scsi_lib.o
CC fs/btrfs/discard.o
CC [M] net/rds/tcp_connect.o
CC fs/exec.o
CC [M] drivers/regulator/qcom_usb_vbus-regulator.o
CC [M] fs/nfs/nfs4idmap.o
CC drivers/scsi/constants.o
CC [M] net/rds/tcp_listen.o
CC [M] net/rds/tcp_recv.o
CC mm/memory-failure.o
CC [M] sound/soc/soc-compress.o
CC [M] net/sunrpc/sysctl.o
CC lib/genalloc.o
CC mm/kmemleak.o
CC drivers/scsi/scsi_lib_dma.o
CC [M] drivers/regulator/palmas-regulator.o
CC drivers/mfd/tps65090.o
CC net/ipv4/sysctl_net_ipv4.o
CC net/ipv6/mcast_snoop.o
CC [M] net/sctp/primitive.o
CC [M] drivers/misc/apds990x.o
CC drivers/acpi/acpi_ffh.o
CC drivers/acpi/acpi_adxl.o
CC [M] drivers/regulator/pca9450-regulator.o
CC drivers/acpi/ac.o
CC [M] net/netfilter/nft_cmp.o
CC net/ipv6/tcp_ao.o
LD [M] drivers/scsi/isci/isci.o
CC [M] drivers/regulator/pv88060-regulator.o
CC [M] net/sctp/output.o
CC fs/pipe.o
CC [M] net/bridge/br_mrp_switchdev.o
CC kernel/async.o
CC drivers/acpi/button.o
CC mm/page_isolation.o
CC drivers/acpi/fan_core.o
CC [M] net/bridge/br_mrp.o
CC [M] net/rds/tcp_send.o
CC kernel/range.o
CC drivers/mfd/aat2870-core.o
CC [M] drivers/misc/enclosure.o
CC drivers/scsi/scsi_scan.o
CC [M] sound/soc/soc-generic-dmaengine-pcm.o
CC drivers/acpi/fan_attr.o
CC [M] sound/soc/soc-ac97.o
CC kernel/smpboot.o
CC [M] fs/nfs/callback.o
CC [M] net/rds/tcp_stats.o
CC drivers/acpi/fan_hwmon.o
CC [M] net/ipv6/ah6.o
CC [M] net/sctp/input.o
LD [M] net/rds/rds.o
CC [M] net/bridge/br_mrp_netlink.o
CC [M] net/netfilter/nft_range.o
CC fs/btrfs/reflink.o
CC [M] drivers/regulator/pv88080-regulator.o
CC drivers/mfd/intel-lpss.o
CC lib/textsearch.o
CC kernel/ucount.o
CC net/ipv4/proc.o
CC mm/zpool.o
CC drivers/mfd/intel-lpss-pci.o
CC fs/btrfs/subpage.o
CC [M] net/sctp/debug.o
CC [M] net/sctp/stream.o
CC [M] fs/nfs/callback_xdr.o
CC [M] fs/nfs/callback_proc.o
LD [M] net/sunrpc/sunrpc.o
CC lib/percpu_counter.o
LD [M] sound/soc/snd-soc-acpi.o
CC [M] net/bridge/br_cfm.o
CC [M] net/ipv6/esp6.o
CC kernel/regset.o
CC drivers/mfd/intel-lpss-acpi.o
CC lib/iommu-helper.o
CC net/ipv4/fib_rules.o
CC lib/fault-inject.o
CC [M] drivers/regulator/pv88090-regulator.o
CC drivers/acpi/pci_slot.o
CC fs/namei.o
GEN drivers/scsi/scsi_devinfo_tbl.c
CC [M] fs/smb/client/smb2inode.o
CC [M] drivers/misc/smpro-errmon.o
CC fs/fcntl.o
CC fs/btrfs/tree-mod-log.o
CC fs/ioctl.o
CC drivers/scsi/scsi_devinfo.o
CC net/ipv4/ipmr.o
CC [M] drivers/regulator/pwm-regulator.o
CC drivers/acpi/processor_driver.o
CC mm/zsmalloc.o
CC drivers/scsi/scsi_netlink.o
CC drivers/acpi/processor_thermal.o
CC drivers/scsi/scsi_sysctl.o
LD [M] net/rds/rds_tcp.o
CC drivers/mfd/palmas.o
LD [M] sound/soc/snd-soc-core.o
CC [M] fs/smb/client/smb2file.o
CC [M] drivers/misc/smpro-misc.o
CC net/ipv4/ipmr_base.o
CC [M] net/ipv6/esp6_offload.o
CC [M] fs/nfs/nfs4namespace.o
CC kernel/ksyms_common.o
CC [M] net/sctp/auth.o
CC [M] drivers/misc/hpilo.o
CC [M] net/sctp/offload.o
CC [M] drivers/regulator/tps51632-regulator.o
CC [M] fs/smb/client/cifsacl.o
CC [M] net/netfilter/nft_bitwise.o
CC fs/readdir.o
CC [M] fs/nfs/nfs4getroot.o
CC [M] fs/smb/client/fs_context.o
CC [M] drivers/misc/apds9802als.o
CC drivers/scsi/scsi_proc.o
CC [M] drivers/regulator/pcap-regulator.o
CC drivers/mfd/rc5t583.o
CC fs/select.o
CC [M] net/sctp/stream_sched.o
CC lib/error-inject.o
CC [M] net/sctp/stream_sched_prio.o
CC fs/btrfs/extent-io-tree.o
CC drivers/acpi/processor_idle.o
CC [M] drivers/regulator/raa215300.o
CC [M] fs/nfs/nfs4client.o
CC [M] net/bridge/br_cfm_netlink.o
CC [M] drivers/misc/isl29003.o
CC kernel/groups.o
CC net/ipv4/syncookies.o
CC mm/early_ioremap.o
CC fs/btrfs/fs.o
CC drivers/acpi/processor_throttling.o
CC [M] fs/smb/client/dns_resolve.o
CC [M] net/netfilter/nft_byteorder.o
CC [M] drivers/misc/isl29020.o
CC fs/btrfs/messages.o
CC fs/btrfs/bio.o
CC kernel/vhost_task.o
CC fs/btrfs/lru_cache.o
CC [M] drivers/regulator/rc5t583-regulator.o
CC lib/syscall.o
CC [M] net/ipv6/ipcomp6.o
CC [M] drivers/misc/tsl2550.o
CC drivers/scsi/scsi_debugfs.o
CC fs/btrfs/raid-stripe-tree.o
CC lib/dynamic_debug.o
CC kernel/kcmp.o
CC [M] net/sctp/stream_sched_rr.o
CC [M] drivers/regulator/rt4801-regulator.o
CC [M] net/sctp/stream_sched_fc.o
CC [M] drivers/misc/ds1682.o
CC [M] drivers/misc/hmc6352.o
CC fs/btrfs/fiemap.o
ASN.1 fs/smb/client/cifs_spnego_negtokeninit.asn1.[ch]
CC [M] fs/nfs/nfs4session.o
CC drivers/acpi/processor_perflib.o
CC [M] drivers/regulator/rt4803.o
CC fs/dcache.o
CC mm/numa.o
CC [M] drivers/regulator/rt4831-regulator.o
CC drivers/mfd/rc5t583-irq.o
CC [M] net/ipv6/xfrm6_tunnel.o
CC [M] drivers/regulator/rt5033-regulator.o
CC [M] fs/nfs/dns_resolve.o
CC [M] drivers/regulator/rt5120-regulator.o
CC [M] fs/nfs/nfs4trace.o
CC mm/numa_memblks.o
CC lib/errname.o
CC [M] net/netfilter/nft_payload.o
CC [M] net/bridge/br_netfilter_hooks.o
CC drivers/scsi/scsi_trace.o
CC [M] drivers/regulator/rt5190a-regulator.o
CC drivers/mfd/syscon.o
CC [M] net/sctp/stream_interleave.o
CC [M] net/sctp/proc.o
CC [M] net/netfilter/nft_lookup.o
CC kernel/freezer.o
CC mm/balloon_compaction.o
CC [M] fs/nfs/nfs4sysctl.o
CC fs/inode.o
CC kernel/profile.o
CC drivers/scsi/scsi_logging.o
CC [M] net/sctp/sysctl.o
CC drivers/acpi/container.o
CC [M] net/sctp/ipv6.o
CC [M] net/netfilter/nft_dynset.o
CC drivers/scsi/scsi_pm.o
CC [M] fs/smb/client/namespace.o
CC [M] net/bridge/br_netfilter_ipv6.o
CC [M] drivers/misc/vmw_balloon.o
CC net/ipv4/netfilter.o
CC drivers/acpi/thermal_lib.o
CC [M] net/ipv6/tunnel6.o
CC drivers/scsi/scsi_dh.o
CC [M] net/netfilter/nft_meta.o
CC fs/btrfs/direct-io.o
CC [M] drivers/misc/lattice-ecp3-config.o
LD [M] net/bridge/bridge.o
CC drivers/mfd/as3711.o
CC net/ipv4/tcp_cubic.o
CC lib/nlattr.o
CC fs/btrfs/acl.o
CC fs/attr.o
CC lib/cpu_rmap.o
CC drivers/mfd/intel_soc_pmic_crc.o
CC [M] fs/smb/client/reparse.o
CC mm/secretmem.o
CC [M] drivers/regulator/rt5739.o
CC drivers/acpi/thermal.o
CC fs/btrfs/zoned.o
CC kernel/stacktrace.o
CC [M] fs/nfs/pnfs.o
CC [M] net/netfilter/nft_rt.o
CC [M] fs/nfs/pnfs_dev.o
CC [M] drivers/regulator/rt5759-regulator.o
CC [M] drivers/misc/dw-xdata-pcie.o
CC net/ipv4/tcp_sigpool.o
CC fs/btrfs/verity.o
CC kernel/dma.o
CC drivers/mfd/intel_soc_pmic_chtwc.o
CC [M] drivers/misc/xilinx_sdfec.o
CC [M] fs/smb/client/xattr.o
CC fs/bad_inode.o
CC drivers/acpi/nhlt.o
CC mm/userfaultfd.o
CC net/ipv4/tcp_bpf.o
CC lib/closure.o
CC [M] drivers/regulator/rt6160-regulator.o
CC [M] drivers/misc/tps6594-esm.o
CC [M] drivers/mfd/88pm800.o
CC mm/page_idle.o
CC [M] drivers/misc/tps6594-pfsm.o
CC drivers/scsi/scsi_bsg.o
CC kernel/smp.o
CC [M] net/sctp/diag.o
CC [M] drivers/mfd/88pm80x.o
CC [M] net/netfilter/nft_exthdr.o
CC drivers/acpi/acpi_memhotplug.o
CC [M] drivers/mfd/88pm805.o
CC lib/dynamic_queue_limits.o
CC drivers/acpi/ioapic.o
CC [M] drivers/regulator/rt6190-regulator.o
CC [M] net/netfilter/nft_last.o
CC net/ipv4/udp_bpf.o
CC kernel/uid16.o
LD [M] net/bridge/br_netfilter.o
CC drivers/scsi/scsi_common.o
CC [M] net/ipv6/mip6.o
CC drivers/scsi/virtio_scsi.o
CC [M] drivers/mfd/sm501.o
CC drivers/acpi/battery.o
CC [M] net/ipv6/ip6_vti.o
CC [M] fs/smb/client/cifs_spnego.o
CC [M] drivers/mfd/bcm590xx.o
CC drivers/acpi/hed.o
CC fs/file.o
CC kernel/module_signature.o
CC [M] drivers/misc/nsm.o
LD [M] net/sctp/sctp.o
CC mm/usercopy.o
CC kernel/kallsyms.o
CC [M] fs/nfs/pnfs_nfs.o
CC drivers/acpi/bgrt.o
CC [M] fs/smb/client/dfs_cache.o
CC [M] net/netfilter/nft_counter.o
CC drivers/acpi/cppc_acpi.o
CC net/ipv4/cipso_ipv4.o
CC [M] fs/nfs/nfs42proc.o
CC [M] net/ipv6/sit.o
CC lib/glob.o
CC fs/filesystems.o
CC lib/digsig.o
CC net/ipv4/xfrm4_policy.o
CC drivers/scsi/sd.o
CC [M] drivers/mfd/bd9571mwv.o
CC net/ipv4/xfrm4_state.o
CC [M] drivers/regulator/rt6245-regulator.o
CC [M] fs/nfs/nfs42xattr.o
CC mm/memremap.o
CC drivers/acpi/spcr.o
CC [M] fs/smb/client/dfs.o
CC [M] net/ipv6/ip6_tunnel.o
CC drivers/acpi/acpi_dbg.o
LD [M] net/sctp/sctp_diag.o
LD [M] fs/nfs/nfs.o
CC [M] fs/smb/client/netlink.o
CC [M] net/netfilter/nft_objref.o
CC drivers/acpi/viot.o
CC net/ipv4/xfrm4_input.o
CC [M] net/ipv6/ip6_gre.o
AR fs/btrfs/built-in.a
CC mm/hmm.o
CC [M] net/ipv6/fou6.o
CC net/ipv4/xfrm4_output.o
CC mm/memfd.o
CC lib/strncpy_from_user.o
AR drivers/misc/built-in.a
CC fs/namespace.o
LD [M] fs/nfs/nfsv2.o
CC [M] drivers/regulator/rtmv20-regulator.o
CC drivers/scsi/sd_dif.o
CC kernel/acct.o
CC [M] net/netfilter/nft_inner.o
CC fs/seq_file.o
CC net/ipv4/xfrm4_protocol.o
CC [M] net/netfilter/nft_chain_route.o
CC net/ipv4/tcp_ao.o
CC net/ipv4/bpf_tcp_ca.o
CC [M] drivers/mfd/cros_ec_dev.o
LD [M] fs/nfs/nfsv3.o
CC [M] drivers/regulator/rtq2134-regulator.o
CC [M] net/ipv6/ip6_udp_tunnel.o
CC [M] fs/smb/client/cifs_swn.o
CC drivers/scsi/sd_zbc.o
CC fs/xattr.o
CC [M] net/netfilter/nf_tables_offload.o
CC [M] drivers/acpi/acpi_ipmi.o
CC kernel/vmcore_info.o
CC lib/strnlen_user.o
CC [M] net/netfilter/nft_set_hash.o
CC mm/ptdump.o
CC [M] drivers/regulator/rtq6752-regulator.o
CC [M] net/ipv4/ip_tunnel.o
CC lib/net_utils.o
CC [M] net/ipv4/ipip.o
CC mm/page_reporting.o
CC fs/libfs.o
CC [M] fs/smb/client/fscache.o
CC [M] drivers/mfd/cs42l43.o
CC [M] fs/smb/client/smb1ops.o
CC lib/sg_pool.o
CC [M] net/netfilter/nft_set_bitmap.o
CC [M] fs/smb/client/cifssmb.o
CC [M] drivers/regulator/rtq2208-regulator.o
CC kernel/elfcorehdr.o
CC [M] drivers/mfd/cs42l43-i2c.o
CC [M] fs/smb/client/cifs_spnego_negtokeninit.asn1.o
CC drivers/scsi/sr.o
CC lib/memregion.o
CC fs/fs-writeback.o
CC [M] fs/smb/client/asn1.o
CC [M] net/netfilter/nft_set_rbtree.o
CC lib/irq_poll.o
CC [M] drivers/acpi/acpi_video.o
CC drivers/scsi/sr_ioctl.o
CC [M] drivers/mfd/lp873x.o
CC [M] drivers/regulator/sky81452-regulator.o
CC [M] net/ipv4/fou_core.o
CC fs/pnode.o
CC [M] drivers/mfd/tqmx86.o
CC [M] net/netfilter/nft_set_pipapo.o
CC lib/stackdepot.o
CC [M] drivers/acpi/video_detect.o
CC [M] drivers/regulator/slg51000-regulator.o
CC drivers/scsi/sr_vendor.o
CC [M] drivers/acpi/acpi_tad.o
CC [M] drivers/regulator/sy7636a-regulator.o
CC mm/bootmem_info.o
CC [M] net/ipv4/fou_nl.o
CC mm/execmem.o
CC [M] drivers/regulator/tps6105x-regulator.o
CC kernel/crash_reserve.o
CC [M] net/netfilter/nft_set_pipapo_avx2.o
CC [M] drivers/acpi/platform_profile.o
CC [M] net/ipv4/fou_bpf.o
CC drivers/scsi/sg.o
CC [M] drivers/regulator/tps62360-regulator.o
CC fs/splice.o
CC lib/ref_tracker.o
CC [M] drivers/scsi/raid_class.o
CC mm/shmem_quota.o
CC [M] net/netfilter/nft_ct_fast.o
CC [M] drivers/scsi/scsi_transport_spi.o
CC [M] drivers/acpi/sbshc.o
CC mm/pt_reclaim.o
CC kernel/kexec_core.o
CC lib/bootconfig.o
CC [M] net/netfilter/nft_compat.o
CC kernel/crash_core.o
CC lib/asn1_decoder.o
CC [M] net/netfilter/nft_connlimit.o
CC [M] net/ipv4/gre_demux.o
CC [M] drivers/mfd/arizona-core.o
CC [M] drivers/mfd/arizona-irq.o
CC [M] drivers/regulator/tps65023-regulator.o
CC [M] drivers/scsi/scsi_transport_fc.o
CC [M] drivers/scsi/scsi_transport_iscsi.o
CC fs/sync.o
CC [M] drivers/acpi/sbs.o
CC [M] drivers/regulator/tps6507x-regulator.o
CC [M] net/netfilter/nft_numgen.o
CC [M] net/netfilter/nft_ct.o
CC fs/utimes.o
CC lib/asn1_encoder.o
GEN lib/oid_registry_data.c
CC [M] drivers/regulator/tps65086-regulator.o
CC fs/d_path.o
CC kernel/kexec.o
CC [M] net/ipv4/ip_gre.o
LD [M] fs/nfs/nfsv4.o
AR net/ipv6/built-in.a
CC [M] drivers/acpi/ec_sys.o
CC [M] drivers/regulator/tps65090-regulator.o
CC [M] drivers/scsi/scsi_transport_sas.o
CC [M] net/netfilter/nft_flow_offload.o
CC fs/stack.o
CC kernel/kexec_file.o
CC [M] mm/hwpoison-inject.o
CC lib/ucs2_string.o
CC lib/ubsan.o
CC lib/sbitmap.o
CC [M] net/netfilter/nft_limit.o
CC [M] drivers/mfd/wm5102-tables.o
CC [M] drivers/scsi/scsi_transport_srp.o
CC [M] drivers/mfd/wm5110-tables.o
CC [M] net/ipv4/udp_tunnel_core.o
CC fs/fs_struct.o
CC [M] drivers/scsi/iscsi_boot_sysfs.o
CC [M] drivers/regulator/tps6524x-regulator.o
CC fs/statfs.o
CC [M] net/netfilter/nft_nat.o
CC [M] drivers/regulator/tps6586x-regulator.o
CC [M] drivers/regulator/tps65910-regulator.o
CC [M] drivers/regulator/tps65912-regulator.o
CC fs/fs_pin.o
CC [M] drivers/acpi/pfr_update.o
CC fs/nsfs.o
CC [M] drivers/acpi/pfr_telemetry.o
CC [M] drivers/scsi/megaraid.o
CC [M] drivers/acpi/acpi_pad.o
CC kernel/compat.o
CC [M] drivers/acpi/acpi_extlog.o
CC [M] drivers/regulator/tps65132-regulator.o
CC [M] drivers/acpi/acpi_configfs.o
CC [M] net/netfilter/nft_queue.o
CC lib/group_cpus.o
CC fs/fs_types.o
CC [M] drivers/mfd/wm8997-tables.o
AR mm/built-in.a
CC [M] drivers/scsi/vmw_pvscsi.o
CC [M] net/netfilter/nft_quota.o
CC lib/fw_table.o
CC kernel/utsname.o
CC [M] net/ipv4/udp_tunnel_nic.o
CC [M] lib/crc7.o
CC [M] drivers/scsi/xen-scsifront.o
CC [M] net/netfilter/nft_reject.o
CC [M] drivers/regulator/tps68470-regulator.o
CC fs/fs_context.o
CC kernel/user_namespace.o
CC [M] drivers/mfd/wm8998-tables.o
CC kernel/pid_namespace.o
CC [M] net/netfilter/nft_reject_inet.o
CC [M] drivers/regulator/twl-regulator.o
CC [M] drivers/mfd/cs47l24-tables.o
CC [M] lib/crc8.o
CC [M] net/ipv4/ip_vti.o
UPD kernel/config_data
CC kernel/stop_machine.o
CC [M] net/netfilter/nft_reject_netdev.o
CC [M] drivers/mfd/arizona-i2c.o
CC [M] net/ipv4/ah4.o
CC [M] lib/bch.o
CC kernel/audit.o
CC [M] drivers/scsi/storvsc_drv.o
CC [M] net/netfilter/nft_tunnel.o
CC [M] net/ipv4/esp4.o
CC fs/fs_parser.o
LD [M] fs/smb/client/cifs.o
CC kernel/auditfilter.o
CC [M] net/ipv4/esp4_offload.o
CC fs/fsopen.o
CC kernel/auditsc.o
CC [M] drivers/scsi/ses.o
CC [M] drivers/mfd/arizona-spi.o
CC [M] drivers/scsi/scsi_debug.o
LD [M] drivers/acpi/video.o
AR drivers/acpi/built-in.a
CC [M] drivers/regulator/twl6030-regulator.o
CC fs/init.o
CC [M] lib/ts_kmp.o
CC [M] drivers/mfd/wcd934x.o
CC kernel/audit_watch.o
CC fs/kernel_read_file.o
CC [M] drivers/mfd/wm8994-core.o
CC [M] net/ipv4/ipcomp.o
CC [M] drivers/regulator/wm831x-dcdc.o
CC [M] net/ipv4/xfrm4_tunnel.o
CC fs/mnt_idmapping.o
CC [M] net/ipv4/tunnel4.o
CC [M] drivers/mfd/wm8994-irq.o
CC fs/remap_range.o
CC [M] drivers/regulator/wm831x-isink.o
CC [M] net/ipv4/inet_diag.o
CC [M] net/netfilter/nft_log.o
CC [M] drivers/regulator/wm831x-ldo.o
CC [M] lib/ts_bm.o
CC [M] net/ipv4/tcp_diag.o
CC [M] net/netfilter/nft_masq.o
CC fs/pidfs.o
CC fs/buffer.o
CC [M] net/ipv4/udp_diag.o
CC [M] drivers/regulator/wm8350-regulator.o
CC kernel/audit_fsnotify.o
CC fs/mpage.o
CC [M] net/ipv4/raw_diag.o
CC [M] net/netfilter/nft_redir.o
CC [M] lib/ts_fsm.o
CC [M] net/netfilter/nft_hash.o
CC kernel/audit_tree.o
CC [M] lib/notifier-error-inject.o
CC [M] net/netfilter/nft_fib.o
CC kernel/kprobes.o
CC [M] drivers/regulator/wm8400-regulator.o
CC [M] drivers/regulator/wm8994-regulator.o
CC [M] drivers/mfd/wm8994-regmap.o
CC [M] lib/pm-notifier-error-inject.o
CC fs/proc_namespace.o
CC fs/direct-io.o
CC [M] drivers/mfd/madera-core.o
CC kernel/fail_function.o
CC [M] net/netfilter/nft_fib_inet.o
CC fs/eventpoll.o
CC [M] lib/memory-notifier-error-inject.o
CC fs/anon_inodes.o
CC kernel/hung_task.o
CC [M] net/netfilter/nft_fib_netdev.o
CC [M] drivers/mfd/cs47l15-tables.o
CC kernel/watchdog.o
CC [M] net/netfilter/nft_socket.o
CC kernel/watchdog_perf.o
LD [M] drivers/scsi/hv_storvsc.o
CC kernel/seccomp.o
CC [M] lib/lru_cache.o
CC [M] drivers/mfd/cs47l35-tables.o
CC [M] drivers/mfd/cs47l85-tables.o
CC [M] net/netfilter/nft_osf.o
CC drivers/scsi/scsi_sysfs.o
CC [M] drivers/mfd/cs47l90-tables.o
CC [M] net/netfilter/nft_tproxy.o
CC kernel/relay.o
CC fs/signalfd.o
CC [M] drivers/mfd/cs47l92-tables.o
CC fs/timerfd.o
CC [M] net/netfilter/nft_xfrm.o
CC kernel/utsname_sysctl.o
CC [M] drivers/mfd/madera-i2c.o
CC [M] net/netfilter/nft_synproxy.o
CC [M] drivers/mfd/madera-spi.o
CC [M] drivers/mfd/cs40l50-core.o
CC [M] net/netfilter/nft_chain_nat.o
CC kernel/delayacct.o
CC fs/eventfd.o
CC [M] net/netfilter/nft_dup_netdev.o
CC fs/userfaultfd.o
CC kernel/taskstats.o
CC kernel/tsacct.o
CC [M] net/netfilter/nft_fwd_netdev.o
CC fs/aio.o
CC [M] net/netfilter/nf_flow_table_core.o
CC [M] drivers/mfd/cs40l50-i2c.o
CC [M] net/netfilter/nf_flow_table_ip.o
CC kernel/tracepoint.o
CC kernel/latencytop.o
CC fs/dax.o
CC kernel/irq_work.o
CC [M] net/netfilter/nf_flow_table_offload.o
CC [M] drivers/mfd/cs40l50-spi.o
LD [M] net/ipv4/fou.o
CC kernel/static_call.o
CC [M] net/netfilter/nf_flow_table_xdp.o
CC [M] drivers/mfd/tps6105x.o
AR lib/lib.a
GEN lib/crc32table.h
GEN lib/crc64table.h
CC lib/oid_registry.o
LD [M] net/ipv4/gre.o
CC [M] net/netfilter/nf_flow_table_inet.o
CC [M] drivers/mfd/tps65010.o
CC kernel/static_call_inline.o
LD [M] net/ipv4/udp_tunnel.o
CC fs/locks.o
CC kernel/user-return-notifier.o
AR net/ipv4/built-in.a
AR drivers/regulator/built-in.a
CC [M] drivers/mfd/tps6507x.o
CC fs/binfmt_script.o
CC kernel/padata.o
CC [M] net/netfilter/x_tables.o
CC kernel/jump_label.o
CC [M] drivers/mfd/tps65086.o
CC fs/binfmt_elf.o
CC [M] net/netfilter/xt_tcpudp.o
CC [M] drivers/mfd/tps6594-core.o
CC fs/compat_binfmt_elf.o
CC [M] net/netfilter/xt_mark.o
CC [M] drivers/mfd/tps6594-i2c.o
CC fs/backing-file.o
CC [M] net/netfilter/xt_connmark.o
CC [M] drivers/mfd/tps6594-spi.o
CC [M] net/netfilter/xt_nat.o
CC fs/mbcache.o
CC [M] drivers/mfd/mc13xxx-core.o
CC [M] drivers/mfd/mc13xxx-spi.o
CC fs/posix_acl.o
CC fs/coredump.o
CC kernel/context_tracking.o
CC kernel/iomem.o
CC [M] net/netfilter/xt_AUDIT.o
CC [M] net/netfilter/xt_CHECKSUM.o
CC [M] drivers/mfd/mc13xxx-i2c.o
CC [M] net/netfilter/xt_CLASSIFY.o
CC fs/drop_caches.o
CC [M] net/netfilter/xt_CONNSECMARK.o
CC kernel/rseq.o
CC fs/sysctls.o
CC kernel/watch_queue.o
CC fs/fhandle.o
CC [M] net/netfilter/xt_CT.o
CC [M] net/netfilter/xt_DSCP.o
CHK kernel/kheaders_data.tar.xz
AR drivers/scsi/built-in.a
CC [M] drivers/mfd/ocelot-core.o
CC fs/bpf_fs_kfuncs.o
GEN kernel/kheaders_data.tar.xz
CC [M] fs/binfmt_misc.o
CC [M] drivers/mfd/ocelot-spi.o
CC lib/crc32.o
GZIP kernel/config_data.gz
CC lib/crc64.o
CC [M] drivers/mfd/axp20x.o
CC [M] net/netfilter/xt_HL.o
CC [M] drivers/mfd/axp20x-i2c.o
CC kernel/configs.o
CC [M] net/netfilter/xt_HMARK.o
CC [M] drivers/mfd/lp3943.o
CC [M] net/netfilter/xt_LED.o
CC [M] drivers/mfd/ti-lmu.o
CC [M] net/netfilter/xt_LOG.o
CC [M] drivers/mfd/da9062-core.o
CC [M] net/netfilter/xt_NETMAP.o
CC [M] net/netfilter/xt_NFLOG.o
CC [M] drivers/mfd/da9150-core.o
CC [M] net/netfilter/xt_NFQUEUE.o
CC [M] drivers/mfd/max77541.o
CC [M] drivers/mfd/max8907.o
CC [M] drivers/mfd/mp2629.o
CC [M] net/netfilter/xt_RATEEST.o
CC [M] net/netfilter/xt_REDIRECT.o
CC [M] drivers/mfd/mt6360-core.o
CC [M] net/netfilter/xt_MASQUERADE.o
CC [M] drivers/mfd/mt6370.o
CC [M] drivers/mfd/mt6397-core.o
CC [M] drivers/mfd/mt6397-irq.o
CC [M] net/netfilter/xt_SECMARK.o
CC [M] net/netfilter/xt_TPROXY.o
CC [M] drivers/mfd/mt6358-irq.o
CC [M] net/netfilter/xt_TCPMSS.o
CC [M] net/netfilter/xt_TCPOPTSTRIP.o
CC [M] drivers/mfd/kempld-core.o
CC [M] net/netfilter/xt_TEE.o
CC [M] drivers/mfd/intel_quark_i2c_gpio.o
CC [M] drivers/mfd/lpc_sch.o
CC [M] net/netfilter/xt_TRACE.o
CC [M] drivers/mfd/lpc_ich.o
CC [M] net/netfilter/xt_IDLETIMER.o
CC [M] drivers/mfd/rdc321x-southbridge.o
AR lib/built-in.a
CC [M] net/netfilter/xt_addrtype.o
CC [M] drivers/mfd/janz-cmodio.o
CC [M] net/netfilter/xt_bpf.o
CC [M] drivers/mfd/vx855.o
CC [M] drivers/mfd/wl1273-core.o
CC [M] net/netfilter/xt_cluster.o
CC [M] net/netfilter/xt_comment.o
CC [M] net/netfilter/xt_connbytes.o
CC [M] drivers/mfd/si476x-cmd.o
CC [M] net/netfilter/xt_connlabel.o
CC [M] drivers/mfd/si476x-prop.o
CC [M] drivers/mfd/si476x-i2c.o
CC [M] net/netfilter/xt_connlimit.o
CC [M] net/netfilter/xt_conntrack.o
AR kernel/built-in.a
CC [M] net/netfilter/xt_cpu.o
CC [M] drivers/mfd/intel_pmc_bxt.o
CC [M] drivers/mfd/viperboard.o
CC [M] drivers/mfd/lm3533-core.o
CC [M] drivers/mfd/lm3533-ctrlbank.o
CC [M] net/netfilter/xt_dccp.o
CC [M] drivers/mfd/retu-mfd.o
CC [M] net/netfilter/xt_devgroup.o
CC [M] drivers/mfd/iqs62x.o
CC [M] net/netfilter/xt_dscp.o
CC [M] net/netfilter/xt_ecn.o
CC [M] drivers/mfd/menf21bmc.o
CC [M] net/netfilter/xt_esp.o
CC [M] drivers/mfd/dln2.o
CC [M] net/netfilter/xt_hashlimit.o
CC [M] net/netfilter/xt_helper.o
CC [M] drivers/mfd/rt4831.o
CC [M] net/netfilter/xt_hl.o
CC [M] net/netfilter/xt_ipcomp.o
CC [M] drivers/mfd/rt5033.o
AR fs/built-in.a
CC [M] drivers/mfd/rt5120.o
CC [M] net/netfilter/xt_iprange.o
CC [M] net/netfilter/xt_l2tp.o
CC [M] drivers/mfd/sky81452.o
CC [M] drivers/mfd/intel_soc_pmic_bxtwc.o
CC [M] net/netfilter/xt_length.o
CC [M] drivers/mfd/intel_soc_pmic_chtdc_ti.o
CC [M] net/netfilter/xt_limit.o
CC [M] net/netfilter/xt_mac.o
CC [M] net/netfilter/xt_multiport.o
CC [M] drivers/mfd/intel_soc_pmic_mrfld.o
CC [M] drivers/mfd/rave-sp.o
CC [M] net/netfilter/xt_nfacct.o
CC [M] drivers/mfd/simple-mfd-i2c.o
CC [M] net/netfilter/xt_osf.o
CC [M] drivers/mfd/smpro-core.o
CC [M] net/netfilter/xt_owner.o
CC [M] drivers/mfd/intel-m10-bmc-core.o
CC [M] net/netfilter/xt_cgroup.o
CC [M] net/netfilter/xt_physdev.o
CC [M] drivers/mfd/intel-m10-bmc-spi.o
CC [M] net/netfilter/xt_pkttype.o
CC [M] drivers/mfd/atc260x-core.o
CC [M] drivers/mfd/atc260x-i2c.o
CC [M] net/netfilter/xt_policy.o
CC [M] net/netfilter/xt_quota.o
CC [M] net/netfilter/xt_rateest.o
CC [M] net/netfilter/xt_realm.o
CC [M] net/netfilter/xt_recent.o
CC [M] net/netfilter/xt_sctp.o
CC [M] net/netfilter/xt_socket.o
CC [M] net/netfilter/xt_state.o
CC [M] net/netfilter/xt_statistic.o
CC [M] net/netfilter/xt_string.o
CC [M] net/netfilter/xt_tcpmss.o
CC [M] net/netfilter/xt_time.o
CC [M] net/netfilter/xt_u32.o
LD [M] drivers/mfd/arizona.o
LD [M] drivers/mfd/wm8994.o
LD [M] drivers/mfd/madera.o
LD [M] drivers/mfd/ocelot-soc.o
LD [M] drivers/mfd/mt6397.o
AR drivers/mfd/built-in.a
LD [M] net/netfilter/nf_conntrack.o
LD [M] net/netfilter/nf_conntrack_h323.o
LD [M] net/netfilter/nf_nat.o
LD [M] net/netfilter/nf_tables.o
LD [M] net/netfilter/nf_flow_table.o
AR net/netfilter/built-in.a
LD [M] drivers/mfd/si476x-core.o
make[3]: *** [../scripts/Makefile.build:461: drivers] Error 2
make[3]: *** Waiting for unfinished jobs....
AR net/built-in.a
CC [M] kernel/kheaders.o
make[2]: *** [/kernel/Makefile:2003: .] Error 2
make[1]: *** [/kernel/Makefile:248: __sub-make] Error 2
make[1]: Leaving directory '/kernel/build64-debug'
make: *** [Makefile:248: __sub-make] Error 2
+ cleanup
++ stat -c %u:%g /kernel
+ chown -R 1003:1003 /kernel
^ permalink raw reply [flat|nested] 72+ messages in thread