From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: simona.vetter@ffwll.ch, matthew.brost@intel.com,
christian.koenig@amd.com, thomas.hellstrom@linux.intel.com,
joonas.lahtinen@linux.intel.com, gustavo.sousa@intel.com,
jan.maslak@intel.com, dominik.karol.piatkowski@intel.com,
rodrigo.vivi@intel.com, andrzej.hajda@intel.com,
matthew.auld@intel.com, maciej.patelczyk@intel.com,
gwan-gyeong.mun@intel.com,
Mika Kuoppala <mika.kuoppala@linux.intel.com>
Subject: [PATCH 11/24] drm/xe/eudebug: vm open/pread/pwrite
Date: Thu, 30 Apr 2026 13:51:07 +0300
Message-ID: <20260430105121.712843-12-mika.kuoppala@linux.intel.com>
In-Reply-To: <20260430105121.712843-1-mika.kuoppala@linux.intel.com>
The debugger needs access to the client's vm to read and write it,
for example to inspect ISA/ELF and to set up breakpoints.

Add an ioctl to open a target vm, addressed by debugger client and
vm_handle, and hook up pread/pwrite support.

Open takes a timeout argument so that standard fsync can be used for
explicit flushing between cpu/gpu for the target vm.

Implement this for bo backed storage. userptr support will be done
in a following patch.
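For reference, a minimal userspace sketch of the flow this enables.
This is hypothetical: the debugger fd is assumed to come from the
eudebug connect ioctl earlier in the series, vm_handle from a vm
event, and the uapi header path is assumed; error handling is
trimmed. The ioctl and struct names match the header added below.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/xe_drm_eudebug.h>

/* Patch ISA at isa_addr inside the target vm, then flush caches. */
static int poke_isa(int debugger_fd, uint64_t vm_handle,
		    uint64_t isa_addr, const void *patch, size_t len)
{
	struct drm_xe_eudebug_vm_open vo;
	int vm_fd, ret = -1;

	memset(&vo, 0, sizeof(vo));
	vo.vm_handle = vm_handle;
	vo.timeout_ns = 500ull * 1000 * 1000; /* 500 ms fsync budget */

	/* On success the ioctl returns a new fd for the target vm. */
	vm_fd = ioctl(debugger_fd, DRM_XE_EUDEBUG_IOCTL_VM_OPEN, &vo);
	if (vm_fd < 0)
		return -1;

	/*
	 * Write the breakpoint bytes, then fsync(2) so the flush and
	 * invalidate make the EUs observe the modified instructions.
	 */
	if (pwrite(vm_fd, patch, len, isa_addr) == (ssize_t)len &&
	    fsync(vm_fd) == 0)
		ret = 0;

	close(vm_fd);
	return ret;
}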
v2: - checkpatch (Maciej)
- 32bit fixes (Andrzej)
- bo_vmap (Mika)
- fix vm leak if can't allocate k_buffer (Mika)
- assert vm write held for vma (Matthew)
v3: - fw ref, ttm_bo_access
- timeout boundary check (Dominik)
- don't try to copy to user on zero bytes (Mika)
v4: - offset as unsigned long (Thomas)
- check XE_VMA_DESTROYED
v5: - drm_dev_put before releasing debugger (Mika)
v6: - stop flushing on first error (Mika)
Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
Documentation/gpu/xe/xe_eudebug.rst | 6 +
drivers/gpu/drm/xe/Makefile | 2 +-
drivers/gpu/drm/xe/regs/xe_gt_regs.h | 24 ++
drivers/gpu/drm/xe/xe_eudebug.c | 40 ++-
drivers/gpu/drm/xe/xe_eudebug.h | 12 +
drivers/gpu/drm/xe/xe_eudebug_types.h | 6 +
drivers/gpu/drm/xe/xe_eudebug_vm.c | 420 ++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_eudebug_vm.h | 8 +
include/uapi/drm/xe_drm_eudebug.h | 29 ++
9 files changed, 543 insertions(+), 4 deletions(-)
create mode 100644 drivers/gpu/drm/xe/xe_eudebug_vm.c
create mode 100644 drivers/gpu/drm/xe/xe_eudebug_vm.h
diff --git a/Documentation/gpu/xe/xe_eudebug.rst b/Documentation/gpu/xe/xe_eudebug.rst
index db52945714f3..466d366c1e83 100644
--- a/Documentation/gpu/xe/xe_eudebug.rst
+++ b/Documentation/gpu/xe/xe_eudebug.rst
@@ -66,3 +66,9 @@ Resource Event Types
.. kernel-doc:: include/uapi/drm/xe_drm_eudebug.h
:identifiers: drm_xe_eudebug_event_vm_bind_ufence
+
+VM Access
+=========
+
+.. kernel-doc:: include/uapi/drm/xe_drm_eudebug.h
+ :identifiers: drm_xe_eudebug_vm_open
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 97c721cd32aa..83e88d75c3e0 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -158,7 +158,7 @@ xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
xe-$(CONFIG_DRM_GPUSVM) += xe_userptr.o
# debugging shaders with gdb (eudebug) support
-xe-$(CONFIG_DRM_XE_EUDEBUG) += xe_eudebug.o
+xe-$(CONFIG_DRM_XE_EUDEBUG) += xe_eudebug.o xe_eudebug_vm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 16c87ce3f614..1787f4906775 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -614,6 +614,30 @@
#define CCS_MODE_CSLICE(cslice, ccs) \
((ccs) << ((cslice) * CCS_MODE_CSLICE_WIDTH))
+#define RCU_ASYNC_FLUSH XE_REG(0x149fc)
+#define RCU_ASYNC_FLUSH_IN_PROGRESS REG_BIT(31)
+#define RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT 28
+#define RCU_ASYNC_FLUSH_ENGINE_ID_DECODE1 REG_BIT(26)
+#define RCU_ASYNC_FLUSH_AMFS REG_BIT(8)
+#define RCU_ASYNC_FLUSH_PREFETCH REG_BIT(7)
+#define RCU_ASYNC_FLUSH_DATA_PORT REG_BIT(6)
+#define RCU_ASYNC_FLUSH_DATA_CACHE REG_BIT(5)
+#define RCU_ASYNC_FLUSH_HDC_PIPELINE REG_BIT(4)
+#define RCU_ASYNC_INVALIDATE_HDC_PIPELINE REG_BIT(3)
+#define RCU_ASYNC_INVALIDATE_CONSTANT_CACHE REG_BIT(2)
+#define RCU_ASYNC_INVALIDATE_TEXTURE_CACHE REG_BIT(1)
+#define RCU_ASYNC_INVALIDATE_INSTRUCTION_CACHE REG_BIT(0)
+#define RCU_ASYNC_FLUSH_AND_INVALIDATE_ALL ( \
+ RCU_ASYNC_FLUSH_AMFS | \
+ RCU_ASYNC_FLUSH_PREFETCH | \
+ RCU_ASYNC_FLUSH_DATA_PORT | \
+ RCU_ASYNC_FLUSH_DATA_CACHE | \
+ RCU_ASYNC_FLUSH_HDC_PIPELINE | \
+ RCU_ASYNC_INVALIDATE_HDC_PIPELINE | \
+ RCU_ASYNC_INVALIDATE_CONSTANT_CACHE | \
+ RCU_ASYNC_INVALIDATE_TEXTURE_CACHE | \
+ RCU_ASYNC_INVALIDATE_INSTRUCTION_CACHE)
+
#define FORCEWAKE_ACK_GT XE_REG(0x130044)
/* Applicable for all FORCEWAKE_DOMAIN and FORCEWAKE_ACK_DOMAIN regs */
diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
index 14be97b5b4eb..0027dea2c396 100644
--- a/drivers/gpu/drm/xe/xe_eudebug.c
+++ b/drivers/gpu/drm/xe/xe_eudebug.c
@@ -16,6 +16,7 @@
#include "xe_device.h"
#include "xe_eudebug.h"
#include "xe_eudebug_types.h"
+#include "xe_eudebug_vm.h"
#include "xe_exec_queue.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
@@ -145,8 +146,7 @@ event_fifo_num_events_peek(const struct xe_eudebug * const d)
return kfifo_len(&d->events.fifo);
}
-static bool
-xe_eudebug_detached(struct xe_eudebug *d)
+bool xe_eudebug_detached(struct xe_eudebug *d)
{
return READ_ONCE(d->target.xef) == NULL;
}
@@ -265,7 +265,7 @@ static void xe_eudebug_free(struct kref *ref)
kfree(d);
}
-static void xe_eudebug_put(struct xe_eudebug *d)
+void xe_eudebug_put(struct xe_eudebug *d)
{
kref_put(&d->ref, xe_eudebug_free);
}
@@ -743,6 +743,34 @@ static int xe_eudebug_remove_handle(struct xe_eudebug *d, int type, void *p,
return ret;
}
+static void *find_resource__unlocked(struct xe_eudebug *d,
+ int type,
+ u32 id)
+{
+
+ struct xe_eudebug_resource *r;
+ struct xe_eudebug_handle *h;
+
+ r = resource_from_type(d, type);
+ h = xa_load(&r->xa, id);
+
+ return h ? (void *)(uintptr_t)h->key : NULL;
+}
+
+struct xe_vm *xe_eudebug_vm_get(struct xe_eudebug *d, u32 id)
+{
+ struct xe_vm *vm;
+
+ mutex_lock(&d->target.lock);
+ vm = find_resource__unlocked(d, XE_EUDEBUG_RES_TYPE_VM, id);
+ if (vm)
+ xe_vm_get(vm);
+ mutex_unlock(&d->target.lock);
+
+ return vm;
+}
+
+
static struct drm_xe_eudebug_event *
xe_eudebug_create_event(struct xe_eudebug *d, u16 type, u64 seqno, u16 flags,
u32 len)
@@ -1744,6 +1772,10 @@ static long xe_eudebug_ioctl(struct file *file,
ret = xe_eudebug_ack_event_ioctl(d, cmd, arg);
eu_dbg(d, "ioctl cmd=EVENT_ACK ret=%ld\n", ret);
break;
+ case DRM_XE_EUDEBUG_IOCTL_VM_OPEN:
+ ret = xe_eudebug_vm_open_ioctl(d, arg);
+ eu_dbg(d, "ioctl cmd=VM_OPEN ret=%ld\n", ret);
+ break;
default:
ret = -EINVAL;
}
@@ -1809,6 +1841,8 @@ xe_eudebug_connect(struct xe_device *xe,
spin_lock_init(&d->acks.lock);
d->acks.tree = RB_ROOT;
+ mutex_init(&d->hw.lock);
+
err = xe_eudebug_resources_init(d);
if (XE_IOCTL_DBG(xe, err))
goto err_free;
diff --git a/drivers/gpu/drm/xe/xe_eudebug.h b/drivers/gpu/drm/xe/xe_eudebug.h
index d0f1b51564dc..74171cc81fe1 100644
--- a/drivers/gpu/drm/xe/xe_eudebug.h
+++ b/drivers/gpu/drm/xe/xe_eudebug.h
@@ -18,6 +18,7 @@ struct xe_vma;
struct xe_vma_ops;
struct xe_exec_queue;
struct xe_user_fence;
+struct xe_eudebug;
#if IS_ENABLED(CONFIG_DRM_XE_EUDEBUG)
@@ -38,6 +39,10 @@ struct xe_user_fence;
#define xe_eudebug_assert(d, ...) xe_assert((d)->xe, ##__VA_ARGS__)
+#define xe_eudebug_for_each_hw_engine(__hwe, __gt, __id) \
+ for_each_hw_engine(__hwe, __gt, __id) \
+ if (xe_hw_engine_has_eudebug(__hwe))
+
int xe_eudebug_connect_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file);
@@ -47,10 +52,15 @@ bool xe_eudebug_is_enabled(struct xe_device *xe);
void xe_eudebug_file_close(struct xe_file *xef);
+bool xe_eudebug_detached(struct xe_eudebug *d);
+
void xe_eudebug_vm_create(struct xe_file *xef, struct xe_vm *vm);
void xe_eudebug_vm_destroy(struct xe_file *xef, struct xe_vm *vm);
+
int xe_eudebug_enable(struct xe_device *xe, bool enable);
+struct xe_vm *xe_eudebug_vm_get(struct xe_eudebug *d, u32 vm_id);
+
void xe_eudebug_exec_queue_create(struct xe_file *xef, struct xe_exec_queue *q);
void xe_eudebug_exec_queue_destroy(struct xe_file *xef, struct xe_exec_queue *q);
@@ -60,6 +70,8 @@ void xe_eudebug_ufence_init(struct xe_user_fence *ufence);
void xe_eudebug_ufence_fini(struct xe_user_fence *ufence);
bool xe_eudebug_ufence_track(struct xe_user_fence *ufence);
+void xe_eudebug_put(struct xe_eudebug *d);
+
#else
static inline int xe_eudebug_connect_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/xe/xe_eudebug_types.h b/drivers/gpu/drm/xe/xe_eudebug_types.h
index 0f18667a5ab8..10d19a43ba6b 100644
--- a/drivers/gpu/drm/xe/xe_eudebug_types.h
+++ b/drivers/gpu/drm/xe/xe_eudebug_types.h
@@ -133,6 +133,12 @@ struct xe_eudebug {
/** @tree: pending acks by seqnos */
struct rb_root tree;
} acks;
+
+ /** @hw: hw access */
+ struct {
+ /** @lock: guards access to hw state */
+ struct mutex lock;
+ } hw;
};
#endif /* _XE_EUDEBUG_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_eudebug_vm.c b/drivers/gpu/drm/xe/xe_eudebug_vm.c
new file mode 100644
index 000000000000..dd75227555ee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eudebug_vm.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+#include "xe_eudebug_vm.h"
+
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_eudebug.h"
+#include "xe_eudebug_types.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+#include "xe_vm.h"
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_engine_regs.h"
+
+static int xe_eudebug_vma_access(struct xe_vma *vma,
+ unsigned long offset_in_vma,
+ void *buf, unsigned long len, bool write)
+{
+ struct xe_bo *bo;
+ u64 bytes;
+
+ lockdep_assert_held_write(&xe_vma_vm(vma)->lock);
+
+ if (XE_WARN_ON(offset_in_vma >= xe_vma_size(vma)))
+ return -EINVAL;
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return -EINVAL;
+
+ bytes = min_t(u64, len, xe_vma_size(vma) - offset_in_vma);
+ if (!bytes)
+ return 0;
+
+ bo = xe_bo_get(xe_vma_bo(vma));
+ if (bo) {
+ int ret;
+
+ ret = ttm_bo_access(&bo->ttm, offset_in_vma, buf, bytes, write);
+
+ xe_bo_put(bo);
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int xe_eudebug_vm_access(struct xe_vm *vm, unsigned long offset,
+ void *buf, unsigned long len, bool write)
+{
+ struct xe_vma *vma;
+ int ret;
+
+ down_write(&vm->lock);
+
+ vma = xe_vm_find_overlapping_vma(vm, offset, len);
+ if (vma) {
+ /* XXX: why find overlapping returns below start? */
+ if (offset < xe_vma_start(vma) ||
+ offset >= (xe_vma_start(vma) + xe_vma_size(vma))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Offset into vma */
+ offset -= xe_vma_start(vma);
+ ret = xe_eudebug_vma_access(vma, offset, buf, len, write);
+ } else {
+ ret = -EINVAL;
+ }
+
+out:
+ up_write(&vm->lock);
+
+ return ret;
+}
+
+struct vm_file {
+ struct xe_eudebug *debugger;
+ struct xe_vm *vm;
+ u64 flags;
+ u64 vm_handle;
+ unsigned int timeout_us;
+};
+
+static ssize_t __vm_read_write(struct xe_vm *vm,
+ void *bb,
+ char __user *r_buffer,
+ const char __user *w_buffer,
+ unsigned long offset,
+ unsigned long len,
+ const bool write)
+{
+ ssize_t ret;
+
+ if (!len)
+ return 0;
+
+ if (write) {
+ ret = copy_from_user(bb, w_buffer, len);
+ if (ret)
+ return -EFAULT;
+
+ ret = xe_eudebug_vm_access(vm, offset, bb, len, true);
+ if (ret <= 0)
+ return ret;
+
+ len = ret;
+ } else {
+ ret = xe_eudebug_vm_access(vm, offset, bb, len, false);
+ if (ret <= 0)
+ return ret;
+
+ len = ret;
+
+ ret = copy_to_user(r_buffer, bb, len);
+ if (ret)
+ return -EFAULT;
+ }
+
+ return len;
+}
+
+static ssize_t __xe_eudebug_vm_access(struct file *file,
+ char __user *r_buffer,
+ const char __user *w_buffer,
+ size_t count, loff_t *__pos)
+{
+ struct vm_file *vmf = file->private_data;
+ struct xe_eudebug * const d = vmf->debugger;
+ struct xe_device * const xe = d->xe;
+ const bool write = !!w_buffer;
+ struct xe_vm *vm;
+ ssize_t copied = 0;
+ ssize_t bytes_left = count;
+ ssize_t ret;
+ unsigned long alloc_len;
+ loff_t pos = *__pos;
+ void *k_buffer;
+
+ if (XE_IOCTL_DBG(xe, write && r_buffer))
+ return -EINVAL;
+
+ vm = xe_eudebug_vm_get(d, vmf->vm_handle);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, vm != vmf->vm)) {
+ eu_warn(d, "vm_access(%s): vm handle mismatch vm_handle=%llu, flags=0x%llx, pos=%llu, count=%zu\n",
+ write ? "write" : "read",
+ vmf->vm_handle, vmf->flags, pos, count);
+ xe_vm_put(vm);
+ return -EINVAL;
+ }
+
+ if (!count) {
+ xe_vm_put(vm);
+ return 0;
+ }
+
+ alloc_len = min_t(unsigned long, ALIGN(count, PAGE_SIZE), 64 * SZ_1M);
+ do {
+ k_buffer = vmalloc(alloc_len);
+ if (k_buffer)
+ break;
+
+ alloc_len >>= 1;
+ } while (alloc_len > PAGE_SIZE);
+
+ if (XE_IOCTL_DBG(xe, !k_buffer)) {
+ xe_vm_put(vm);
+ return -ENOMEM;
+ }
+
+ do {
+ const ssize_t len = min_t(ssize_t, bytes_left, alloc_len);
+
+ ret = __vm_read_write(vm, k_buffer,
+ write ? NULL : r_buffer + copied,
+ write ? w_buffer + copied : NULL,
+ pos + copied,
+ len,
+ write);
+ if (ret <= 0)
+ break;
+
+ bytes_left -= ret;
+ copied += ret;
+ } while (bytes_left > 0);
+
+ vfree(k_buffer);
+ xe_vm_put(vm);
+
+ if (XE_WARN_ON(copied < 0))
+ copied = 0;
+
+ *__pos += copied;
+
+ return copied ?: ret;
+}
+
+static ssize_t xe_eudebug_vm_read(struct file *file,
+ char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ return __xe_eudebug_vm_access(file, buffer, NULL, count, pos);
+}
+
+static ssize_t xe_eudebug_vm_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ return __xe_eudebug_vm_access(file, NULL, buffer, count, pos);
+}
+
+static int engine_rcu_flush(struct xe_eudebug *d,
+ struct xe_hw_engine *hwe,
+ unsigned int timeout_us)
+{
+ const struct xe_reg psmi_addr = RING_PSMI_CTL(hwe->mmio_base);
+ struct xe_gt *gt = hwe->gt;
+ unsigned int fw_ref;
+ u32 mask = RCU_ASYNC_FLUSH_AND_INVALIDATE_ALL;
+ u32 psmi_ctrl;
+ u32 id;
+ int ret;
+
+ if (hwe->class == XE_ENGINE_CLASS_RENDER)
+ id = 0;
+ else if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
+ id = hwe->instance + 1;
+ else
+ return -EINVAL;
+
+ if (id < 8)
+ mask |= id << RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT;
+ else
+ mask |= (id - 8) << RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT |
+ RCU_ASYNC_FLUSH_ENGINE_ID_DECODE1;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), hwe->domain);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ /* Prevent concurrent flushes */
+ mutex_lock(&d->hw.lock);
+ psmi_ctrl = xe_mmio_read32(&gt->mmio, psmi_addr);
+ if (!(psmi_ctrl & IDLE_MSG_DISABLE))
+ xe_mmio_write32(&gt->mmio, psmi_addr,
+ REG_MASKED_FIELD_ENABLE(IDLE_MSG_DISABLE));
+
+ /* XXX: Timeout is per operation but in here we flush previous */
+ ret = xe_mmio_wait32(&gt->mmio, RCU_ASYNC_FLUSH,
+ RCU_ASYNC_FLUSH_IN_PROGRESS, 0,
+ timeout_us, NULL, false);
+ if (ret)
+ goto out;
+
+ xe_mmio_write32(&gt->mmio, RCU_ASYNC_FLUSH, mask);
+
+ ret = xe_mmio_wait32(&gt->mmio, RCU_ASYNC_FLUSH,
+ RCU_ASYNC_FLUSH_IN_PROGRESS, 0,
+ timeout_us, NULL, false);
+out:
+ if (!(psmi_ctrl & IDLE_MSG_DISABLE))
+ xe_mmio_write32(&gt->mmio, psmi_addr,
+ REG_MASKED_FIELD_DISABLE(IDLE_MSG_DISABLE));
+
+ mutex_unlock(&d->hw.lock);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return ret;
+}
+
+static int xe_eudebug_vm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct vm_file *vmf = file->private_data;
+ struct xe_eudebug *d = vmf->debugger;
+ struct xe_gt *gt;
+ int gt_id;
+ int ret = -EINVAL;
+
+ eu_dbg(d, "vm_fsync: vm_handle=%llu, flags=0x%llx, start=%llu, end=%llu datasync=%d\n",
+ vmf->vm_handle, vmf->flags, start, end, datasync);
+
+ for_each_gt(gt, d->xe, gt_id) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ /* XXX: vm open per engine? */
+ xe_eudebug_for_each_hw_engine(hwe, gt, id) {
+ ret = engine_rcu_flush(d, hwe, vmf->timeout_us);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+static int xe_eudebug_vm_release(struct inode *inode, struct file *file)
+{
+ struct vm_file *vmf = file->private_data;
+ struct xe_eudebug *d = vmf->debugger;
+
+ eu_dbg(d, "vm_release: vm_handle=%llu, flags=0x%llx",
+ vmf->vm_handle, vmf->flags);
+
+ xe_vm_put(vmf->vm);
+ drm_dev_put(&d->xe->drm);
+ xe_eudebug_put(d);
+
+ kfree(vmf);
+
+ return 0;
+}
+
+static const struct file_operations vm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = generic_file_llseek,
+ .read = xe_eudebug_vm_read,
+ .write = xe_eudebug_vm_write,
+ .fsync = xe_eudebug_vm_fsync,
+ .mmap = NULL,
+ .release = xe_eudebug_vm_release,
+};
+
+long xe_eudebug_vm_open_ioctl(struct xe_eudebug *d, unsigned long arg)
+{
+ struct drm_xe_eudebug_vm_open param;
+ struct xe_device * const xe = d->xe;
+ struct vm_file *vmf = NULL;
+ struct xe_vm *vm;
+ struct file *file;
+ long ret = 0;
+ int fd;
+
+ if (XE_IOCTL_DBG(xe, _IOC_SIZE(DRM_XE_EUDEBUG_IOCTL_VM_OPEN) != sizeof(param)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, !(_IOC_DIR(DRM_XE_EUDEBUG_IOCTL_VM_OPEN) & _IOC_WRITE)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, copy_from_user(&param, (void __user *)arg, sizeof(param))))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, param.flags))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, xe_eudebug_detached(d)))
+ return -ENOTCONN;
+
+ vm = xe_eudebug_vm_get(d, param.vm_handle);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ vmf = kzalloc_obj(*vmf, GFP_KERNEL);
+ if (XE_IOCTL_DBG(xe, !vmf)) {
+ ret = -ENOMEM;
+ goto out_vm_put;
+ }
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (XE_IOCTL_DBG(xe, fd < 0)) {
+ ret = fd;
+ goto out_free;
+ }
+
+ kref_get(&d->ref);
+ vmf->debugger = d;
+ vmf->vm = vm;
+ vmf->flags = param.flags;
+ vmf->vm_handle = param.vm_handle;
+ vmf->timeout_us = div64_u64(param.timeout_ns, 1000ull);
+
+ file = anon_inode_getfile("[xe_eudebug.vm]", &vm_fops, vmf, O_RDWR);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ XE_IOCTL_DBG(xe, ret);
+ file = NULL;
+ goto out_fd_put;
+ }
+
+ drm_dev_get(&xe->drm);
+
+ file->f_mode |= FMODE_PREAD | FMODE_PWRITE |
+ FMODE_READ | FMODE_WRITE | FMODE_LSEEK;
+
+ fd_install(fd, file);
+
+ eu_dbg(d, "vm_open: handle=%llu, flags=0x%llx, fd=%d",
+ vmf->vm_handle, vmf->flags, fd);
+
+ XE_WARN_ON(ret);
+
+ return fd;
+
+out_fd_put:
+ put_unused_fd(fd);
+ xe_eudebug_put(d);
+out_free:
+ kfree(vmf);
+out_vm_put:
+ xe_vm_put(vm);
+
+ XE_WARN_ON(ret >= 0);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_eudebug_vm.h b/drivers/gpu/drm/xe/xe_eudebug_vm.h
new file mode 100644
index 000000000000..b3dc5618a5e6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eudebug_vm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2025 Intel Corporation
+ */
+
+struct xe_eudebug;
+
+long xe_eudebug_vm_open_ioctl(struct xe_eudebug *d, unsigned long arg);
diff --git a/include/uapi/drm/xe_drm_eudebug.h b/include/uapi/drm/xe_drm_eudebug.h
index fb53174869ef..029a51340777 100644
--- a/include/uapi/drm/xe_drm_eudebug.h
+++ b/include/uapi/drm/xe_drm_eudebug.h
@@ -12,6 +12,7 @@ extern "C" {
#define DRM_XE_EUDEBUG_IOCTL_READ_EVENT _IO('j', 0x0)
#define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT _IOW('j', 0x1, struct drm_xe_eudebug_ack)
+#define DRM_XE_EUDEBUG_IOCTL_VM_OPEN _IOW('j', 0x2, struct drm_xe_eudebug_vm_open)
/**
* struct drm_xe_eudebug_event - Base type of event delivered by xe_eudebug.
@@ -242,6 +243,34 @@ struct drm_xe_eudebug_ack {
__u64 seqno;
};
+/**
+ * struct drm_xe_eudebug_vm_open - Open a target vm
+ *
+ * Open the target VM for reading and writing with
+ * DRM_XE_EUDEBUG_IOCTL_VM_OPEN.
+ *
+ * A file descriptor is returned which can be used with pread(2) and
+ * pwrite(2) to inspect and modify the target VM.
+ *
+ * Multiple operations can be synced by calling fsync(fd). If
+ * timeout_ns was specified, the fsync will time out if the VM cannot
+ * be guaranteed to be in sync, in which case the caller should
+ * re-read the state with pread.
+ */
+struct drm_xe_eudebug_vm_open {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_handle: handle of vm to be accessed */
+ __u64 vm_handle;
+
+ /** @flags: flags, must be zero */
+ __u64 flags;
+
+ /** @timeout_ns: Timeout value in nanoseconds */
+ __u64 timeout_ns;
+};
+
#if defined(__cplusplus)
}
#endif
--
2.43.0