From: Matthew Brost <matthew.brost@intel.com>
To: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: <intel-xe@lists.freedesktop.org>,
	Maciej Patelczyk <maciej.patelczyk@intel.com>
Subject: Re: [PATCH 14/21] drm/xe/eudebug: vm open/pread/pwrite
Date: Fri, 26 Jul 2024 18:59:42 +0000
Message-ID: <ZqPyHmU7GaFCfGdN@DUT025-TGLU.fm.intel.com>
In-Reply-To: <20240726140818.54304-15-mika.kuoppala@linux.intel.com>

On Fri, Jul 26, 2024 at 05:08:11PM +0300, Mika Kuoppala wrote:
> The debugger needs access to the client's vm for reading and writing,
> for example to inspect ISA/ELF and set up breakpoints.
> 
> Add an ioctl to open the target vm, given the debugger client and a
> vm_handle, and hook up pread/pwrite support.
> 
> Open takes a timeout argument so that standard fsync
> can be used for explicit flushing between cpu/gpu for
> the target vm.
> 
> Implement this for bo-backed storage; userptr will
> be done in a following patch.
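
For anyone following along, my understanding of the debugger-side flow
this enables is roughly the sketch below (hypothetical snippet, error
handling omitted; client_handle/vm_handle are assumed to come from the
debugger's earlier client/vm events, and addr from a vm_bind event;
flags/extensions stay zero as the ioctl requires):

	struct drm_xe_eudebug_vm_open vo = {
		.client_handle = client_handle,
		.vm_handle = vm_handle,
		.timeout_ns = 1000ull * 1000 * 1000, /* 1s budget for fsync */
	};
	int vm_fd = ioctl(debugger_fd, DRM_XE_EUDEBUG_IOCTL_VM_OPEN, &vo);

	pread(vm_fd, isa, isa_len, addr);      /* inspect ISA/ELF */
	pwrite(vm_fd, &bp, sizeof(bp), addr);  /* plant a breakpoint */
	fsync(vm_fd);   /* explicit cpu/gpu flush for the target vm */
	close(vm_fd);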
> 
> v2: checkpatch (Maciej)
> 
> Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Signed-off-by: Maciej Patelczyk <maciej.patelczyk@intel.com>
> ---
>  drivers/gpu/drm/xe/regs/xe_gt_regs.h |  24 ++
>  drivers/gpu/drm/xe/xe_eudebug.c      | 470 +++++++++++++++++++++++++++
>  include/uapi/drm/xe_drm_eudebug.h    |  18 +
>  3 files changed, 512 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> index 546bb7cc2337..0d688189a2b3 100644
> --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
> @@ -520,6 +520,30 @@
>  #define   CCS_MODE_CSLICE(cslice, ccs) \
>  	((ccs) << ((cslice) * CCS_MODE_CSLICE_WIDTH))
>  
> +#define RCU_ASYNC_FLUSH				XE_REG(0x149fc)
> +#define   RCU_ASYNC_FLUSH_IN_PROGRESS	REG_BIT(31)
> +#define   RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT	28
> +#define   RCU_ASYNC_FLUSH_ENGINE_ID_DECODE1 REG_BIT(26)
> +#define   RCU_ASYNC_FLUSH_AMFS		REG_BIT(8)
> +#define   RCU_ASYNC_FLUSH_PREFETCH	REG_BIT(7)
> +#define   RCU_ASYNC_FLUSH_DATA_PORT	REG_BIT(6)
> +#define   RCU_ASYNC_FLUSH_DATA_CACHE	REG_BIT(5)
> +#define   RCU_ASYNC_FLUSH_HDC_PIPELINE	REG_BIT(4)
> +#define   RCU_ASYNC_INVALIDATE_HDC_PIPELINE REG_BIT(3)
> +#define   RCU_ASYNC_INVALIDATE_CONSTANT_CACHE REG_BIT(2)
> +#define   RCU_ASYNC_INVALIDATE_TEXTURE_CACHE REG_BIT(1)
> +#define   RCU_ASYNC_INVALIDATE_INSTRUCTION_CACHE REG_BIT(0)
> +#define   RCU_ASYNC_FLUSH_AND_INVALIDATE_ALL ( \
> +	RCU_ASYNC_FLUSH_AMFS | \
> +	RCU_ASYNC_FLUSH_PREFETCH | \
> +	RCU_ASYNC_FLUSH_DATA_PORT | \
> +	RCU_ASYNC_FLUSH_DATA_CACHE | \
> +	RCU_ASYNC_FLUSH_HDC_PIPELINE | \
> +	RCU_ASYNC_INVALIDATE_HDC_PIPELINE | \
> +	RCU_ASYNC_INVALIDATE_CONSTANT_CACHE | \
> +	RCU_ASYNC_INVALIDATE_TEXTURE_CACHE | \
> +	RCU_ASYNC_INVALIDATE_INSTRUCTION_CACHE)
> +
>  #define RCU_DEBUG_1				XE_REG(0x14a00)
>  #define   RCU_DEBUG_1_ENGINE_STATUS		REG_GENMASK(2, 0)
>  #define   RCU_DEBUG_1_RUNALONE_ACTIVE		REG_BIT(2)
> diff --git a/drivers/gpu/drm/xe/xe_eudebug.c b/drivers/gpu/drm/xe/xe_eudebug.c
> index 5dcb7c9464e9..aa383accc468 100644
> --- a/drivers/gpu/drm/xe/xe_eudebug.c
> +++ b/drivers/gpu/drm/xe/xe_eudebug.c
> @@ -8,7 +8,10 @@
>  #include <linux/anon_inodes.h>
>  #include <linux/poll.h>
>  #include <linux/delay.h>
> +#include <linux/file.h>
> +#include <linux/vmalloc.h>
>  
> +#include <drm/drm_drv.h>
>  #include <drm/drm_managed.h>
>  
>  #include "regs/xe_engine_regs.h"
> @@ -36,6 +39,7 @@
>  #include "xe_wa.h"
>  #include "xe_force_wake.h"
>  #include "xe_sync.h"
> +#include "xe_bo.h"
>  
>  /*
>   * If there is no detected event read by userspace, during this period, assume
> @@ -747,6 +751,17 @@ static struct xe_lrc *find_lrc(struct xe_eudebug *d, const u32 id)
>  	return l;
>  }
>  
> +static struct xe_vm *find_vm(struct xe_eudebug *d, const u32 id)
> +{
> +	struct xe_eudebug_handle *h;
> +
> +	h = find_resource(d->res, XE_EUDEBUG_RES_TYPE_VM, id);
> +	if (h)
> +		return (void *)h->key;
> +
> +	return NULL;
> +}
> +
>  static int _xe_eudebug_add_handle(struct xe_eudebug *d,
>  				  int type,
>  				  void *p,
> @@ -1199,6 +1214,8 @@ static long xe_eudebug_eu_control(struct xe_eudebug *d, const u64 arg)
>  	return ret;
>  }
>  
> +static long xe_eudebug_vm_open_ioctl(struct xe_eudebug *d, unsigned long arg);
> +
>  static long xe_eudebug_ioctl(struct file *file,
>  			     unsigned int cmd,
>  			     unsigned long arg)
> @@ -1219,6 +1236,11 @@ static long xe_eudebug_ioctl(struct file *file,
>  		ret = xe_eudebug_ack_event_ioctl(d, cmd, arg);
>  		eu_dbg(d, "ioctl cmd=EVENT_ACK ret=%ld\n", ret);
>  		break;
> +	case DRM_XE_EUDEBUG_IOCTL_VM_OPEN:
> +		ret = xe_eudebug_vm_open_ioctl(d, arg);
> +		eu_dbg(d, "ioctl cmd=VM_OPEN ret=%ld\n", ret);
> +		break;
> +
>  	default:
>  		ret = -EINVAL;
>  	}
> @@ -2829,3 +2851,451 @@ static void discovery_work_fn(struct work_struct *work)
>  
>  	xe_eudebug_put(d);
>  }
> +
> +static int xe_eudebug_bovma_access(struct xe_bo *bo, u64 offset,
> +				   void *buf, u64 len, bool write)
> +{
> +	struct xe_device * const xe = xe_bo_device(bo);
> +	struct iosys_map src;
> +	int ret;
> +

To make it clear that vm->lock is held here in write mode, add a lockdep assert.
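
Something like this, as a sketch (the vm pointer isn't directly
reachable from this helper today, so assume it is either plumbed in or
the assert is placed in the caller):

	lockdep_assert_held_write(&vm->lock);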

> +	dma_resv_lock(bo->ttm.base.resv, NULL);
> +
> +	ret = ttm_bo_vmap(&bo->ttm, &src);
> +	if (!ret) {
> +		if (write)
> +			xe_map_memcpy_to(xe, &src, offset, buf, len);
> +		else
> +			xe_map_memcpy_from(xe, buf, &src, offset, len);
> +
> +		ttm_bo_vunmap(&bo->ttm, &src);
> +
> +		ret = len;
> +	}
> +
> +	dma_resv_unlock(bo->ttm.base.resv);
> +
> +	return ret;
> +}
> +
> +static int xe_eudebug_vma_access(struct xe_vma *vma, u64 offset,
> +				 void *buf, u64 len, bool write)
> +{
> +	struct xe_bo *bo;
> +	u64 bytes;
> +

Here too.

The same goes for any function in the EU debugger that accesses VM
state with this lock held. It is good practice and self-documenting,
and it helps review too, because I don't need to think about questions
like 'how does this not race with a bind, exec, rebind work, or page
fault?'.
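
For the helpers that already have the vma at hand, a sketch of what I
mean:

	lockdep_assert_held_write(&xe_vma_vm(vma)->lock);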

Matt

> +	if (XE_WARN_ON(offset >= xe_vma_size(vma)))
> +		return -EINVAL;
> +
> +	bytes = min_t(u64, len, xe_vma_size(vma) - offset);
> +	if (!bytes)
> +		return 0;
> +
> +	bo = xe_vma_bo(vma);
> +	if (bo)
> +		return xe_eudebug_bovma_access(bo, offset, buf, bytes, write);
> +
> +	return -EOPNOTSUPP;
> +}
> +
> +static int xe_eudebug_vm_access(struct xe_vm *vm, u64 offset,
> +				void *buf, u64 len, bool write)
> +{
> +	struct xe_vma *vma;
> +	int ret;
> +
> +	down_write(&vm->lock);
> +
> +	vma = xe_vm_find_overlapping_vma(vm, offset, len);
> +	if (vma) {
> +#ifdef VERBOSE_VM_ACCESS
> +		drm_dbg(&xe_vma_vm(vma)->xe->drm,
> +			"eudbg: offset: 0x%llx: vma start 0x%llx, size 0x%llx, offset_in_vma 0x%llx",
> +			offset, xe_vma_start(vma), xe_vma_size(vma), offset - xe_vma_start(vma));
> +#endif
> +		/* XXX: why find overlapping returns below start? */
> +		if (offset < xe_vma_start(vma) ||
> +		    offset >= (xe_vma_start(vma) + xe_vma_size(vma))) {
> +			ret = -EINVAL;
> +			goto out;
> +		}
> +
> +		/* Offset into vma */
> +		offset -= xe_vma_start(vma);
> +		ret = xe_eudebug_vma_access(vma, offset, buf, len, write);
> +	} else {
> +		ret = -EINVAL;
> +	}
> +
> +out:
> +	up_write(&vm->lock);
> +
> +	return ret;
> +}
> +
> +struct vm_file {
> +	struct xe_eudebug *debugger;
> +	struct xe_vm *vm;
> +	u64 flags;
> +	u64 client_id;
> +	u64 vm_handle;
> +	u64 timeout_ns;
> +};
> +
> +static ssize_t __vm_read_write(struct xe_vm *vm,
> +			       void *bb,
> +			       char __user *r_buffer,
> +			       const char __user *w_buffer,
> +			       unsigned long offset,
> +			       unsigned long len,
> +			       const bool write)
> +{
> +	ssize_t ret;
> +
> +	if (!len)
> +		return 0;
> +
> +	if (write) {
> +		ret = copy_from_user(bb, w_buffer, len);
> +		if (ret)
> +			return -EFAULT;
> +
> +		ret = xe_eudebug_vm_access(vm, offset, bb, len, true);
> +		if (ret < 0)
> +			return ret;
> +
> +		len = ret;
> +	} else {
> +		ret = xe_eudebug_vm_access(vm, offset, bb, len, false);
> +		if (ret < 0)
> +			return ret;
> +
> +		len = ret;
> +
> +		ret = copy_to_user(r_buffer, bb, len);
> +		if (ret)
> +			return -EFAULT;
> +	}
> +
> +	return len;
> +}
> +
> +static ssize_t __xe_eudebug_vm_access(struct file *file,
> +				      char __user *r_buffer,
> +				      const char __user *w_buffer,
> +				      size_t count, loff_t *__pos)
> +{
> +	struct vm_file *vmf = file->private_data;
> +	struct xe_eudebug * const d = vmf->debugger;
> +	struct xe_device * const xe = d->xe;
> +	const bool write = w_buffer != NULL;
> +	struct xe_vm *vm;
> +	ssize_t copied = 0;
> +	ssize_t bytes_left = count;
> +	ssize_t ret;
> +	unsigned long alloc_len;
> +	loff_t pos = *__pos;
> +	void *k_buffer;
> +
> +#ifdef VERBOSE_VM_ACCESS
> +	eu_dbg(d, "vm_access(%s): client_handle=%llu, vm_handle=%llu, flags=0x%llx, pos=0x%llx, count=0x%lx",
> +	       write ? "write" : "read",
> +	       vmf->client_id, vmf->vm_handle, vmf->flags, pos, count);
> +#endif
> +	if (XE_IOCTL_DBG(xe, write && r_buffer))
> +		return -EINVAL;
> +
> +	vm = find_vm(d, vmf->vm_handle);
> +	if (XE_IOCTL_DBG(xe, !vm))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, vm != vmf->vm)) {
> +		eu_warn(d, "vm_access(%s): vm handle mismatch client_handle=%llu, vm_handle=%llu, flags=0x%llx, pos=%llu, count=%lu\n",
> +			write ? "write" : "read",
> +			vmf->client_id, vmf->vm_handle, vmf->flags, pos, count);
> +		return -EINVAL;
> +	}
> +
> +	if (!count)
> +		return 0;
> +
> +	alloc_len = min_t(unsigned long, ALIGN(count, PAGE_SIZE), 64 * SZ_1M);
> +	do  {
> +		k_buffer = vmalloc(alloc_len);
> +		if (k_buffer)
> +			break;
> +
> +		alloc_len >>= 1;
> +	} while (alloc_len > PAGE_SIZE);
> +
> +	if (XE_IOCTL_DBG(xe, !k_buffer))
> +		return -ENOMEM;
> +
> +	do {
> +		const ssize_t len = min_t(ssize_t, bytes_left, alloc_len);
> +
> +		ret = __vm_read_write(vm, k_buffer,
> +				      write ? NULL : r_buffer + copied,
> +				      write ? w_buffer + copied : NULL,
> +				      pos + copied,
> +				      len,
> +				      write);
> +#ifdef VERBOSE_VM_ACCESS
> +		eu_dbg(d, "vm_access(%s): pos=0x%llx, len=0x%lx, copied=%lu bytes_left=%lu, ret=%ld",
> +		       write ? "write" : "read", pos + copied, len, copied, bytes_left, ret);
> +#endif
> +		if (ret <= 0)
> +			break;
> +
> +		bytes_left -= ret;
> +		copied += ret;
> +	} while (bytes_left > 0);
> +
> +	vfree(k_buffer);
> +
> +	if (XE_WARN_ON(copied < 0))
> +		copied = 0;
> +
> +	*__pos += copied;
> +
> +#ifdef VERBOSE_VM_ACCESS
> +	eu_dbg(d, "vm_access(%s): pos=0x%llx, count=0x%lx, copied=%lu bytes_left=%lu, ret=%ld",
> +	       write ? "write" : "read", pos, count, copied, bytes_left, copied ?: ret);
> +#endif
> +
> +	return copied ?: ret;
> +}
> +
> +static ssize_t xe_eudebug_vm_read(struct file *file,
> +				  char __user *buffer,
> +				  size_t count, loff_t *pos)
> +{
> +	return __xe_eudebug_vm_access(file, buffer, NULL, count, pos);
> +}
> +
> +static ssize_t xe_eudebug_vm_write(struct file *file,
> +				   const char __user *buffer,
> +				   size_t count, loff_t *pos)
> +{
> +	return __xe_eudebug_vm_access(file, NULL, buffer, count, pos);
> +}
> +
> +static int engine_rcu_flush(struct xe_eudebug *d,
> +			    struct xe_hw_engine *hwe,
> +			    unsigned int timeout_us)
> +{
> +	const struct xe_reg psmi_addr = RING_PSMI_CTL(hwe->mmio_base);
> +	struct xe_gt *gt = hwe->gt;
> +	u32 mask = RCU_ASYNC_FLUSH_AND_INVALIDATE_ALL;
> +	u32 psmi_ctrl;
> +	u32 id;
> +	int ret;
> +
> +	if (hwe->class == XE_ENGINE_CLASS_RENDER)
> +		id = 0;
> +	else if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
> +		id = hwe->instance + 1;
> +	else
> +		return -EINVAL;
> +
> +	if (id < 8)
> +		mask |= id << RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT;
> +	else
> +		mask |= (id - 8) << RCU_ASYNC_FLUSH_ENGINE_ID_SHIFT |
> +			RCU_ASYNC_FLUSH_ENGINE_ID_DECODE1;
> +
> +	ret = xe_force_wake_get(gt_to_fw(gt), hwe->domain);
> +	if (ret)
> +		return ret;
> +
> +	/* Prevent concurrent flushes */
> +	mutex_lock(&d->eu_lock);
> +	psmi_ctrl = xe_mmio_read32(gt, psmi_addr);
> +	if (!(psmi_ctrl & IDLE_MSG_DISABLE))
> +		xe_mmio_write32(gt, psmi_addr, _MASKED_BIT_ENABLE(IDLE_MSG_DISABLE));
> +
> +	ret = xe_mmio_wait32(gt, RCU_ASYNC_FLUSH,
> +			     RCU_ASYNC_FLUSH_IN_PROGRESS, 0,
> +			     timeout_us, NULL, false);
> +	if (ret)
> +		goto out;
> +
> +	xe_mmio_write32(gt, RCU_ASYNC_FLUSH, mask);
> +
> +	ret = xe_mmio_wait32(gt, RCU_ASYNC_FLUSH,
> +			     RCU_ASYNC_FLUSH_IN_PROGRESS, 0,
> +			     timeout_us, NULL, false);
> +out:
> +	if (!(psmi_ctrl & IDLE_MSG_DISABLE))
> +		xe_mmio_write32(gt, psmi_addr, _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
> +
> +	mutex_unlock(&d->eu_lock);
> +	xe_force_wake_put(gt_to_fw(gt), hwe->domain);
> +
> +	return ret;
> +}
> +
> +static int xe_eudebug_vm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
> +{
> +	struct vm_file *vmf = file->private_data;
> +	struct xe_eudebug *d = vmf->debugger;
> +	struct xe_gt *gt;
> +	int gt_id;
> +	int ret = -EINVAL;
> +
> +	eu_dbg(d, "vm_fsync: client_handle=%llu, vm_handle=%llu, flags=0x%llx, start=%llu, end=%llu datasync=%d\n",
> +	       vmf->client_id, vmf->vm_handle, vmf->flags, start, end, datasync);
> +
> +	for_each_gt(gt, d->xe, gt_id) {
> +		struct xe_hw_engine *hwe;
> +		enum xe_hw_engine_id id;
> +
> +		/* XXX: vm open per engine? */
> +		for_each_hw_engine(hwe, gt, id) {
> +			if (hwe->class != XE_ENGINE_CLASS_RENDER &&
> +			    hwe->class != XE_ENGINE_CLASS_COMPUTE)
> +				continue;
> +
> +			ret = engine_rcu_flush(d, hwe, vmf->timeout_ns / 1000ull);
> +			if (ret)
> +				break;
> +		}
> +	}
> +
> +	return ret;
> +}
> +
> +static int xe_eudebug_vm_release(struct inode *inode, struct file *file)
> +{
> +	struct vm_file *vmf = file->private_data;
> +	struct xe_eudebug *d = vmf->debugger;
> +
> +	eu_dbg(d, "vm_release: client_handle=%llu, vm_handle=%llu, flags=0x%llx",
> +	       vmf->client_id, vmf->vm_handle, vmf->flags);
> +
> +	drm_dev_get(&d->xe->drm);
> +	xe_vm_put(vmf->vm);
> +	xe_eudebug_put(d);
> +	kfree(vmf);
> +
> +	return 0;
> +}
> +
> +static const struct file_operations vm_fops = {
> +	.owner   = THIS_MODULE,
> +	.llseek  = generic_file_llseek,
> +	.read    = xe_eudebug_vm_read,
> +	.write   = xe_eudebug_vm_write,
> +	.fsync   = xe_eudebug_vm_fsync,
> +	.mmap    = NULL,
> +	.release = xe_eudebug_vm_release,
> +};
> +
> +static long
> +xe_eudebug_vm_open_ioctl(struct xe_eudebug *d, unsigned long arg)
> +{
> +	struct drm_xe_eudebug_vm_open param;
> +	struct xe_device * const xe = d->xe;
> +	struct xe_eudebug *d_ref = NULL;
> +	struct vm_file *vmf = NULL;
> +	struct xe_file *xef;
> +	struct xe_vm *vm;
> +	struct file *file;
> +	long ret = 0;
> +	int fd;
> +
> +	if (XE_IOCTL_DBG(xe, _IOC_SIZE(DRM_XE_EUDEBUG_IOCTL_VM_OPEN) != sizeof(param)))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, !(_IOC_DIR(DRM_XE_EUDEBUG_IOCTL_VM_OPEN) & _IOC_WRITE)))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, copy_from_user(&param, (void __user *)arg, sizeof(param))))
> +		return -EFAULT;
> +
> +	if (XE_IOCTL_DBG(xe, param.flags))
> +		return -EINVAL;
> +
> +	if (XE_IOCTL_DBG(xe, xe_eudebug_detached(d)))
> +		return -ENOTCONN;
> +
> +	vm = NULL;
> +	mutex_lock(&d->xe->files.lock);
> +	xef = find_client(d, param.client_handle);
> +	if (XE_IOCTL_DBG(xe, !xef)) {
> +		mutex_unlock(&d->xe->files.lock);
> +		return -EINVAL;
> +	}
> +
> +	d_ref = xe_eudebug_get(xef);
> +	if (XE_IOCTL_DBG(xe, !d_ref)) {
> +		mutex_unlock(&d->xe->files.lock);
> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&xef->vm.lock);
> +	vm = find_vm(d, param.vm_handle);
> +	if (vm)
> +		xe_vm_get(vm);
> +	mutex_unlock(&xef->vm.lock);
> +	mutex_unlock(&d->xe->files.lock);
> +
> +	XE_WARN_ON(d != d_ref);
> +
> +	if (XE_IOCTL_DBG(xe, !vm)) {
> +		ret = -EINVAL;
> +		goto out_eudebug_put;
> +	}
> +
> +	vmf = kmalloc(sizeof(*vmf), GFP_KERNEL);
> +	if (XE_IOCTL_DBG(xe, !vmf)) {
> +		ret = -ENOMEM;
> +		goto out_vm_put;
> +	}
> +
> +	fd = get_unused_fd_flags(O_CLOEXEC);
> +	if (XE_IOCTL_DBG(xe, fd < 0)) {
> +		ret = fd;
> +		goto out_free;
> +	}
> +
> +	vmf->debugger = d_ref;
> +	vmf->vm = vm;
> +	vmf->flags = param.flags;
> +	vmf->client_id = param.client_handle;
> +	vmf->vm_handle = param.vm_handle;
> +	vmf->timeout_ns = param.timeout_ns;
> +
> +	file = anon_inode_getfile("[xe_eudebug.vm]", &vm_fops, vmf, O_RDWR);
> +	if (IS_ERR(file)) {
> +		ret = PTR_ERR(file);
> +		XE_IOCTL_DBG(xe, ret);
> +		file = NULL;
> +		goto out_file_put;
> +	}
> +
> +	drm_dev_get(&d->xe->drm);
> +
> +	file->f_mode |= FMODE_PREAD | FMODE_PWRITE |
> +		FMODE_READ | FMODE_WRITE | FMODE_LSEEK;
> +
> +	fd_install(fd, file);
> +
> +	eu_dbg(d, "vm_open: client_handle=%llu, handle=%llu, flags=0x%llx, fd=%d",
> +	       vmf->client_id, vmf->vm_handle, vmf->flags, fd);
> +
> +	XE_WARN_ON(ret);
> +
> +	return fd;
> +
> +out_file_put:
> +	put_unused_fd(fd);
> +out_free:
> +	kfree(vmf);
> +out_vm_put:
> +	xe_vm_put(vm);
> +out_eudebug_put:
> +	xe_eudebug_put(d_ref);
> +
> +	return ret;
> +}
> diff --git a/include/uapi/drm/xe_drm_eudebug.h b/include/uapi/drm/xe_drm_eudebug.h
> index 1875192e92bd..df79eafb6136 100644
> --- a/include/uapi/drm/xe_drm_eudebug.h
> +++ b/include/uapi/drm/xe_drm_eudebug.h
> @@ -18,6 +18,7 @@ extern "C" {
>  #define DRM_XE_EUDEBUG_IOCTL_READ_EVENT		_IO('j', 0x0)
>  #define DRM_XE_EUDEBUG_IOCTL_EU_CONTROL		_IOWR('j', 0x2, struct drm_xe_eudebug_eu_control)
>  #define DRM_XE_EUDEBUG_IOCTL_ACK_EVENT		_IOW('j', 0x4, struct drm_xe_eudebug_ack_event)
> +#define DRM_XE_EUDEBUG_IOCTL_VM_OPEN		_IOW('j', 0x1, struct drm_xe_eudebug_vm_open)
>  
>  /* XXX: Document events to match their internal counterparts when moved to xe_drm.h */
>  struct drm_xe_eudebug_event {
> @@ -171,6 +172,23 @@ struct drm_xe_eudebug_ack_event {
>  	__u64 seqno;
>  };
>  
> +struct drm_xe_eudebug_vm_open {
> +	/** @extensions: Pointer to the first extension struct, if any */
> +	__u64 extensions;
> +
> +	/** @client_handle: id of client */
> +	__u64 client_handle;
> +
> +	/** @vm_handle: id of vm */
> +	__u64 vm_handle;
> +
> +	/** @flags: flags */
> +	__u64 flags;
> +
> +	/** @timeout_ns: Timeout value in nanoseconds for operations (fsync) */
> +	__u64 timeout_ns;
> +};
> +
>  #if defined(__cplusplus)
>  }
>  #endif
> -- 
> 2.34.1
> 
