Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
Cc: igt-dev@lists.freedesktop.org
Subject: Re: [igt-dev] [PATCH v4 1/1] drm-uapi/xe: kill xe_wait_user_fence_ioctl when exec_queue reset happen
Date: Wed, 6 Dec 2023 12:06:40 -0500	[thread overview]
Message-ID: <ZXCqICWncxDikEVK@intel.com> (raw)
In-Reply-To: <20231206144452.19745-3-krishnaiah.bommu@intel.com>

On Wed, Dec 06, 2023 at 08:14:51PM +0530, Bommu Krishnaiah wrote:
> Skipping the GPU mapping (vm_bind) for the object, so that an exec_queue
> reset will happen and xe_wait_ufence will return EIO instead of ETIME
> 
> Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: Francois Dugast <francois.dugast@intel.com>
> ---
>  tests/intel/xe_waitfence.c | 83 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 83 insertions(+)
> 
> diff --git a/tests/intel/xe_waitfence.c b/tests/intel/xe_waitfence.c
> index 0bd7e5dce..b5bf0d5b6 100644
> --- a/tests/intel/xe_waitfence.c
> +++ b/tests/intel/xe_waitfence.c
> @@ -177,6 +177,9 @@ waitfence(int fd, enum waittype wt)
>   *
>   * SUBTEST: invalid-ops
>   * Description: Check query with invalid ops returns expected error code
> + *
> + * SUBTEST: invalid-exec_queue-wait
> + * Description: Check that xe_wait_ufence returns the expected error code when an exec_queue reset happens
>   */
>  
>  static void
> @@ -227,6 +230,83 @@ invalid_ops(int fd)
>  	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EINVAL);
>  }
>  
> +static void
> +invalid_exec_queue_wait(int fd)
> +{
> +	uint32_t bo, b;
> +	uint64_t batch_offset;
> +	uint64_t batch_addr;
> +	uint64_t sdi_offset;
> +	uint64_t sdi_addr;
> +	uint64_t addr = 0x1a0000;
> +
> +	struct {
> +		uint32_t batch[16];
> +		uint64_t pad;
> +		uint64_t vm_sync;
> +		uint64_t exec_sync;
> +		uint32_t data;
> +	} *data;
> +
> +#define USER_FENCE_VALUE        0xdeadbeefdeadbeefull
> +	struct drm_xe_sync sync[1] = {
> +		{ .flags = DRM_XE_SYNC_TYPE_USER_FENCE | DRM_XE_SYNC_FLAG_SIGNAL,
> +			.timeline_value = USER_FENCE_VALUE },
> +	};
> +
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 1,
> +		.syncs = to_user_pointer(sync),
> +	};
> +
> +	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT, 0);
> +	uint32_t exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
> +	struct drm_xe_wait_user_fence wait = {
> +		.op = DRM_XE_UFENCE_WAIT_OP_EQ,
> +		.flags = 0,
> +		.value = 0xc0ffee,
> +		.mask = DRM_XE_UFENCE_WAIT_MASK_U64,
> +		.timeout = -1,
> +		.exec_queue_id = exec_queue,

this doesn't exist without patch 2, so this shouldn't be the first patch in the series.
Also, the patch is not doing what the subject says, so I'm really confused here.

> +	};
> +
> +	bo = xe_bo_create(fd, vm, 0x40000, vram_if_possible(fd, 0), 0);
> +	data = xe_bo_map(fd, bo, 0x40000);
> +
> +	batch_offset = (char *)&data[0].batch - (char *)data;
> +	batch_addr = addr + batch_offset;
> +	sdi_offset = (char *)&data[0].data - (char *)data;
> +	sdi_addr = addr + sdi_offset;
> +
> +	b = 0;
> +	data[0].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> +	data[0].batch[b++] = sdi_addr;
> +	data[0].batch[b++] = sdi_addr >> 32;
> +	data[0].batch[b++] = 0xc0ffee;
> +	data[0].batch[b++] = MI_BATCH_BUFFER_END;
> +	igt_assert(b <= ARRAY_SIZE(data[0].batch));
> +
> +	wait.addr = to_user_pointer(&data[0].exec_sync);
> +	exec.exec_queue_id = exec_queue;
> +	exec.address = batch_addr;
> +
> +	xe_exec(fd, &exec);
> +
> +	/*
> +	 * Skipping the GPU mapping (vm_bind) for the object, so that an
> +	 * exec_queue reset will happen and xe_wait_ufence will return
> +	 * EIO instead of ETIME
> +	 */
> +	do_ioctl_err(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait, EIO);
> +
> +	xe_exec_queue_destroy(fd, exec_queue);
> +
> +	if (bo) {
> +		 munmap(data, 0x40000);
> +		 gem_close(fd, bo);
> +	}
> +}
> +
>  igt_main
>  {
>  	int fd;
> @@ -249,6 +329,9 @@ igt_main
>  	igt_subtest("invalid-ops")
>  		invalid_ops(fd);
>  
> +	igt_subtest("invalid-exec_queue-wait")
> +		invalid_exec_queue_wait(fd);
> +
>  	igt_fixture
>  		drm_close_driver(fd);
>  }
> -- 
> 2.25.1
> 

  reply	other threads:[~2023-12-06 17:07 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-06 14:44 [igt-dev] [PATCH v4 0/2] RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure Bommu Krishnaiah
2023-12-06 14:44 ` [igt-dev] [PATCH v4 1/2] " Bommu Krishnaiah
2023-12-06 17:04   ` Rodrigo Vivi
2023-12-06 17:23     ` Bommu, Krishnaiah
2023-12-06 17:36       ` Rodrigo Vivi
2023-12-06 14:44 ` [igt-dev] [PATCH v4 1/1] drm-uapi/xe: kill xe_wait_user_fence_ioctl when exec_queue reset happen Bommu Krishnaiah
2023-12-06 17:06   ` Rodrigo Vivi [this message]
2023-12-06 14:44 ` [igt-dev] [PATCH v4 2/2] " Bommu Krishnaiah
2023-12-06 17:08   ` Rodrigo Vivi
2023-12-06 17:25 ` [igt-dev] ✓ Fi.CI.BAT: success for RFC: drm-uapi/xe: add exec_queue_id member to drm_xe_wait_user_fence structure (rev4) Patchwork
2023-12-06 19:09 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2023-12-06 19:20 ` [igt-dev] ✗ CI.xeBAT: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZXCqICWncxDikEVK@intel.com \
    --to=rodrigo.vivi@intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=krishnaiah.bommu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox