Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Brost <matthew.brost@intel.com>
To: Oak Zeng <oak.zeng@intel.com>
Cc: <igt-dev@lists.freedesktop.org>, <Thomas.Hellstrom@linux.intel.com>
Subject: Re: [i-g-t 2/2] tests/intel/xe_exec_fault_mode: Test scratch page under fault mode
Date: Mon, 10 Feb 2025 09:22:14 -0800	[thread overview]
Message-ID: <Z6o1xkp5oErv0daA@lstrano-desk.jf.intel.com> (raw)
In-Reply-To: <20250204181742.4055152-2-oak.zeng@intel.com>

On Tue, Feb 04, 2025 at 01:17:42PM -0500, Oak Zeng wrote:
> On certain HW (such as lunarlake and battlemage), the driver now allows
> the scratch page to be enabled under fault mode. Test this functionality.
> 
> Signed-off-by: Oak Zeng <oak.zeng@intel.com>
> ---
>  tests/intel/xe_exec_fault_mode.c | 128 +++++++++++++++++++------------
>  1 file changed, 79 insertions(+), 49 deletions(-)
> 
> diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
> index ae40e099b..935e6c044 100644
> --- a/tests/intel/xe_exec_fault_mode.c
> +++ b/tests/intel/xe_exec_fault_mode.c
> @@ -35,6 +35,7 @@
>  #define INVALID_FAULT	(0x1 << 7)
>  #define INVALID_VA	(0x1 << 8)
>  #define ENABLE_SCRATCH  (0x1 << 9)
> +#define ENABLE_FAULT    (0x1 << 10)
>  
>  /**
>   * SUBTEST: invalid-va
> @@ -45,6 +46,10 @@
> >  * Description: Access invalid va without page fault with scratch page enabled.
>   * Test category: functionality test
>   *
> + * SUBTEST: scratch-fault
> + * Description: Enable scratch page and page fault at the same time.
> + * Test category: functionality test
> + *
>   * SUBTEST: once-%s
>   * Description: Run %arg[1] fault mode test only once
>   * Test category: functionality test
> @@ -115,6 +120,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  	  int n_exec_queues, int n_execs, unsigned int flags)
>  {
>  	uint32_t vm;
> +	uint32_t vm_flags = DRM_XE_VM_CREATE_FLAG_LR_MODE;
>  	uint64_t addr = 0x1a0000;
>  	uint64_t sync_addr = 0x101a0000;
>  #define USER_FENCE_VALUE	0xdeadbeefdeadbeefull
> @@ -145,11 +151,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>  	igt_assert_lte(n_exec_queues, MAX_N_EXEC_QUEUES);
>  
>  	if (flags & ENABLE_SCRATCH)
> -		vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> -				  DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE, 0);
> -	else
> -		vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> -				  DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> +		vm_flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
> +	if (flags & ENABLE_FAULT)
> +		vm_flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE;

This is always set, right? I wouldn't add this new flag then.

> +	vm = xe_vm_create(fd, vm_flags, 0);
> +
>  	bo_size = sizeof(*data) * n_execs;
>  	bo_size = xe_bb_size(fd, bo_size);
>  	sync_size = sizeof(*exec_sync) * n_execs;
> @@ -405,59 +411,76 @@ igt_main
>  		const char *name;
>  		unsigned int flags;
>  	} sections[] = {
> -		{ "basic", 0 },
> -		{ "userptr", USERPTR },
> -		{ "rebind", REBIND },
> -		{ "userptr-rebind", USERPTR | REBIND },
> -		{ "userptr-invalidate", USERPTR | INVALIDATE },
> -		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> -		{ "bindexecqueue", BIND_EXEC_QUEUE },
> -		{ "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR },
> -		{ "bindexecqueue-rebind",  BIND_EXEC_QUEUE | REBIND },
> +		{ "basic", ENABLE_FAULT },
> +		{ "userptr", USERPTR | ENABLE_FAULT },
> +		{ "rebind", REBIND | ENABLE_FAULT },
> +		{ "userptr-rebind", USERPTR | REBIND | ENABLE_FAULT },
> +		{ "userptr-invalidate", USERPTR | INVALIDATE | ENABLE_FAULT },
> +		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE |
> +			ENABLE_FAULT },
> +		{ "bindexecqueue", BIND_EXEC_QUEUE | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR |
> +			ENABLE_FAULT },
> +		{ "bindexecqueue-rebind",  BIND_EXEC_QUEUE | REBIND |
> +			ENABLE_FAULT },
>  		{ "bindexecqueue-userptr-rebind", BIND_EXEC_QUEUE | USERPTR |
> -			REBIND },
> -		{ "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE | USERPTR |
> -			INVALIDATE },
> -		{ "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE | USERPTR |
> -			INVALIDATE | RACE },
> -		{ "basic-imm", IMMEDIATE },
> -		{ "userptr-imm", IMMEDIATE | USERPTR },
> -		{ "rebind-imm", IMMEDIATE | REBIND },
> -		{ "userptr-rebind-imm", IMMEDIATE | USERPTR | REBIND },
> -		{ "userptr-invalidate-imm", IMMEDIATE | USERPTR | INVALIDATE },
> +			REBIND | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE |
> +			USERPTR | INVALIDATE | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE |
> +			USERPTR | INVALIDATE | RACE | ENABLE_FAULT },
> +		{ "basic-imm", IMMEDIATE | ENABLE_FAULT },
> +		{ "userptr-imm", IMMEDIATE | USERPTR | ENABLE_FAULT },
> +		{ "rebind-imm", IMMEDIATE | REBIND | ENABLE_FAULT },
> +		{ "userptr-rebind-imm", IMMEDIATE | USERPTR | REBIND |
> +			ENABLE_FAULT },
> +		{ "userptr-invalidate-imm", IMMEDIATE | USERPTR | INVALIDATE |
> +			ENABLE_FAULT },
>  		{ "userptr-invalidate-race-imm", IMMEDIATE | USERPTR |
> -			INVALIDATE | RACE },
> -		{ "bindexecqueue-imm", IMMEDIATE | BIND_EXEC_QUEUE },
> -		{ "bindexecqueue-userptr-imm", IMMEDIATE | BIND_EXEC_QUEUE | USERPTR },
> -		{ "bindexecqueue-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE | REBIND },
> -		{ "bindexecqueue-userptr-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> -			USERPTR | REBIND },
> +			INVALIDATE | RACE | ENABLE_FAULT },
> +		{ "bindexecqueue-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> +			ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> +			USERPTR | ENABLE_FAULT },
> +		{ "bindexecqueue-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> +			REBIND | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-rebind-imm", IMMEDIATE |
> +			BIND_EXEC_QUEUE | USERPTR | REBIND | ENABLE_FAULT },
>  		{ "bindexecqueue-userptr-invalidate-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> -			USERPTR | INVALIDATE },
> +			USERPTR | INVALIDATE | ENABLE_FAULT },
>  		{ "bindexecqueue-userptr-invalidate-race-imm", IMMEDIATE |
> -			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> -
> -		{ "basic-prefetch", PREFETCH },
> -		{ "userptr-prefetch", PREFETCH | USERPTR },
> -		{ "rebind-prefetch", PREFETCH | REBIND },
> -		{ "userptr-rebind-prefetch", PREFETCH | USERPTR | REBIND },
> -		{ "userptr-invalidate-prefetch", PREFETCH | USERPTR | INVALIDATE },
> +			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE |
> +			ENABLE_FAULT },
> +
> +		{ "basic-prefetch", PREFETCH | ENABLE_FAULT },
> +		{ "userptr-prefetch", PREFETCH | USERPTR | ENABLE_FAULT },
> +		{ "rebind-prefetch", PREFETCH | REBIND | ENABLE_FAULT },
> +		{ "userptr-rebind-prefetch", PREFETCH | USERPTR | REBIND |
> +			ENABLE_FAULT },
> +		{ "userptr-invalidate-prefetch", PREFETCH | USERPTR |
> +			INVALIDATE | ENABLE_FAULT },
>  		{ "userptr-invalidate-race-prefetch", PREFETCH | USERPTR |
> -			INVALIDATE | RACE },
> -		{ "bindexecqueue-prefetch", PREFETCH | BIND_EXEC_QUEUE },
> -		{ "bindexecqueue-userptr-prefetch", PREFETCH | BIND_EXEC_QUEUE | USERPTR },
> -		{ "bindexecqueue-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE | REBIND },
> -		{ "bindexecqueue-userptr-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> -			USERPTR | REBIND },
> -		{ "bindexecqueue-userptr-invalidate-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> -			USERPTR | INVALIDATE },
> +			INVALIDATE | RACE | ENABLE_FAULT },
> +		{ "bindexecqueue-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> +			ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> +			USERPTR | ENABLE_FAULT },
> +		{ "bindexecqueue-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> +			REBIND | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-rebind-prefetch", PREFETCH |
> +			BIND_EXEC_QUEUE | USERPTR | REBIND | ENABLE_FAULT },
> +		{ "bindexecqueue-userptr-invalidate-prefetch", PREFETCH |
> +			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | ENABLE_FAULT },
>  		{ "bindexecqueue-userptr-invalidate-race-prefetch", PREFETCH |
> -			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> -		{ "invalid-fault", INVALID_FAULT },
> -		{ "invalid-userptr-fault", INVALID_FAULT | USERPTR },
> +			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE |
> +			ENABLE_FAULT },
> +		{ "invalid-fault", INVALID_FAULT | ENABLE_FAULT },
> +		{ "invalid-userptr-fault", INVALID_FAULT | USERPTR |
> +			ENABLE_FAULT },
>  		{ NULL },
>  	};
>  	int fd;
> +	uint16_t dev_id;
>  
>  	igt_fixture {
>  		struct timespec tv = {};
> @@ -466,6 +489,7 @@ igt_main
>  		int timeout = igt_run_in_simulation() ? 20 : 2;
>  
>  		fd = drm_open_driver(DRIVER_XE);
> +		dev_id = intel_get_drm_devid(fd);
>  		do {
>  			if (ret)
>  				usleep(5000);
> @@ -508,6 +532,12 @@ igt_main
>  		xe_for_each_engine(fd, hwe)
>  			test_exec(fd, hwe, 1, 1, ENABLE_SCRATCH | INVALID_VA);
>  
> +	igt_subtest("scratch-fault") {
> +		igt_skip_on(!IS_LUNARLAKE(dev_id) && !IS_BATTLEMAGE(dev_id));
> +		xe_for_each_engine(fd, hwe)
> +			test_exec(fd, hwe, 1, 1, ENABLE_SCRATCH | ENABLE_FAULT);

This is a good start but we really need this section to roughly do
something like this to get good coverage:

- Run a batch with a scratch page access, wait for the batch to complete
- Bind the address that took the scratch page access, w/ immediate cleared
- Run a batch with an access to the newly bound address, wait for the batch
  to complete, verify the access worked

'access' here likely is DW write.

Matt

> +	}
> +
>  	igt_fixture {
>  		drm_close_driver(fd);
>  	}
> -- 
> 2.26.3
> 

  reply	other threads:[~2025-02-10 17:21 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-04 18:17 [i-g-t 1/2] tests/intel/xe_vm: Exclude invalid_flags tests from LNL and BMG Oak Zeng
2025-02-04 18:17 ` [i-g-t 2/2] tests/intel/xe_exec_fault_mode: Test scratch page under fault mode Oak Zeng
2025-02-10 17:22   ` Matthew Brost [this message]
2025-02-11 21:53     ` Zeng, Oak
2025-02-12  7:16       ` Matthew Brost
2025-02-05  1:34 ` ✗ Xe.CI.BAT: failure for series starting with [i-g-t,1/2] tests/intel/xe_vm: Exclude invalid_flags tests from LNL and BMG Patchwork
2025-02-05  1:47 ` ✓ i915.CI.BAT: success " Patchwork
2025-02-05  7:47 ` ✗ Xe.CI.Full: failure " Patchwork
2025-02-05  9:47 ` ✗ i915.CI.Full: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Z6o1xkp5oErv0daA@lstrano-desk.jf.intel.com \
    --to=matthew.brost@intel.com \
    --cc=Thomas.Hellstrom@linux.intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=oak.zeng@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox