Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Lisovskiy, Stanislav" <stanislav.lisovskiy@intel.com>
To: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
Cc: igt-dev@lists.freedesktop.org, Oak Zeng <oak.zeng@intel.com>,
	Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Subject: Re: [PATCH i-g-t v2 06/10] tests/intel/xe_svm: svm_atomic_access
Date: Tue, 14 May 2024 11:14:21 +0300	[thread overview]
Message-ID: <ZkMdXZAq5eq5ACY2@intel.com> (raw)
In-Reply-To: <20240514071026.748257-7-krishnaiah.bommu@intel.com>

On Tue, May 14, 2024 at 12:40:22PM +0530, Bommu Krishnaiah wrote:
> Verify GPU atomic access any location in malloc'ed memory by using svm

Shouldn't we somehow also explicitly ensure that GPU malloc'ed memory
access is indeed atomic?
The current test just checks that the GPU actually does the increment.
However, if that is supposed to be atomic, we probably need to do
something like modifying it from multiple threads; the end result
should then be incremented twice (or accordingly), or something like that.

Otherwise it is not clear what is meant by "atomicity" here.

Stan

> 
> Signed-off-by: Bommu Krishnaiah <krishnaiah.bommu@intel.com>
> Cc: Oak Zeng <oak.zeng@intel.com>
> Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> ---
>  lib/xe/xe_util.c     | 17 +++++++++++++++++
>  lib/xe/xe_util.h     |  1 +
>  tests/intel/xe_svm.c | 39 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 57 insertions(+)
> 
> diff --git a/lib/xe/xe_util.c b/lib/xe/xe_util.c
> index 1bdb5fa08..0e28c0093 100644
> --- a/lib/xe/xe_util.c
> +++ b/lib/xe/xe_util.c
> @@ -107,6 +107,23 @@ void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val)
>  	batch[++i] = MI_BATCH_BUFFER_END;
>  }
>  
> +/*
> +a command buffer is a buffer in GT0's vram, filled with gpu commands,
> +plus some memory for a ufence used to sync command submission
> +*/
> +void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val)
> +{
> +	int i = 0;
> +
> +	//suppress compiler warning
> +	(void)(val);
> +
> +	batch[i] = MI_STORE_DWORD_IMM_GEN4;
> +	batch[++i] = dst_va;
> +	batch[++i] = dst_va >> 32;
> +	batch[++i] = MI_BATCH_BUFFER_END;
> +}
> +
>  void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func, uint64_t dst_va, uint32_t val, struct drm_xe_engine_class_instance *eci)
>  {
>  	//make some room for a exec_ufence, which will be used to sync the
> diff --git a/lib/xe/xe_util.h b/lib/xe/xe_util.h
> index c38f79e60..46e1ccc9a 100644
> --- a/lib/xe/xe_util.h
> +++ b/lib/xe/xe_util.h
> @@ -40,6 +40,7 @@ void xe_create_cmdbuf(struct xe_buffer *cmd_buf, cmdbuf_fill_func_t fill_func,
>  uint64_t xe_cmdbuf_exec_ufence_gpuva(struct xe_buffer *cmd_buf);
>  uint64_t *xe_cmdbuf_exec_ufence_cpuva(struct xe_buffer *cmd_buf);
>  void insert_store(uint32_t *batch, uint64_t dst_va, uint32_t val);
> +void insert_atomic_inc(uint32_t *batch, uint64_t dst_va, uint32_t val);
>  void xe_submit_cmd(struct xe_buffer *cmdbuf);
>  int64_t __xe_submit_cmd(struct xe_buffer *cmdbuf);
>  void xe_destroy_buffer(struct xe_buffer *buffer);
> diff --git a/tests/intel/xe_svm.c b/tests/intel/xe_svm.c
> index 4f2818cc8..421d7fd1a 100644
> --- a/tests/intel/xe_svm.c
> +++ b/tests/intel/xe_svm.c
> @@ -30,6 +30,8 @@
>   * Description: verify SVM basic functionality by using randomly access any location in malloc'ed memory
>   * SUBTEST: svm-huge-page
>   * Description: verify SVM basic functionality by using huge page access
> + * SUBTEST: svm-atomic-access
> + * Description: verify SVM basic functionality by using GPU atomic access any location in malloc'ed memory
>   */
>  
>  #include <fcntl.h>
> @@ -189,6 +191,39 @@ static void svm_thp(int fd, uint32_t vm, struct drm_xe_engine_class_instance *ec
>  	free(dst);
>  }
>  
> +/**
> + *  Test GPU atomic access any location in malloc'ed memory
> + */
> +static void svm_atomic_access(int fd, uint32_t vm, struct drm_xe_engine_class_instance *eci)
> +{
> +	uint64_t gpu_va = 0x1a0000;
> +	int val = 0xc0ffee;
> +	size_t bo_size = xe_bb_size(fd, PAGE_ALIGN_UFENCE);
> +	uint32_t *dst, *dst_to_access;
> +	uint32_t size = 1024*1024, sz_dw = size/4;
> +
> +	struct xe_buffer cmd_buf = {
> +		.fd = fd,
> +		.gpu_addr = (void *)(uintptr_t)gpu_va,
> +		.vm = vm,
> +		.size = bo_size,
> +		.placement = vram_if_possible(fd, eci->gt_id),
> +		.flag = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
> +	};
> +
> +	dst = aligned_alloc(xe_get_default_alignment(fd), size);
> +	dst_to_access = dst + random()%sz_dw;
> +	*dst_to_access = val;
> +
> +	xe_create_cmdbuf(&cmd_buf, insert_atomic_inc, (uint64_t)dst_to_access, val, eci);
> +	xe_submit_cmd(&cmd_buf);
> +
> +	igt_assert_eq(*dst_to_access, val + 1);
> +
> +	xe_destroy_cmdbuf(&cmd_buf);
> +	free(dst);
> +}
> +
>  igt_main
>  {
>  	int fd;
> @@ -223,6 +258,10 @@ igt_main
>  		xe_for_each_engine(fd, hwe)
>  			svm_thp(fd, vm, hwe);
>  
> +	igt_subtest_f("svm-atomic-access")
> +		xe_for_each_engine(fd, hwe)
> +			svm_atomic_access(fd, vm, hwe);
> +
>  	igt_fixture {
>  		xe_vm_destroy(fd, vm);
>  		drm_close_driver(fd);
> -- 
> 2.25.1
> 

  reply	other threads:[~2024-05-14  8:14 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-05-14  7:10 [PATCH i-g-t v2 00/10] helper function Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 01/10] lib/xe/xe_util: Introduce helper functions for buffer creation and command submission etc Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 02/10] tests/intel/xe_svm: basic xe_svm test Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 03/10] tests/xe_svm: basic svm test Bommu Krishnaiah
2024-05-15 17:38   ` Kamil Konieczny
2024-05-14  7:10 ` [PATCH i-g-t v2 04/10] tests/intel/xe_svm: svm_random_access Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 05/10] tests/intel/xe_svm: svm-huge-page Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 06/10] tests/intel/xe_svm: svm_atomic_access Bommu Krishnaiah
2024-05-14  8:14   ` Lisovskiy, Stanislav [this message]
2024-05-14  7:10 ` [PATCH i-g-t v2 07/10] tests/intel/xe_svm: svm-invalid-va Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 08/10] tests/intel/xe_svm: svm_benchmark Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 09/10] tests/intel/xe_svm: svm_mprotect Bommu Krishnaiah
2024-05-14  7:10 ` [PATCH i-g-t v2 10/10] tests/intel/xe_svm: svm-sparse-access Bommu Krishnaiah
2024-05-14  7:52 ` ✗ Fi.CI.BUILD: failure for helper function (rev2) Patchwork
2024-05-14  7:58 ` ✗ GitLab.Pipeline: warning " Patchwork
2024-05-15 17:29 ` [PATCH i-g-t v2 00/10] helper function Kamil Konieczny

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZkMdXZAq5eq5ACY2@intel.com \
    --to=stanislav.lisovskiy@intel.com \
    --cc=himal.prasad.ghimiray@intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=krishnaiah.bommu@intel.com \
    --cc=oak.zeng@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox