From: Jonathan Cavitt <jonathan.cavitt@intel.com>
To: igt-dev@lists.freedesktop.org
Cc: jonathan.cavitt@intel.com
Subject: [PATCH i-g-t v2] tests/xe_create: Use separate VMs per process
Date: Wed, 27 Dec 2023 09:07:04 -0800 [thread overview]
Message-ID: <20231227170704.2972287-1-jonathan.cavitt@intel.com> (raw)
We currently exercise cross-user impact in xe_create by attempting to
concurrently create a large number of exec queues on a single vm using
forked child processes. This is unrealistic, as multiple users are more
likely to be using separate file descriptors for creating exec queues.
Update the test to reflect this use case.
v2:
- Keep the original test path (which uses a single shared vm across
multiple forked processes) intact, using a flag to denote which path to
take.
- Increase the MAXTIME in the shared vm case.
Suggested-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
CC: Zbigniew Kempczynski <zbigniew.kempczynski@intel.com>
CC: Lucas De Marchi <lucas.demarchi@intel.com>
CC: Kamil Konieczny <kamil.konieczny@linux.intel.com>
CC: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
---
Resubmitted to generate a Patchwork series.
tests/intel/xe_create.c | 50 ++++++++++++++++++++++++++++++++---------
1 file changed, 40 insertions(+), 10 deletions(-)
diff --git a/tests/intel/xe_create.c b/tests/intel/xe_create.c
index 0aa32c788a..6d06708492 100644
--- a/tests/intel/xe_create.c
+++ b/tests/intel/xe_create.c
@@ -150,6 +150,11 @@ enum exec_queue_destroy {
LEAK
};
+enum vm_count {
+ MULTI,
+ SHARED
+};
+
#define MAXEXECQUEUES 2048
#define MAXTIME 5
@@ -163,16 +168,22 @@ enum exec_queue_destroy {
*
* @noleak: destroy exec_queues in the code
* @leak: destroy exec_queues in close() path
+ * @noleak-shared: same as noleak, but with a shared vm
+ * @leak-shared: same as leak, but with a shared vm
*/
-static void create_execqueues(int fd, enum exec_queue_destroy ed)
+static void create_execqueues(int fd, enum exec_queue_destroy ed,
+ enum vm_count vc)
{
struct timespec tv = { };
uint32_t num_engines, exec_queues_per_process, vm;
int nproc = sysconf(_SC_NPROCESSORS_ONLN), seconds;
+ int real_timeout = MAXTIME * (vc == SHARED ? 4 : 1);
- fd = drm_reopen_driver(fd);
- num_engines = xe_number_engines(fd);
- vm = xe_vm_create(fd, 0, 0);
+ if (vc == SHARED) {
+ fd = drm_reopen_driver(fd);
+ num_engines = xe_number_engines(fd);
+ vm = xe_vm_create(fd, 0, 0);
+ }
exec_queues_per_process = max_t(uint32_t, 1, MAXEXECQUEUES / nproc);
igt_debug("nproc: %u, exec_queues per process: %u\n", nproc, exec_queues_per_process);
@@ -184,6 +195,12 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
uint32_t exec_queue, exec_queues[exec_queues_per_process];
int idx, err, i;
+ if (vc == MULTI) {
+ fd = drm_reopen_driver(fd);
+ num_engines = xe_number_engines(fd);
+ vm = xe_vm_create(fd, 0, 0);
+ }
+
srandom(n);
for (i = 0; i < exec_queues_per_process; i++) {
@@ -206,16 +223,23 @@ static void create_execqueues(int fd, enum exec_queue_destroy ed)
xe_exec_queue_destroy(fd, exec_queues[i]);
}
}
+
+ if (vc == MULTI) {
+ xe_vm_destroy(fd, vm);
+ drm_close_driver(fd);
+ }
}
igt_waitchildren();
- xe_vm_destroy(fd, vm);
- drm_close_driver(fd);
+ if (vc == SHARED) {
+ xe_vm_destroy(fd, vm);
+ drm_close_driver(fd);
+ }
seconds = igt_seconds_elapsed(&tv);
- igt_assert_f(seconds < MAXTIME,
+ igt_assert_f(seconds < real_timeout,
"Creating %d exec_queues tooks too long: %d [limit: %d]\n",
- MAXEXECQUEUES, seconds, MAXTIME);
+ MAXEXECQUEUES, seconds, real_timeout);
}
/**
@@ -384,10 +408,16 @@ igt_main_args("Q:p:", NULL, help_str, opt_handler, NULL)
}
igt_subtest("create-execqueues-noleak")
- create_execqueues(xe, NOLEAK);
+ create_execqueues(xe, NOLEAK, MULTI);
igt_subtest("create-execqueues-leak")
- create_execqueues(xe, LEAK);
+ create_execqueues(xe, LEAK, MULTI);
+
+ igt_subtest("create-execqueues-noleak-shared")
+ create_execqueues(xe, NOLEAK, SHARED);
+
+ igt_subtest("create-execqueues-leak-shared")
+ create_execqueues(xe, LEAK, SHARED);
igt_subtest("create-massive-size") {
create_massive_size(xe);
--
2.25.1
next reply other threads:[~2023-12-27 17:17 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-27 17:07 Jonathan Cavitt [this message]
-- strict thread matches above, loose matches on Subject: below --
2023-12-20 15:37 [PATCH i-g-t v2] tests/xe_create: Use separate VMs per process Jonathan Cavitt
2023-12-21 20:44 ` Cavitt, Jonathan
2023-12-22 6:25 ` Zbigniew Kempczyński
2023-12-22 22:31 ` Welty, Brian
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231227170704.2972287-1-jonathan.cavitt@intel.com \
--to=jonathan.cavitt@intel.com \
--cc=igt-dev@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox