From: Nirmoy Das <nirmoy.das@linux.intel.com>
To: Jonathan Cavitt <jonathan.cavitt@intel.com>,
igt-dev@lists.freedesktop.org
Cc: saurabhg.gupta@intel.com, chris.p.wilson@linux.intel.com,
yu.bruce.chang@intel.com
Subject: Re: [PATCH i-g-t] i915/gem_exec_whisper: Limit workload
Date: Mon, 25 Mar 2024 17:05:29 +0100 [thread overview]
Message-ID: <389a619b-3a8c-4796-ab71-6169c9d6969e@linux.intel.com> (raw)
In-Reply-To: <20240321183005.2130924-1-jonathan.cavitt@intel.com>
On 3/21/2024 7:30 PM, Jonathan Cavitt wrote:
> From: Chris Wilson <chris.p.wilson@linux.intel.com>
>
> For large machines with lots of cpus and many engines, using a workload
> that tries to use every engine from every cpu causes massive
> oversaturation. The goal of the test workload is to cause saturation on
> both engines and cpus so that we apply some pressure on the scheduler to
> maintain order, but to do so we only need to ensure each scheduling
> queue is saturated and there is sufficient pressure for the CPU
> scheduler to push work across cores. We can impose a limit on the number
> of threads such that we ensure that every engine and every core should
> be occupied. Furthermore we can scale the amount of work submitted by
> each thread to keep the total amount of work under a reasonable time
> bound.
>
> Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
Need your sign-off here. Otherwise,
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
> CC: Bruce Chang <yu.bruce.chang@intel.com>
> ---
> tests/intel/gem_exec_whisper.c | 24 +++++++++++++++---------
> 1 file changed, 15 insertions(+), 9 deletions(-)
>
> diff --git a/tests/intel/gem_exec_whisper.c b/tests/intel/gem_exec_whisper.c
> index effb473350..429dfc00d5 100644
> --- a/tests/intel/gem_exec_whisper.c
> +++ b/tests/intel/gem_exec_whisper.c
> @@ -294,6 +294,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
> {
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
> + const unsigned int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
> struct drm_i915_gem_exec_object2 batches[QLEN];
> struct drm_i915_gem_relocation_entry inter[QLEN];
> struct drm_i915_gem_relocation_entry reloc;
> @@ -306,18 +307,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
> int fds[64];
> intel_ctx_cfg_t local_cfg;
> const intel_ctx_t *contexts[64];
> - unsigned nengine;
> uint32_t batch[16];
> unsigned int relocations = 0;
> unsigned int reloc_migrations = 0;
> unsigned int reloc_interruptions = 0;
> unsigned int eb_migrations = 0;
> struct power_sample sample[2];
> + unsigned int nengine;
> + unsigned int nchild;
> + unsigned int qlen;
> struct igt_power gpu;
> uint64_t old_offset;
> int i, n, loc;
> int debugfs;
> - int nchild;
> bool has_relocs = gem_has_relocations(fd);
>
> if (flags & PRIORITY) {
> @@ -356,9 +358,13 @@ static void whisper(int fd, const intel_ctx_t *ctx,
>
> nchild = 1;
> if (flags & FORKED)
> - nchild *= sysconf(_SC_NPROCESSORS_ONLN);
> + nchild *= ncpus;
> if (flags & ALL)
> nchild *= nengine;
> + nchild = min(nchild, 2 * max(ncpus, nengine));
> + qlen = max(2u, QLEN * nengine / (nchild + nengine - 1));
> + igt_info("Using nchild:%d (out of ncpus:%d and nengine:%d), with a qlen:%d\n",
> + nchild, ncpus, nengine, qlen);
>
> intel_detect_and_clear_missed_interrupts(fd);
> igt_power_get_energy(&gpu, &sample[0]);
> @@ -448,19 +454,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
> }
>
> memset(batches, 0, sizeof(batches));
> - for (n = 0; n < QLEN; n++) {
> + for (n = 0; n < qlen; n++) {
> batches[n].handle = gem_create(fd, 4096);
> gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe));
> }
> execbuf.buffers_ptr = to_user_pointer(batches);
> - execbuf.buffer_count = QLEN;
> + execbuf.buffer_count = qlen;
> gem_execbuf(fd, &execbuf);
>
> execbuf.buffers_ptr = to_user_pointer(tmp);
> execbuf.buffer_count = 2;
>
> old_offset = store.offset;
> - for (n = 0; n < QLEN; n++) {
> + for (n = 0; n < qlen; n++) {
> if (gen >= 8) {
> batch[1] = old_offset + loc;
> batch[2] = (old_offset + loc) >> 32;
> @@ -525,8 +531,8 @@ static void whisper(int fd, const intel_ctx_t *ctx,
> gem_sync(fd, tmp[0].handle);
> scratch = tmp[0];
>
> - gem_write(fd, batches[QLEN-1].handle, loc, &pass, sizeof(pass));
> - for (n = QLEN; --n >= 1; ) {
> + gem_write(fd, batches[qlen-1].handle, loc, &pass, sizeof(pass));
> + for (n = qlen; --n >= 1; ) {
> uint32_t handle[2] = {};
> int this_fd = fd;
>
> @@ -648,7 +654,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
> gem_vm_destroy(fd, local_cfg.vm);
> }
> }
> - for (n = 0; n < QLEN; n++)
> + for (n = 0; n < qlen; n++)
> gem_close(fd, batches[n].handle);
> if (flags & FDS) {
> for (n = 0; n < 64; n++)
next prev parent reply other threads:[~2024-03-25 16:05 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-03-21 18:30 [PATCH i-g-t] i915/gem_exec_whisper: Limit workload Jonathan Cavitt
2024-03-21 21:00 ` ✗ Fi.CI.BAT: failure for " Patchwork
2024-03-21 21:16 ` ✓ CI.xeBAT: success " Patchwork
2024-03-25 16:05 ` Nirmoy Das [this message]
-- strict thread matches above, loose matches on Subject: below --
2024-03-25 16:13 [PATCH i-g-t] " Jonathan Cavitt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=389a619b-3a8c-4796-ab71-6169c9d6969e@linux.intel.com \
--to=nirmoy.das@linux.intel.com \
--cc=chris.p.wilson@linux.intel.com \
--cc=igt-dev@lists.freedesktop.org \
--cc=jonathan.cavitt@intel.com \
--cc=saurabhg.gupta@intel.com \
--cc=yu.bruce.chang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox