Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH i-g-t] i915/gem_exec_whisper: Limit workload
@ 2024-03-25 16:13 Jonathan Cavitt
  2024-03-25 19:00 ` ✓ Fi.CI.BAT: success for i915/gem_exec_whisper: Limit workload (rev2) Patchwork
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Jonathan Cavitt @ 2024-03-25 16:13 UTC (permalink / raw)
  To: igt-dev
  Cc: jonathan.cavitt, saurabhg.gupta, chris.p.wilson, nirmoy.das,
	kamil.konieczny

From: Chris Wilson <chris.p.wilson@linux.intel.com>

For large machines with lots of cpus and many engines, using a workload
that tries to use every engine from every cpu causes massive
oversaturation. The goal of the test workload is to cause saturation on
both engines and cpus so that we apply some pressure on the scheduler to
maintain order, but to do so we only need to ensure each scheduling
queue is saturated and there is sufficient pressure for the CPU
scheduler to push work across cores. We can impose a limit on the number
of threads such that we ensure that every engine and every core should
be occupied. Furthermore we can scale the amount of work submitted by
each thread to keep the total amount of work under a reasonable time
bound.

Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>

---
 tests/intel/gem_exec_whisper.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/tests/intel/gem_exec_whisper.c b/tests/intel/gem_exec_whisper.c
index effb473350..429dfc00d5 100644
--- a/tests/intel/gem_exec_whisper.c
+++ b/tests/intel/gem_exec_whisper.c
@@ -294,6 +294,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
+	const unsigned int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
 	struct drm_i915_gem_exec_object2 batches[QLEN];
 	struct drm_i915_gem_relocation_entry inter[QLEN];
 	struct drm_i915_gem_relocation_entry reloc;
@@ -306,18 +307,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 	int fds[64];
 	intel_ctx_cfg_t local_cfg;
 	const intel_ctx_t *contexts[64];
-	unsigned nengine;
 	uint32_t batch[16];
 	unsigned int relocations = 0;
 	unsigned int reloc_migrations = 0;
 	unsigned int reloc_interruptions = 0;
 	unsigned int eb_migrations = 0;
 	struct power_sample sample[2];
+	unsigned int nengine;
+	unsigned int nchild;
+	unsigned int qlen;
 	struct igt_power gpu;
 	uint64_t old_offset;
 	int i, n, loc;
 	int debugfs;
-	int nchild;
 	bool has_relocs = gem_has_relocations(fd);
 
 	if (flags & PRIORITY) {
@@ -356,9 +358,13 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 
 	nchild = 1;
 	if (flags & FORKED)
-		nchild *= sysconf(_SC_NPROCESSORS_ONLN);
+		nchild *= ncpus;
 	if (flags & ALL)
 		nchild *= nengine;
+	nchild = min(nchild, 2 * max(ncpus, nengine));
+	qlen = max(2u, QLEN * nengine / (nchild + nengine - 1));
+	igt_info("Using nchild:%d (out of ncpus:%d and nengine:%d), with a qlen:%d\n",
+		 nchild, ncpus, nengine, qlen);
 
 	intel_detect_and_clear_missed_interrupts(fd);
 	igt_power_get_energy(&gpu, &sample[0]);
@@ -448,19 +454,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 		}
 
 		memset(batches, 0, sizeof(batches));
-		for (n = 0; n < QLEN; n++) {
+		for (n = 0; n < qlen; n++) {
 			batches[n].handle = gem_create(fd, 4096);
 			gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe));
 		}
 		execbuf.buffers_ptr = to_user_pointer(batches);
-		execbuf.buffer_count = QLEN;
+		execbuf.buffer_count = qlen;
 		gem_execbuf(fd, &execbuf);
 
 		execbuf.buffers_ptr = to_user_pointer(tmp);
 		execbuf.buffer_count = 2;
 
 		old_offset = store.offset;
-		for (n = 0; n < QLEN; n++) {
+		for (n = 0; n < qlen; n++) {
 			if (gen >= 8) {
 				batch[1] = old_offset + loc;
 				batch[2] = (old_offset + loc) >> 32;
@@ -525,8 +531,8 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 					gem_sync(fd, tmp[0].handle);
 				scratch = tmp[0];
 
-				gem_write(fd, batches[QLEN-1].handle, loc, &pass, sizeof(pass));
-				for (n = QLEN; --n >= 1; ) {
+				gem_write(fd, batches[qlen-1].handle, loc, &pass, sizeof(pass));
+				for (n = qlen; --n >= 1; ) {
 					uint32_t handle[2] = {};
 					int this_fd = fd;
 
@@ -648,7 +654,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 				gem_vm_destroy(fd, local_cfg.vm);
 			}
 		}
-		for (n = 0; n < QLEN; n++)
+		for (n = 0; n < qlen; n++)
 			gem_close(fd, batches[n].handle);
 		if (flags & FDS) {
 			for (n = 0; n < 64; n++)
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread
* [PATCH i-g-t] i915/gem_exec_whisper: Limit workload
@ 2024-03-21 18:30 Jonathan Cavitt
  2024-03-25 16:05 ` Nirmoy Das
  0 siblings, 1 reply; 7+ messages in thread
From: Jonathan Cavitt @ 2024-03-21 18:30 UTC (permalink / raw)
  To: igt-dev; +Cc: jonathan.cavitt, saurabhg.gupta, chris.p.wilson, yu.bruce.chang

From: Chris Wilson <chris.p.wilson@linux.intel.com>

For large machines with lots of cpus and many engines, using a workload
that tries to use every engine from every cpu causes massive
oversaturation. The goal of the test workload is to cause saturation on
both engines and cpus so that we apply some pressure on the scheduler to
maintain order, but to do so we only need to ensure each scheduling
queue is saturated and there is sufficient pressure for the CPU
scheduler to push work across cores. We can impose a limit on the number
of threads such that we ensure that every engine and every core should
be occupied. Furthermore we can scale the amount of work submitted by
each thread to keep the total amount of work under a reasonable time
bound.

Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
CC: Bruce Chang <yu.bruce.chang@intel.com>
---
 tests/intel/gem_exec_whisper.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/tests/intel/gem_exec_whisper.c b/tests/intel/gem_exec_whisper.c
index effb473350..429dfc00d5 100644
--- a/tests/intel/gem_exec_whisper.c
+++ b/tests/intel/gem_exec_whisper.c
@@ -294,6 +294,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
+	const unsigned int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
 	struct drm_i915_gem_exec_object2 batches[QLEN];
 	struct drm_i915_gem_relocation_entry inter[QLEN];
 	struct drm_i915_gem_relocation_entry reloc;
@@ -306,18 +307,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 	int fds[64];
 	intel_ctx_cfg_t local_cfg;
 	const intel_ctx_t *contexts[64];
-	unsigned nengine;
 	uint32_t batch[16];
 	unsigned int relocations = 0;
 	unsigned int reloc_migrations = 0;
 	unsigned int reloc_interruptions = 0;
 	unsigned int eb_migrations = 0;
 	struct power_sample sample[2];
+	unsigned int nengine;
+	unsigned int nchild;
+	unsigned int qlen;
 	struct igt_power gpu;
 	uint64_t old_offset;
 	int i, n, loc;
 	int debugfs;
-	int nchild;
 	bool has_relocs = gem_has_relocations(fd);
 
 	if (flags & PRIORITY) {
@@ -356,9 +358,13 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 
 	nchild = 1;
 	if (flags & FORKED)
-		nchild *= sysconf(_SC_NPROCESSORS_ONLN);
+		nchild *= ncpus;
 	if (flags & ALL)
 		nchild *= nengine;
+	nchild = min(nchild, 2 * max(ncpus, nengine));
+	qlen = max(2u, QLEN * nengine / (nchild + nengine - 1));
+	igt_info("Using nchild:%d (out of ncpus:%d and nengine:%d), with a qlen:%d\n",
+		 nchild, ncpus, nengine, qlen);
 
 	intel_detect_and_clear_missed_interrupts(fd);
 	igt_power_get_energy(&gpu, &sample[0]);
@@ -448,19 +454,19 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 		}
 
 		memset(batches, 0, sizeof(batches));
-		for (n = 0; n < QLEN; n++) {
+		for (n = 0; n < qlen; n++) {
 			batches[n].handle = gem_create(fd, 4096);
 			gem_write(fd, batches[n].handle, 0, &bbe, sizeof(bbe));
 		}
 		execbuf.buffers_ptr = to_user_pointer(batches);
-		execbuf.buffer_count = QLEN;
+		execbuf.buffer_count = qlen;
 		gem_execbuf(fd, &execbuf);
 
 		execbuf.buffers_ptr = to_user_pointer(tmp);
 		execbuf.buffer_count = 2;
 
 		old_offset = store.offset;
-		for (n = 0; n < QLEN; n++) {
+		for (n = 0; n < qlen; n++) {
 			if (gen >= 8) {
 				batch[1] = old_offset + loc;
 				batch[2] = (old_offset + loc) >> 32;
@@ -525,8 +531,8 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 					gem_sync(fd, tmp[0].handle);
 				scratch = tmp[0];
 
-				gem_write(fd, batches[QLEN-1].handle, loc, &pass, sizeof(pass));
-				for (n = QLEN; --n >= 1; ) {
+				gem_write(fd, batches[qlen-1].handle, loc, &pass, sizeof(pass));
+				for (n = qlen; --n >= 1; ) {
 					uint32_t handle[2] = {};
 					int this_fd = fd;
 
@@ -648,7 +654,7 @@ static void whisper(int fd, const intel_ctx_t *ctx,
 				gem_vm_destroy(fd, local_cfg.vm);
 			}
 		}
-		for (n = 0; n < QLEN; n++)
+		for (n = 0; n < qlen; n++)
 			gem_close(fd, batches[n].handle);
 		if (flags & FDS) {
 			for (n = 0; n < 64; n++)
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2024-03-26 11:15 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-25 16:13 [PATCH i-g-t] i915/gem_exec_whisper: Limit workload Jonathan Cavitt
2024-03-25 19:00 ` ✓ Fi.CI.BAT: success for i915/gem_exec_whisper: Limit workload (rev2) Patchwork
2024-03-25 19:20 ` ✓ CI.xeBAT: " Patchwork
2024-03-26  3:54 ` ✗ Fi.CI.IGT: failure " Patchwork
2024-03-26 11:15   ` Kamil Konieczny
  -- strict thread matches above, loose matches on Subject: below --
2024-03-21 18:30 [PATCH i-g-t] i915/gem_exec_whisper: Limit workload Jonathan Cavitt
2024-03-25 16:05 ` Nirmoy Das

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox