Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Marcin Bernatowicz <marcin.bernatowicz@linux.intel.com>
To: igt-dev@lists.freedesktop.org
Cc: chris.p.wilson@linux.intel.com
Subject: [igt-dev] [PATCH i-g-t 7/8] benchmarks/gem_wsim: extract prepare_ctxs function, add w_sync
Date: Wed,  6 Sep 2023 15:51:07 +0000	[thread overview]
Message-ID: <20230906155108.2175876-8-marcin.bernatowicz@linux.intel.com> (raw)
In-Reply-To: <20230906155108.2175876-1-marcin.bernatowicz@linux.intel.com>

Some code reorganization, no functional changes.
Extracted prepare_ctxs function from prepare_workload.
Added w_sync abstraction for workload step synchronization.
Changes will allow cleaner xe integration.

Signed-off-by: Marcin Bernatowicz <marcin.bernatowicz@linux.intel.com>
---
 benchmarks/gem_wsim.c | 145 ++++++++++++++++++++++++------------------
 1 file changed, 82 insertions(+), 63 deletions(-)

diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index ec9fdc2d0..d807a9d7d 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -261,6 +261,11 @@ static const char *ring_str_map[NUM_ENGINES] = {
 	[VECS] = "VECS",
 };
 
+static void w_sync(int fd_, struct w_step *w)
+{
+	gem_sync(fd_, w->obj[0].handle);
+}
+
 static int read_timestamp_frequency(int i915)
 {
 	int value = 0;
@@ -1886,20 +1891,13 @@ static void measure_active_set(struct workload *wrk)
 
 #define alloca0(sz) ({ size_t sz__ = (sz); memset(alloca(sz__), 0, sz__); })
 
-static int prepare_workload(unsigned int id, struct workload *wrk)
+static int prepare_ctxs(unsigned int id, struct workload *wrk)
 {
-	struct working_set **sets;
-	unsigned long total = 0;
 	uint32_t share_vm = 0;
 	int max_ctx = -1;
 	struct w_step *w;
 	int i, j;
 
-	wrk->id = id;
-	wrk->bb_prng = (wrk->flags & SYNCEDCLIENTS) ? master_prng : rand();
-	wrk->bo_prng = (wrk->flags & SYNCEDCLIENTS) ? master_prng : rand();
-	wrk->run = true;
-
 	/*
 	 * Pre-scan workload steps to allocate context list storage.
 	 */
@@ -2088,6 +2086,21 @@ static int prepare_workload(unsigned int id, struct workload *wrk)
 	if (share_vm)
 		vm_destroy(fd, share_vm);
 
+	return 0;
+}
+
+static int prepare_workload(unsigned int id, struct workload *wrk)
+{
+	struct w_step *w;
+	int i, j;
+
+	wrk->id = id;
+	wrk->bb_prng = (wrk->flags & SYNCEDCLIENTS) ? master_prng : rand();
+	wrk->bo_prng = (wrk->flags & SYNCEDCLIENTS) ? master_prng : rand();
+	wrk->run = true;
+
+	prepare_ctxs(id, wrk);
+
 	/* Record default preemption. */
 	for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
 		if (w->type == BATCH)
@@ -2108,75 +2121,81 @@ static int prepare_workload(unsigned int id, struct workload *wrk)
 		for (j = i + 1; j < wrk->nr_steps; j++) {
 			w2 = &wrk->steps[j];
 
-			if (w2->context != w->context)
-				continue;
-			else if (w2->type == PREEMPTION)
+			if (w2->context != w->context)
+				continue;
+
+			if (w2->type == PREEMPTION)
 				break;
-			else if (w2->type != BATCH)
+			if (w2->type != BATCH)
 				continue;
 
 			w2->preempt_us = w->period;
 		}
 	}
 
-	/*
-	 * Scan for SSEU control steps.
-	 */
-	for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
-		if (w->type == SSEU) {
-			get_device_sseu();
-			break;
+	{
+		struct working_set **sets;
+		unsigned long total = 0;
+
+		/*
+		 * Scan for SSEU control steps.
+		 */
+		for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+			if (w->type == SSEU) {
+				get_device_sseu();
+				break;
+			}
 		}
-	}
 
-	/*
-	 * Allocate working sets.
-	 */
-	for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
-		if (w->type == WORKINGSET && !w->working_set.shared)
-			total += allocate_working_set(wrk, &w->working_set);
-	}
+		/*
+		 * Allocate working sets.
+		 */
+		for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+			if (w->type == WORKINGSET && !w->working_set.shared)
+				total += allocate_working_set(wrk, &w->working_set);
+		}
 
-	if (verbose > 2)
-		printf("%u: %lu bytes in working sets.\n", wrk->id, total);
+		if (verbose > 2)
+			printf("%u: %lu bytes in working sets.\n", wrk->id, total);
 
-	/*
-	 * Map of working set ids.
-	 */
-	wrk->max_working_set_id = -1;
-	for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
-		if (w->type == WORKINGSET &&
-		    w->working_set.id > wrk->max_working_set_id)
-			wrk->max_working_set_id = w->working_set.id;
-	}
+		/*
+		 * Map of working set ids.
+		 */
+		wrk->max_working_set_id = -1;
+		for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+			if (w->type == WORKINGSET &&
+			    w->working_set.id > wrk->max_working_set_id)
+				wrk->max_working_set_id = w->working_set.id;
+		}
 
-	sets = wrk->working_sets;
-	wrk->working_sets = calloc(wrk->max_working_set_id + 1,
-				   sizeof(*wrk->working_sets));
-	igt_assert(wrk->working_sets);
+		sets = wrk->working_sets;
+		wrk->working_sets = calloc(wrk->max_working_set_id + 1,
+					   sizeof(*wrk->working_sets));
+		igt_assert(wrk->working_sets);
 
-	for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
-		struct working_set *set;
+		for (i = 0, w = wrk->steps; i < wrk->nr_steps; i++, w++) {
+			struct working_set *set;
 
-		if (w->type != WORKINGSET)
-			continue;
+			if (w->type != WORKINGSET)
+				continue;
 
-		if (!w->working_set.shared) {
-			set = &w->working_set;
-		} else {
-			igt_assert(sets);
+			if (!w->working_set.shared) {
+				set = &w->working_set;
+			} else {
+				igt_assert(sets);
 
-			set = sets[w->working_set.id];
-			igt_assert(set->shared);
-			igt_assert(set->sizes);
+				set = sets[w->working_set.id];
+				igt_assert(set->shared);
+				igt_assert(set->sizes);
+			}
+
+			wrk->working_sets[w->working_set.id] = set;
 		}
 
-		wrk->working_sets[w->working_set.id] = set;
+		if (sets)
+			free(sets);
 	}
 
-	if (sets)
-		free(sets);
-
 	/*
 	 * Allocate batch buffers.
 	 */
@@ -2231,7 +2250,7 @@ static void w_sync_to(struct workload *wrk, struct w_step *w, int target)
 	igt_assert(target < wrk->nr_steps);
 	igt_assert(wrk->steps[target].type == BATCH);
 
-	gem_sync(fd, wrk->steps[target].obj[0].handle);
+	w_sync(fd, &wrk->steps[target]);
 }
 
 static void
@@ -2290,7 +2309,7 @@ static void sync_deps(struct workload *wrk, struct w_step *w)
 		igt_assert(dep_idx >= 0 && dep_idx < w->idx);
 		igt_assert(wrk->steps[dep_idx].type == BATCH);
 
-		gem_sync(fd, wrk->steps[dep_idx].obj[0].handle);
+		w_sync(fd, &wrk->steps[dep_idx]);
 	}
 }
 
@@ -2346,7 +2365,7 @@ static void *run_workload(void *data)
 
 				igt_assert(s_idx >= 0 && s_idx < i);
 				igt_assert(wrk->steps[s_idx].type == BATCH);
-				gem_sync(fd, wrk->steps[s_idx].obj[0].handle);
+				w_sync(fd, &wrk->steps[s_idx]);
 				continue;
 			} else if (w->type == THROTTLE) {
 				throttle = w->throttle;
@@ -2437,7 +2456,7 @@ static void *run_workload(void *data)
 				break;
 
 			if (w->sync)
-				gem_sync(fd, w->obj[0].handle);
+				w_sync(fd, w);
 
 			if (qd_throttle > 0) {
 				while (wrk->nrequest[engine] > qd_throttle) {
@@ -2446,7 +2465,7 @@ static void *run_workload(void *data)
 					s = igt_list_first_entry(&wrk->requests[engine],
 								 s, rq_link);
 
-					gem_sync(fd, s->obj[0].handle);
+					w_sync(fd, s);
 
 					s->request = -1;
 					igt_list_del(&s->rq_link);
@@ -2471,7 +2490,7 @@ static void *run_workload(void *data)
 				w->emit_fence = -1;
 			}
 		}
-	}
+	} /* main loop */
 
 	for (i = 0; i < NUM_ENGINES; i++) {
 		if (!wrk->nrequest[i])
-- 
2.30.2

  parent reply	other threads:[~2023-09-06 16:26 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-09-06 15:51 [igt-dev] [PATCH i-g-t 0/8] [RFC] benchmarks/gem_wsim: added basic xe support Marcin Bernatowicz
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 1/8] lib/xe_spin: xe_spin_opts for xe_spin initialization Marcin Bernatowicz
2023-09-20 16:43   ` Kamil Konieczny
2023-09-21 15:08     ` Bernatowicz, Marcin
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 2/8] lib/xe_spin: fixed duration xe_spin capability Marcin Bernatowicz
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 3/8] lib/igt_device_scan: Xe get integrated/discrete card functions Marcin Bernatowicz
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 4/8] benchmarks/gem_wsim: scale duration option fixes Marcin Bernatowicz
2023-09-20 16:06   ` Tvrtko Ursulin
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 5/8] benchmarks/gem_wsim: cleanups Marcin Bernatowicz
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 6/8] benchmarks/gem_wsim: allow comments in workload description files Marcin Bernatowicz
2023-09-20 16:13   ` Tvrtko Ursulin
2023-09-21 15:05     ` Bernatowicz, Marcin
2023-09-21 15:22       ` Tvrtko Ursulin
2023-09-21 16:20         ` Bernatowicz, Marcin
2023-09-25  9:03           ` Tvrtko Ursulin
2023-09-06 15:51 ` Marcin Bernatowicz [this message]
2023-09-06 15:51 ` [igt-dev] [PATCH i-g-t 8/8] [RFC] benchmarks/gem_wsim: added basic xe support Marcin Bernatowicz
2023-09-21 15:57   ` Tvrtko Ursulin
2023-09-21 19:39     ` Bernatowicz, Marcin
2023-09-25  9:16       ` Tvrtko Ursulin
2023-09-06 21:01 ` [igt-dev] ✗ Fi.CI.BAT: failure for benchmarks/gem_wsim: added basic xe support (rev2) Patchwork
2023-09-07  9:30   ` Bernatowicz, Marcin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230906155108.2175876-8-marcin.bernatowicz@linux.intel.com \
    --to=marcin.bernatowicz@linux.intel.com \
    --cc=chris.p.wilson@linux.intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox