public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH 1/3] lib/drmtest: Add a new test helper to check for Full PPGTT usage
@ 2013-12-10  9:36 oscar.mateo
  2013-12-10  9:36 ` [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT oscar.mateo
  2013-12-10  9:36 ` [PATCH 3/3] tests/gem_ppgtt: New Full PPGTT set of tests oscar.mateo
  0 siblings, 2 replies; 7+ messages in thread
From: oscar.mateo @ 2013-12-10  9:36 UTC (permalink / raw)
  To: intel-gfx; +Cc: benjamin.widawsky

From: Oscar Mateo <oscar.mateo@intel.com>

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 lib/drmtest.c |   14 ++++++++++++++
 lib/drmtest.h |    1 +
 2 files changed, 15 insertions(+)

diff --git a/lib/drmtest.c b/lib/drmtest.c
index f2624a1..c2483ee 100644
--- a/lib/drmtest.c
+++ b/lib/drmtest.c
@@ -87,6 +87,20 @@ is_intel(int fd)
 	return IS_INTEL(devid);
 }
 
+bool gem_uses_full_ppgtt(int fd)
+{
+	struct drm_i915_getparam gp;
+	int val;
+
+	gp.param = 28; /* HAS_FULL_PPGTT */
+	gp.value = &val;
+
+	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
+		return 0;
+
+	return val;
+}
+
 bool gem_uses_aliasing_ppgtt(int fd)
 {
 	struct drm_i915_getparam gp;
diff --git a/lib/drmtest.h b/lib/drmtest.h
index 20e2620..61769e2 100644
--- a/lib/drmtest.h
+++ b/lib/drmtest.h
@@ -91,6 +91,7 @@ void gem_sw_finish(int fd, uint32_t handle);
 bool gem_bo_busy(int fd, uint32_t handle);
 
 /* feature test helpers */
+bool gem_uses_full_ppgtt(int fd);
 bool gem_uses_aliasing_ppgtt(int fd);
 int gem_available_fences(int fd);
 
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT
  2013-12-10  9:36 [PATCH 1/3] lib/drmtest: Add a new test helper to check for Full PPGTT usage oscar.mateo
@ 2013-12-10  9:36 ` oscar.mateo
  2013-12-10 12:32   ` Daniel Vetter
  2013-12-10  9:36 ` [PATCH 3/3] tests/gem_ppgtt: New Full PPGTT set of tests oscar.mateo
  1 sibling, 1 reply; 7+ messages in thread
From: oscar.mateo @ 2013-12-10  9:36 UTC (permalink / raw)
  To: intel-gfx; +Cc: benjamin.widawsky

From: Oscar Mateo <oscar.mateo@intel.com>

With Full PPGTT, each new fd creates a new context and thus a new
PPGTT, so we have to reduce the number of simultaneous fds or face
OOM problems. For every new PPGTT, its PDEs are stored in the GGTT
which imposes a limit of 1024 new contexts. We want to leave at
least 1/4 of the GGTT available for "important" stuff like scanout
buffers, so never open more than 768 fds.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 tests/gem_close_race.c |   39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/tests/gem_close_race.c b/tests/gem_close_race.c
index 6064c02..f658c90 100644
--- a/tests/gem_close_race.c
+++ b/tests/gem_close_race.c
@@ -49,6 +49,8 @@
 
 static char device[80];
 static uint32_t devid;
+static unsigned int num_childs = 2000;
+static unsigned int num_fds = 32000;
 
 static void selfcopy(int fd, uint32_t handle, int loops)
 {
@@ -136,11 +138,10 @@ static void run(int child)
 		gem_read(fd, handle, 0, &handle, sizeof(handle));
 }
 
-#define NUM_FD 32000
-
 struct thread {
 	pthread_mutex_t mutex;
-	int fds[NUM_FD];
+	unsigned int num_fds;
+	int *fds;
 	int done;
 };
 
@@ -152,7 +153,7 @@ static void *thread_run(void *_data)
 	while (!t->done) {
 		pthread_mutex_unlock(&t->mutex);
 
-		for (int n = 0; n < NUM_FD; n++) {
+		for (int n = 0; n < t->num_fds; n++) {
 			struct drm_i915_gem_create create;
 
 			create.handle = 0;
@@ -185,7 +186,7 @@ static void *thread_busy(void *_data)
 
 		pthread_mutex_unlock(&t->mutex);
 
-		n  = rand() % NUM_FD;
+		n  = rand() % t->num_fds;
 
 		create.handle = 0;
 		create.size = OBJECT_SIZE;
@@ -213,16 +214,23 @@ igt_main
 {
 	igt_skip_on_simulation();
 
-	sprintf(device, "/dev/dri/card%d", drm_get_card());
-	{
-		int fd = open(device, O_RDWR);
+	igt_fixture {
+		int fd;
+		sprintf(device, "/dev/dri/card%d", drm_get_card());
+		fd = open(device, O_RDWR);
 		igt_assert(fd != -1);
 		devid = intel_get_drm_devid(fd);
+		if (gem_uses_full_ppgtt(fd))
+		{
+			/* Reduce the number of simultaneous fds or face OOM */
+			num_childs = 768;
+			num_fds = 768;
+		}
 		close(fd);
 	}
 
 	igt_subtest("process-exit") {
-		igt_fork(child, 2000)
+		igt_fork(child, num_childs)
 			run(child);
 		igt_waitchildren();
 	}
@@ -232,17 +240,21 @@ igt_main
 		struct thread *data = calloc(1, sizeof(struct thread));
 		int n;
 
+		data->num_fds = num_fds;
+		data->fds = calloc(num_fds, sizeof(int));
+
 		igt_assert(data);
+		igt_assert(data->fds);
 
 		pthread_mutex_init(&data->mutex, NULL);
-		for (n = 0; n < NUM_FD; n++)
+		for (n = 0; n < num_fds; n++)
 			data->fds[n] = open(device, O_RDWR);
 
 		pthread_create(&thread[0], NULL, thread_run, data);
 		pthread_create(&thread[1], NULL, thread_busy, data);
 
-		for (n = 0; n < 1000*NUM_FD; n++) {
-			int i = rand() % NUM_FD;
+		for (n = 0; n < 1000*num_fds; n++) {
+			int i = rand() % num_fds;
 			if (data->fds[i] == -1) {
 				data->fds[i] = open(device, O_RDWR);
 			} else{
@@ -258,8 +270,9 @@ igt_main
 		pthread_join(thread[1], NULL);
 		pthread_join(thread[0], NULL);
 
-		for (n = 0; n < NUM_FD; n++)
+		for (n = 0; n < num_fds; n++)
 			close(data->fds[n]);
+		free(data->fds);
 		free(data);
 	}
 }
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/3] tests/gem_ppgtt: New Full PPGTT set of tests
  2013-12-10  9:36 [PATCH 1/3] lib/drmtest: Add a new test helper to check for Full PPGTT usage oscar.mateo
  2013-12-10  9:36 ` [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT oscar.mateo
@ 2013-12-10  9:36 ` oscar.mateo
  1 sibling, 0 replies; 7+ messages in thread
From: oscar.mateo @ 2013-12-10  9:36 UTC (permalink / raw)
  To: intel-gfx; +Cc: benjamin.widawsky

From: Oscar Mateo <oscar.mateo@intel.com>

These tests cover some tricky corner cases found during
the True-and-only Full PPGTT feature development.

v2: Add pthread requirement to Makefile.
v3: Added new "pinned" testcase.
v4: Require Full PPGTT.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 tests/.gitignore       |    1 +
 tests/Makefile.am      |    1 +
 tests/Makefile.sources |    1 +
 tests/gem_full_ppgtt.c |  327 ++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 330 insertions(+)
 create mode 100644 tests/gem_full_ppgtt.c

diff --git a/tests/.gitignore b/tests/.gitignore
index 8a00364..a77b544 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -55,6 +55,7 @@ gem_partial_pwrite_pread
 gem_persistent_relocs
 gem_pin
 gem_pipe_control_store_loop
+gem_full_ppgtt
 gem_pread
 gem_pread_after_blit
 gem_pwrite
diff --git a/tests/Makefile.am b/tests/Makefile.am
index b8cddd5..02e0796 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -32,6 +32,7 @@ LDADD = ../lib/libintel_tools.la $(PCIACCESS_LIBS) $(DRM_LIBS)
 LDADD += $(CAIRO_LIBS) $(LIBUDEV_LIBS) $(GLIB_LIBS)
 AM_CFLAGS += $(CAIRO_CFLAGS) $(LIBUDEV_CFLAGS) $(GLIB_CFLAGS)
 
+gem_full_ppgtt_LDADD = $(LDADD) -lpthread
 gem_close_race_LDADD = $(LDADD) -lpthread
 gem_fence_thrash_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_fence_thrash_LDADD = $(LDADD) -lpthread
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index ca2b58c..c3c1852 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -36,6 +36,7 @@ TESTS_progs_M = \
 	gem_mmap_gtt \
 	gem_partial_pwrite_pread \
 	gem_persistent_relocs \
+	gem_full_ppgtt \
 	gem_pread \
 	gem_pread_after_blit \
 	gem_pwrite \
diff --git a/tests/gem_full_ppgtt.c b/tests/gem_full_ppgtt.c
new file mode 100644
index 0000000..f3c8576
--- /dev/null
+++ b/tests/gem_full_ppgtt.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Oscar Mateo <oscar.mateo@intel.com>
+ *
+ */
+
+/*
+ * This test covers some tricky corner cases found during
+ * True PPGTT development
+ */
+
+#include <pthread.h>
+#include "rendercopy.h"
+#include "igt_debugfs.h"
+
+#define WIDTH 512
+#define STRIDE (WIDTH*4)
+#define HEIGHT 512
+#define SIZE (HEIGHT*STRIDE)
+
+/* options */
+int option_num_threads = 10;
+int option_num_iter = 200;
+bool option_tiled = false;
+
+/* globals */
+int global_fd;
+int devid;
+render_copyfunc_t rendercopy;
+
+static void init_buffer(drm_intel_bufmgr *bufmgr,
+			struct scratch_buf *buf, bool special)
+{
+	uint32_t tiling;
+	unsigned long pitch = STRIDE;
+
+	if (option_tiled)
+	{
+		tiling = I915_TILING_X;
+		buf->bo = drm_intel_bo_alloc_tiled(bufmgr, "",
+			     WIDTH, HEIGHT, 4,
+			     &tiling, &pitch, 0);
+	}
+	else
+	{
+		tiling = I915_TILING_NONE;
+		buf->bo = drm_intel_bo_alloc(bufmgr, "",
+				special? 16*SIZE : SIZE, 4096);
+	}
+
+	igt_assert(buf->bo);
+	buf->size = SIZE;
+	buf->tiling = tiling;
+	buf->stride = pitch;
+}
+
+static bool check_buffer_contents(struct scratch_buf *buf, uint32_t val)
+{
+	uint32_t *ptr;
+	int i;
+	drm_intel_bo *bo = buf->bo;
+
+	if (buf->tiling == I915_TILING_NONE)
+		do_or_die(drm_intel_bo_map(bo, 0));
+	else
+		do_or_die(drm_intel_gem_bo_map_gtt(bo));
+
+	ptr = bo->virtual;
+	for (i = 0; i < WIDTH*HEIGHT; i++) {
+		if (ptr[i] != val) {
+			printf("Expected 0x%08x, found 0x%08x at offset 0x%08x\n",
+					val, ptr[i], i * 4);
+			return false;
+		}
+		val++;
+	}
+
+	if (buf->tiling == I915_TILING_NONE)
+		drm_intel_bo_unmap(bo);
+	else
+		drm_intel_gem_bo_unmap_gtt(bo);
+
+	return true;
+}
+
+static void init_buffer_contents(struct scratch_buf *buf, uint32_t val)
+{
+	uint32_t *ptr;
+	int i;
+	drm_intel_bo *bo = buf->bo;
+
+	if (buf->tiling == I915_TILING_NONE)
+		do_or_die(drm_intel_bo_map(bo, 1));
+	else
+		do_or_die(drm_intel_gem_bo_map_gtt(bo));
+
+	ptr = bo->virtual;
+	for (i = 0; i < WIDTH*HEIGHT; i++)
+		ptr[i] = val++;
+
+	if (buf->tiling == I915_TILING_NONE)
+		drm_intel_bo_unmap(bo);
+	else
+		drm_intel_gem_bo_unmap_gtt(bo);
+}
+
+static void run(void)
+{
+	int fd;
+	drm_intel_bufmgr *bufmgr;
+	struct intel_batchbuffer *batch;
+	struct scratch_buf src, dst;
+	int i;
+	uint32_t start = 0;
+
+	/* Initialize */
+	igt_assert((fd = drm_open_any()) >= 0);
+	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+	batch = intel_batchbuffer_alloc(bufmgr, devid);
+	init_buffer(bufmgr, &src, false);
+	init_buffer_contents(&src, start);
+	init_buffer(bufmgr, &dst, false);
+
+	/* Work */
+	for (i = 0; i < option_num_iter; i++)
+		rendercopy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+	igt_assert(check_buffer_contents(&dst, start));
+
+	/* Cleanup */
+	drm_intel_bo_unreference(src.bo);
+	drm_intel_bo_unreference(dst.bo);
+	intel_batchbuffer_free(batch);
+	drm_intel_bufmgr_destroy(bufmgr);
+	close(fd);
+}
+
+static void *work_multiple(void *arg)
+{
+	run();
+
+	pthread_exit(NULL);
+}
+
+static void run_multiple(void)
+{
+	int i = 0;
+	void *retval;
+	pthread_t *threads = calloc(option_num_threads, sizeof(*threads));
+
+	for (i = 0; i < option_num_threads; i++)
+		pthread_create(&threads[i], NULL, work_multiple, &i);
+
+	for (i = 0; i < option_num_threads; i++)
+		igt_assert(pthread_join(threads[i], &retval) == 0);
+
+	free(threads);
+}
+
+static void run_with_namedbo(uint32_t name)
+{
+	int fd;
+	drm_intel_bufmgr *bufmgr;
+	struct intel_batchbuffer *batch;
+	struct scratch_buf src, dst;
+	uint32_t start = 0;
+	int i;
+
+	/* Initialize */
+	igt_assert((fd = drm_open_any()) >= 0);
+	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+	batch = intel_batchbuffer_alloc(bufmgr, devid);
+	init_buffer(bufmgr, &dst, false);
+
+	/* Recover src */
+	src.size = dst.size;
+	src.tiling = dst.tiling;
+	src.stride = dst.stride;
+	src.bo = drm_intel_bo_gem_create_from_name(bufmgr, "", name);
+	igt_assert(src.bo);
+	igt_assert(check_buffer_contents(&src, start));
+
+	/* Work */
+	for (i = 0; i < option_num_iter; i++)
+		rendercopy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+	igt_assert(check_buffer_contents(&dst, start));
+
+	/* Cleanup */
+	drm_intel_bo_unreference(src.bo);
+	drm_intel_bo_unreference(dst.bo);
+	intel_batchbuffer_free(batch);
+	drm_intel_bufmgr_destroy(bufmgr);
+	close(fd);
+}
+
+static void *work_flinked(void *arg)
+{
+	uint32_t *name = (uint32_t *)arg;
+
+	run_with_namedbo(*name);
+
+	pthread_exit(NULL);
+}
+
+static void run_flinked(void)
+{
+	drm_intel_bufmgr *bufmgr;
+	struct scratch_buf src;
+	uint32_t name;
+	uint32_t start = 0;
+	int i;
+	void *retval;
+	pthread_t *threads = calloc(option_num_threads, sizeof(*threads));
+
+	bufmgr = drm_intel_bufmgr_gem_init(global_fd, 4096);
+	init_buffer(bufmgr, &src, false);
+	init_buffer_contents(&src, start);
+	do_or_die(drm_intel_bo_flink(src.bo, &name));
+
+	for (i = 0; i < option_num_threads; i++)
+		pthread_create(&threads[i], NULL, work_flinked, &name);
+
+	for (i = 0; i < option_num_threads; i++)
+		igt_assert(pthread_join(threads[i], &retval) == 0);
+
+	free(threads);
+	drm_intel_bo_unreference(src.bo);
+	drm_intel_bufmgr_destroy(bufmgr);
+}
+
+static void run_pinned(void)
+{
+	drm_intel_bufmgr *bufmgr;
+	struct scratch_buf src;
+	uint32_t name;
+	uint32_t val = 0;
+
+	bufmgr = drm_intel_bufmgr_gem_init(global_fd, 4096);
+	init_buffer(bufmgr, &src, true);
+	init_buffer_contents(&src, val);
+	do_or_die(drm_intel_bo_flink(src.bo, &name));
+
+	/* Pin to GGTT */
+	drm_intel_bo_pin(src.bo, 4096);
+
+	/* Use the source bo in a different context (pin to a PPGTT) */
+	run_with_namedbo(name);
+
+	/* Make sure we switch the context in every ring so that the
+	 * previous context finally dies */
+	gem_quiescent_gpu(global_fd);
+	igt_drop_caches_set(DROP_RETIRE);
+
+	drm_intel_bo_unpin(src.bo);	
+	drm_intel_bo_unreference(src.bo);
+	drm_intel_bufmgr_destroy(bufmgr);
+}
+
+static void do_tests(bool use_tiled, const char *suffix)
+{
+	option_tiled = use_tiled;
+
+	/* One context, rendercopying in a loop */
+	igt_subtest_f("single-ctx-%s", suffix)
+		run();
+
+	/* Multiple threads, each with its own context
+	 * rendercopying in a loop */
+	igt_subtest_f("multiple-ctx-%s", suffix) {
+		run_multiple();
+	}
+
+	/* Multiple threads, each with its own context
+	 * rendercopying in a loop. The src bo is shared
+	 * among them via flink */
+	igt_subtest_f("multiple-ctx-flink-%s", suffix) {
+		run_flinked();
+	}
+
+	/* Pin object to GGTT, create a context, bind the
+	 * object to the new PPGTT and then clean the context */
+	igt_subtest_f("single-ctx-pinned-%s", suffix) {
+		run_pinned();
+	}
+}
+
+igt_main
+{
+	igt_fixture {
+		global_fd = drm_open_any();
+		igt_require_f(gem_uses_full_ppgtt(global_fd), "Requires Full PPGTT\n");
+		devid = intel_get_drm_devid(global_fd);
+		rendercopy = get_render_copyfunc(devid);
+		igt_require(rendercopy);
+		if (igt_run_in_simulation()) {
+			option_num_threads = 3;
+			option_num_iter = 10;
+		}
+	}
+
+	do_tests(false, "linear");
+	do_tests(true, "tiled");
+
+	igt_fixture
+		close(global_fd);
+}
+
-- 
1.7.9.5

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT
  2013-12-10  9:36 ` [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT oscar.mateo
@ 2013-12-10 12:32   ` Daniel Vetter
  2013-12-10 18:11     ` Ben Widawsky
  0 siblings, 1 reply; 7+ messages in thread
From: Daniel Vetter @ 2013-12-10 12:32 UTC (permalink / raw)
  To: oscar.mateo; +Cc: intel-gfx, benjamin.widawsky

On Tue, Dec 10, 2013 at 09:36:22AM +0000, oscar.mateo@intel.com wrote:
> From: Oscar Mateo <oscar.mateo@intel.com>
> 
> With Full PPGTT, each new fd creates a new context and thus a new
> PPGTT, so we have to reduce the number of simultaneous fds or face
> OOM problems. For every new PPGTT, its PDEs are stored in the GGTT
> which imposes a limit of 1024 new contexts. We want to leave at
> least 1/4 of the GGTT available for "important" stuff like scanout
> buffers, so never open more than 768 fds.
> 
> Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>

This test hasn't been terribly effective at provoking the bug it tries to
hit, so I think we can just unconditionally use the lower limit. That also
helps with the really long runtime of this case a bit.
-Daniel

> ---
>  tests/gem_close_race.c |   39 ++++++++++++++++++++++++++-------------
>  1 file changed, 26 insertions(+), 13 deletions(-)
> 
> diff --git a/tests/gem_close_race.c b/tests/gem_close_race.c
> index 6064c02..f658c90 100644
> --- a/tests/gem_close_race.c
> +++ b/tests/gem_close_race.c
> @@ -49,6 +49,8 @@
>  
>  static char device[80];
>  static uint32_t devid;
> +static unsigned int num_childs = 2000;
> +static unsigned int num_fds = 32000;
>  
>  static void selfcopy(int fd, uint32_t handle, int loops)
>  {
> @@ -136,11 +138,10 @@ static void run(int child)
>  		gem_read(fd, handle, 0, &handle, sizeof(handle));
>  }
>  
> -#define NUM_FD 32000
> -
>  struct thread {
>  	pthread_mutex_t mutex;
> -	int fds[NUM_FD];
> +	unsigned int num_fds;
> +	int *fds;
>  	int done;
>  };
>  
> @@ -152,7 +153,7 @@ static void *thread_run(void *_data)
>  	while (!t->done) {
>  		pthread_mutex_unlock(&t->mutex);
>  
> -		for (int n = 0; n < NUM_FD; n++) {
> +		for (int n = 0; n < t->num_fds; n++) {
>  			struct drm_i915_gem_create create;
>  
>  			create.handle = 0;
> @@ -185,7 +186,7 @@ static void *thread_busy(void *_data)
>  
>  		pthread_mutex_unlock(&t->mutex);
>  
> -		n  = rand() % NUM_FD;
> +		n  = rand() % t->num_fds;
>  
>  		create.handle = 0;
>  		create.size = OBJECT_SIZE;
> @@ -213,16 +214,23 @@ igt_main
>  {
>  	igt_skip_on_simulation();
>  
> -	sprintf(device, "/dev/dri/card%d", drm_get_card());
> -	{
> -		int fd = open(device, O_RDWR);
> +	igt_fixture {
> +		int fd;
> +		sprintf(device, "/dev/dri/card%d", drm_get_card());
> +		fd = open(device, O_RDWR);
>  		igt_assert(fd != -1);
>  		devid = intel_get_drm_devid(fd);
> +		if (gem_uses_full_ppgtt(fd))
> +		{
> +			/* Reduce the number of simultaneous fds or face OOM */
> +			num_childs = 768;
> +			num_fds = 768;
> +		}
>  		close(fd);
>  	}
>  
>  	igt_subtest("process-exit") {
> -		igt_fork(child, 2000)
> +		igt_fork(child, num_childs)
>  			run(child);
>  		igt_waitchildren();
>  	}
> @@ -232,17 +240,21 @@ igt_main
>  		struct thread *data = calloc(1, sizeof(struct thread));
>  		int n;
>  
> +		data->num_fds = num_fds;
> +		data->fds = calloc(num_fds, sizeof(int));
> +
>  		igt_assert(data);
> +		igt_assert(data->fds);
>  
>  		pthread_mutex_init(&data->mutex, NULL);
> -		for (n = 0; n < NUM_FD; n++)
> +		for (n = 0; n < num_fds; n++)
>  			data->fds[n] = open(device, O_RDWR);
>  
>  		pthread_create(&thread[0], NULL, thread_run, data);
>  		pthread_create(&thread[1], NULL, thread_busy, data);
>  
> -		for (n = 0; n < 1000*NUM_FD; n++) {
> -			int i = rand() % NUM_FD;
> +		for (n = 0; n < 1000*num_fds; n++) {
> +			int i = rand() % num_fds;
>  			if (data->fds[i] == -1) {
>  				data->fds[i] = open(device, O_RDWR);
>  			} else{
> @@ -258,8 +270,9 @@ igt_main
>  		pthread_join(thread[1], NULL);
>  		pthread_join(thread[0], NULL);
>  
> -		for (n = 0; n < NUM_FD; n++)
> +		for (n = 0; n < num_fds; n++)
>  			close(data->fds[n]);
> +		free(data->fds);
>  		free(data);
>  	}
>  }
> -- 
> 1.7.9.5
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT
  2013-12-10 12:32   ` Daniel Vetter
@ 2013-12-10 18:11     ` Ben Widawsky
  2013-12-10 18:22       ` Mateo Lozano, Oscar
  0 siblings, 1 reply; 7+ messages in thread
From: Ben Widawsky @ 2013-12-10 18:11 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx

On Tue, Dec 10, 2013 at 01:32:13PM +0100, Daniel Vetter wrote:
> On Tue, Dec 10, 2013 at 09:36:22AM +0000, oscar.mateo@intel.com wrote:
> > From: Oscar Mateo <oscar.mateo@intel.com>
> > 
> > With Full PPGTT, each new fd creates a new context and thus a new
> > PPGTT, so we have to reduce the number of simultaneous fds or face
> > OOM problems. For every new PPGTT, its PDEs are stored in the GGTT
> > which imposes a limit of 1024 new contexts. We want to leave at
> > least 1/4 of the GGTT available for "important" stuff like scanout
> > buffers, so never open more than 768 fds.
> > 
> > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
> 
> This test hasn't been terribly effective at provoking the bug it tries to
> hit, so I think we can just unconditionally use the lower limit. That also
> helps with the really long runtime of this case a bit.
> -Daniel

FWIW, all 3 of these patches, or their equivalents were in the branch I
sent in my cover letter.

My implementation on this specifically was slightly different. Same
idea.

> 
> > ---
> >  tests/gem_close_race.c |   39 ++++++++++++++++++++++++++-------------
> >  1 file changed, 26 insertions(+), 13 deletions(-)
> > 
> > diff --git a/tests/gem_close_race.c b/tests/gem_close_race.c
> > index 6064c02..f658c90 100644
> > --- a/tests/gem_close_race.c
> > +++ b/tests/gem_close_race.c
> > @@ -49,6 +49,8 @@
> >  
> >  static char device[80];
> >  static uint32_t devid;
> > +static unsigned int num_childs = 2000;
> > +static unsigned int num_fds = 32000;
> >  
> >  static void selfcopy(int fd, uint32_t handle, int loops)
> >  {
> > @@ -136,11 +138,10 @@ static void run(int child)
> >  		gem_read(fd, handle, 0, &handle, sizeof(handle));
> >  }
> >  
> > -#define NUM_FD 32000
> > -
> >  struct thread {
> >  	pthread_mutex_t mutex;
> > -	int fds[NUM_FD];
> > +	unsigned int num_fds;
> > +	int *fds;
> >  	int done;
> >  };
> >  
> > @@ -152,7 +153,7 @@ static void *thread_run(void *_data)
> >  	while (!t->done) {
> >  		pthread_mutex_unlock(&t->mutex);
> >  
> > -		for (int n = 0; n < NUM_FD; n++) {
> > +		for (int n = 0; n < t->num_fds; n++) {
> >  			struct drm_i915_gem_create create;
> >  
> >  			create.handle = 0;
> > @@ -185,7 +186,7 @@ static void *thread_busy(void *_data)
> >  
> >  		pthread_mutex_unlock(&t->mutex);
> >  
> > -		n  = rand() % NUM_FD;
> > +		n  = rand() % t->num_fds;
> >  
> >  		create.handle = 0;
> >  		create.size = OBJECT_SIZE;
> > @@ -213,16 +214,23 @@ igt_main
> >  {
> >  	igt_skip_on_simulation();
> >  
> > -	sprintf(device, "/dev/dri/card%d", drm_get_card());
> > -	{
> > -		int fd = open(device, O_RDWR);
> > +	igt_fixture {
> > +		int fd;
> > +		sprintf(device, "/dev/dri/card%d", drm_get_card());
> > +		fd = open(device, O_RDWR);
> >  		igt_assert(fd != -1);
> >  		devid = intel_get_drm_devid(fd);
> > +		if (gem_uses_full_ppgtt(fd))
> > +		{
> > +			/* Reduce the number of simultaneous fds or face OOM */
> > +			num_childs = 768;
> > +			num_fds = 768;
> > +		}
> >  		close(fd);
> >  	}
> >  
> >  	igt_subtest("process-exit") {
> > -		igt_fork(child, 2000)
> > +		igt_fork(child, num_childs)
> >  			run(child);
> >  		igt_waitchildren();
> >  	}
> > @@ -232,17 +240,21 @@ igt_main
> >  		struct thread *data = calloc(1, sizeof(struct thread));
> >  		int n;
> >  
> > +		data->num_fds = num_fds;
> > +		data->fds = calloc(num_fds, sizeof(int));
> > +
> >  		igt_assert(data);
> > +		igt_assert(data->fds);
> >  
> >  		pthread_mutex_init(&data->mutex, NULL);
> > -		for (n = 0; n < NUM_FD; n++)
> > +		for (n = 0; n < num_fds; n++)
> >  			data->fds[n] = open(device, O_RDWR);
> >  
> >  		pthread_create(&thread[0], NULL, thread_run, data);
> >  		pthread_create(&thread[1], NULL, thread_busy, data);
> >  
> > -		for (n = 0; n < 1000*NUM_FD; n++) {
> > -			int i = rand() % NUM_FD;
> > +		for (n = 0; n < 1000*num_fds; n++) {
> > +			int i = rand() % num_fds;
> >  			if (data->fds[i] == -1) {
> >  				data->fds[i] = open(device, O_RDWR);
> >  			} else{
> > @@ -258,8 +270,9 @@ igt_main
> >  		pthread_join(thread[1], NULL);
> >  		pthread_join(thread[0], NULL);
> >  
> > -		for (n = 0; n < NUM_FD; n++)
> > +		for (n = 0; n < num_fds; n++)
> >  			close(data->fds[n]);
> > +		free(data->fds);
> >  		free(data);
> >  	}
> >  }
> > -- 
> > 1.7.9.5
> > 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch

-- 
Ben Widawsky, Intel Open Source Technology Center

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT
  2013-12-10 18:11     ` Ben Widawsky
@ 2013-12-10 18:22       ` Mateo Lozano, Oscar
  2013-12-10 18:24         ` Ben Widawsky
  0 siblings, 1 reply; 7+ messages in thread
From: Mateo Lozano, Oscar @ 2013-12-10 18:22 UTC (permalink / raw)
  To: Widawsky, Benjamin, Daniel Vetter; +Cc: intel-gfx@lists.freedesktop.org

> > This test hasn't been terribly effective at provoking the bug it tries
> > to hit, so I think we can just unconditionally use the lower limit.
> > That also helps with the really long runtime of this case a bit.
> > -Daniel

Understood. I'll simplify the patch and send it again then.
 
> FWIW, all 3 of these patches, or their equivalents were in the branch I sent in
> my cover letter.

I saw your series and I thought that I better send these patches over, but I missed that point in your cover letter, sorry :(
How should we do it, then? Will Daniel take them directly from your branch?

-- Oscar

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT
  2013-12-10 18:22       ` Mateo Lozano, Oscar
@ 2013-12-10 18:24         ` Ben Widawsky
  0 siblings, 0 replies; 7+ messages in thread
From: Ben Widawsky @ 2013-12-10 18:24 UTC (permalink / raw)
  To: Mateo Lozano, Oscar; +Cc: intel-gfx@lists.freedesktop.org

On Tue, Dec 10, 2013 at 06:22:36PM +0000, Mateo Lozano, Oscar wrote:
> > > This test hasn't been terribly effective at provoking the bug it tries
> > > to hit, so I think we can just unconditionally use the lower limit.
> > > That also helps with the really long runtime of this case a bit.
> > > -Daniel
> 
> Understood. I'll simplify the patch and send it again then.
>  
> > FWIW, all 3 of these patches, or their equivalents were in the branch I sent in
> > my cover letter.
> 
> I saw your series and I thought that I better send these patches over, but I missed that point in your cover letter, sorry :(
> How should we do it, then? Will Daniel take them directly from your branch?
> 
> -- Oscar

You're fine. Use these when support is merged. My point to Daniel was
reiterating that this was tested on the patches I've submitted. (though
not with the identical patches).



-- 
Ben Widawsky, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2013-12-10 18:24 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-12-10  9:36 [PATCH 1/3] lib/drmtest: Add a new test helper to check for Full PPGTT usage oscar.mateo
2013-12-10  9:36 ` [PATCH 2/3] tests/gem_close_race: Adapt the test for Full PPGTT oscar.mateo
2013-12-10 12:32   ` Daniel Vetter
2013-12-10 18:11     ` Ben Widawsky
2013-12-10 18:22       ` Mateo Lozano, Oscar
2013-12-10 18:24         ` Ben Widawsky
2013-12-10  9:36 ` [PATCH 3/3] tests/gem_ppgtt: New Full PPGTT set of tests oscar.mateo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox