* [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
@ 2019-05-29 13:24 Chris Wilson
2019-05-29 14:33 ` Andi Shyti
2019-06-03 10:19 ` [igt-dev] " Tvrtko Ursulin
0 siblings, 2 replies; 7+ messages in thread
From: Chris Wilson @ 2019-05-29 13:24 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
If we run out of engines, intel_get_current_physical_engine() degrades
into an infinite loop as although it advanced the iterator, it did not
update its local engine pointer.
Reported-by: Petri Latvala <petri.latvala@intel.com>
Fixes: 17c77e7b0c3c ("lib/i915: add gem_engine_topology library and for_each loop definition")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
Cc: Petri Latvala <petri.latvala@intel.com>
---
lib/i915/gem_engine_topology.c | 49 +++++-----------------------------
lib/i915/gem_engine_topology.h | 36 ++++++++++++++++---------
2 files changed, 29 insertions(+), 56 deletions(-)
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index fdd1b9516..17f67786f 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -81,11 +81,10 @@ static void ctx_map_engines(int fd, struct intel_engine_data *ed,
struct drm_i915_gem_context_param *param)
{
struct i915_context_param_engines *engines =
- from_user_pointer(param->value);
+ from_user_pointer(param->value);
int i = 0;
- for (typeof(engines->engines[0]) *p =
- &engines->engines[0];
+ for (struct i915_engine_class_instance *p = &engines->engines[0];
i < ed->nengines; i++, p++) {
p->engine_class = ed->engines[i].class;
p->engine_instance = ed->engines[i].instance;
@@ -136,7 +135,7 @@ static void query_engine_list(int fd, struct intel_engine_data *ed)
{
uint8_t buff[SIZEOF_QUERY] = { };
struct drm_i915_query_engine_info *query_engine =
- (struct drm_i915_query_engine_info *) buff;
+ (struct drm_i915_query_engine_info *)buff;
int i;
query_engines(fd, query_engine, SIZEOF_QUERY);
@@ -149,41 +148,6 @@ static void query_engine_list(int fd, struct intel_engine_data *ed)
ed->nengines = query_engine->num_engines;
}
-struct intel_execution_engine2 *
-intel_get_current_engine(struct intel_engine_data *ed)
-{
- if (!ed->n)
- ed->current_engine = &ed->engines[0];
- else if (ed->n >= ed->nengines)
- ed->current_engine = NULL;
-
- return ed->current_engine;
-}
-
-void intel_next_engine(struct intel_engine_data *ed)
-{
- if (ed->n + 1 < ed->nengines) {
- ed->n++;
- ed->current_engine = &ed->engines[ed->n];
- } else {
- ed->n = ed->nengines;
- ed->current_engine = NULL;
- }
-}
-
-struct intel_execution_engine2 *
-intel_get_current_physical_engine(struct intel_engine_data *ed)
-{
- struct intel_execution_engine2 *e;
-
- for (e = intel_get_current_engine(ed);
- e && e->is_virtual;
- intel_next_engine(ed))
- ;
-
- return e;
-}
-
static int gem_topology_get_param(int fd,
struct drm_i915_gem_context_param *p)
{
@@ -197,10 +161,9 @@ static int gem_topology_get_param(int fd,
return 0;
/* size will store the engine count */
- p->size = (p->size - sizeof(struct i915_context_param_engines)) /
- (offsetof(struct i915_context_param_engines,
- engines[1]) -
- sizeof(struct i915_context_param_engines));
+ igt_assert(p->size >= sizeof(struct i915_context_param_engines));
+ p->size -= sizeof(struct i915_context_param_engines);
+ p->size /= sizeof(struct i915_engine_class_instance);
igt_assert_f(p->size <= GEM_MAX_ENGINES, "unsupported engine count\n");
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 2415fd1e3..a1018afca 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -30,9 +30,7 @@
#define GEM_MAX_ENGINES I915_EXEC_RING_MASK + 1
struct intel_engine_data {
- uint32_t nengines;
- uint32_t n;
- struct intel_execution_engine2 *current_engine;
+ uint32_t nengines, cur;
struct intel_execution_engine2 engines[GEM_MAX_ENGINES];
};
@@ -40,31 +38,43 @@ bool gem_has_engine_topology(int fd);
struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id);
/* iteration functions */
-struct intel_execution_engine2 *
-intel_get_current_engine(struct intel_engine_data *ed);
-
-struct intel_execution_engine2 *
-intel_get_current_physical_engine(struct intel_engine_data *ed);
-
-void intel_next_engine(struct intel_engine_data *ed);
-
int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
struct intel_execution_engine2 *e);
void gem_context_set_all_engines(int fd, uint32_t ctx);
+static inline struct intel_execution_engine2 *
+intel_get_current_engine(struct intel_engine_data *ed)
+{
+ if (ed->cur >= ed->nengines)
+ return NULL;
+
+ return &ed->engines[ed->cur];
+}
+
+static inline struct intel_execution_engine2 *
+intel_get_current_physical_engine(struct intel_engine_data *ed)
+{
+ struct intel_execution_engine2 *e;
+
+ for (; (e = intel_get_current_engine(ed)) && e->is_virtual; ed->cur++)
+ ;
+
+ return e;
+}
+
#define __for_each_static_engine(e__) \
for ((e__) = intel_execution_engines2; (e__)->name; (e__)++)
#define for_each_context_engine(fd__, ctx__, e__) \
for (struct intel_engine_data i__ = intel_init_engine_list(fd__, ctx__); \
((e__) = intel_get_current_engine(&i__)); \
- intel_next_engine(&i__))
+ i__.cur++)
/* needs to replace "for_each_physical_engine" when conflicts are fixed */
#define __for_each_physical_engine(fd__, e__) \
for (struct intel_engine_data i__ = intel_init_engine_list(fd__, 0); \
((e__) = intel_get_current_physical_engine(&i__)); \
- intel_next_engine(&i__))
+ i__.cur++)
#endif /* GEM_ENGINE_TOPOLOGY_H */
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-05-29 13:24 [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator Chris Wilson
@ 2019-05-29 14:33 ` Andi Shyti
2019-06-03 10:19 ` [igt-dev] " Tvrtko Ursulin
1 sibling, 0 replies; 7+ messages in thread
From: Andi Shyti @ 2019-05-29 14:33 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev, intel-gfx
Hi Chris,
On Wed, May 29, 2019 at 02:24:21PM +0100, Chris Wilson wrote:
> If we run out of engines, intel_get_current_physical_engine() degrades
> into an infinite loop as although it advanced the iterator, it did not
> update its local engine pointer.
The patch looks like it does everything "but" what you say in the
commit log :)
>
> Reported-by: Petri Latvala <petri.latvala@intel.com>
> Fixes: 17c77e7b0c3c ("lib/i915: add gem_engine_topology library and for_each loop definition")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Andi Shyti <andi.shyti@intel.com>
> Cc: Petri Latvala <petri.latvala@intel.com>
> ---
> lib/i915/gem_engine_topology.c | 49 +++++-----------------------------
> lib/i915/gem_engine_topology.h | 36 ++++++++++++++++---------
> 2 files changed, 29 insertions(+), 56 deletions(-)
>
> diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
> index fdd1b9516..17f67786f 100644
> --- a/lib/i915/gem_engine_topology.c
> +++ b/lib/i915/gem_engine_topology.c
> @@ -81,11 +81,10 @@ static void ctx_map_engines(int fd, struct intel_engine_data *ed,
> struct drm_i915_gem_context_param *param)
> {
> struct i915_context_param_engines *engines =
> - from_user_pointer(param->value);
> + from_user_pointer(param->value);
> int i = 0;
>
> - for (typeof(engines->engines[0]) *p =
> - &engines->engines[0];
> + for (struct i915_engine_class_instance *p = &engines->engines[0];
> i < ed->nengines; i++, p++) {
> p->engine_class = ed->engines[i].class;
> p->engine_instance = ed->engines[i].instance;
> @@ -136,7 +135,7 @@ static void query_engine_list(int fd, struct intel_engine_data *ed)
> {
> uint8_t buff[SIZEOF_QUERY] = { };
> struct drm_i915_query_engine_info *query_engine =
> - (struct drm_i915_query_engine_info *) buff;
> + (struct drm_i915_query_engine_info *)buff;
Until here, nothing is related to the description in the commit
log. Can we put the above in a different patch?
> -struct intel_execution_engine2 *
> -intel_get_current_engine(struct intel_engine_data *ed)
> -{
> - if (!ed->n)
> - ed->current_engine = &ed->engines[0];
> - else if (ed->n >= ed->nengines)
> - ed->current_engine = NULL;
> -
> - return ed->current_engine;
> -}
> -
> -void intel_next_engine(struct intel_engine_data *ed)
> -{
> - if (ed->n + 1 < ed->nengines) {
> - ed->n++;
> - ed->current_engine = &ed->engines[ed->n];
> - } else {
> - ed->n = ed->nengines;
> - ed->current_engine = NULL;
> - }
> -}
> -
> -struct intel_execution_engine2 *
> -intel_get_current_physical_engine(struct intel_engine_data *ed)
> -{
> - struct intel_execution_engine2 *e;
> -
> - for (e = intel_get_current_engine(ed);
> - e && e->is_virtual;
> - intel_next_engine(ed))
> - ;
> -
> - return e;
> -}
> -
Moving these functions to inline in the header file is unrelated
to the patch topic, right?
> static int gem_topology_get_param(int fd,
> struct drm_i915_gem_context_param *p)
> {
> @@ -197,10 +161,9 @@ static int gem_topology_get_param(int fd,
> return 0;
>
> /* size will store the engine count */
> - p->size = (p->size - sizeof(struct i915_context_param_engines)) /
> - (offsetof(struct i915_context_param_engines,
> - engines[1]) -
> - sizeof(struct i915_context_param_engines));
> + igt_assert(p->size >= sizeof(struct i915_context_param_engines));
> + p->size -= sizeof(struct i915_context_param_engines);
> + p->size /= sizeof(struct i915_engine_class_instance);
This is also unrelated.
> struct intel_engine_data {
> - uint32_t nengines;
> - uint32_t n;
> - struct intel_execution_engine2 *current_engine;
so we don't have anymore current_engine... I had the feeling the
Tvrtko really wanted it :)
> + uint32_t nengines, cur;
[ some copy paste ]
> - uint32_t nengines;
> - uint32_t n;
> + uint32_t nengines, cur;
mmhhh... why?
> +static inline struct intel_execution_engine2 *
> +intel_get_current_engine(struct intel_engine_data *ed)
> +{
> + if (ed->cur >= ed->nengines)
> + return NULL;
> +
> + return &ed->engines[ed->cur];
> +}
> +
> +static inline struct intel_execution_engine2 *
> +intel_get_current_physical_engine(struct intel_engine_data *ed)
> +{
> + struct intel_execution_engine2 *e;
> +
> + for (; (e = intel_get_current_engine(ed)) && e->is_virtual; ed->cur++)
> + ;
The above two lines are the only ones related to the commit
message. Can we keep this patch smaller? and put cosmetics in a
different patchset?
Thanks,
Andi
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [igt-dev] [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-05-29 13:24 [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator Chris Wilson
2019-05-29 14:33 ` Andi Shyti
@ 2019-06-03 10:19 ` Tvrtko Ursulin
2019-06-03 10:32 ` Petri Latvala
1 sibling, 1 reply; 7+ messages in thread
From: Tvrtko Ursulin @ 2019-06-03 10:19 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 29/05/2019 14:24, Chris Wilson wrote:
> If we run out of engines, intel_get_current_physical_engine() degrades
> into an infinite loop as although it advanced the iterator, it did not
> update its local engine pointer.
We had one infinite loop in there already.. AFAIR it was on one engine
platforms. Does the new incarnation happen actually via the
__for_each_physical_engine iterator or perhaps only when calling
intel_get_current_physical_engine after loop end? Why wasn't it seen in
testing?
Regards,
Tvrtko
> Reported-by: Petri Latvala <petri.latvala@intel.com>
> Fixes: 17c77e7b0c3c ("lib/i915: add gem_engine_topology library and for_each loop definition")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Andi Shyti <andi.shyti@intel.com>
> Cc: Petri Latvala <petri.latvala@intel.com>
> ---
> lib/i915/gem_engine_topology.c | 49 +++++-----------------------------
> lib/i915/gem_engine_topology.h | 36 ++++++++++++++++---------
> 2 files changed, 29 insertions(+), 56 deletions(-)
>
> diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
> index fdd1b9516..17f67786f 100644
> --- a/lib/i915/gem_engine_topology.c
> +++ b/lib/i915/gem_engine_topology.c
> @@ -81,11 +81,10 @@ static void ctx_map_engines(int fd, struct intel_engine_data *ed,
> struct drm_i915_gem_context_param *param)
> {
> struct i915_context_param_engines *engines =
> - from_user_pointer(param->value);
> + from_user_pointer(param->value);
> int i = 0;
>
> - for (typeof(engines->engines[0]) *p =
> - &engines->engines[0];
> + for (struct i915_engine_class_instance *p = &engines->engines[0];
> i < ed->nengines; i++, p++) {
> p->engine_class = ed->engines[i].class;
> p->engine_instance = ed->engines[i].instance;
> @@ -136,7 +135,7 @@ static void query_engine_list(int fd, struct intel_engine_data *ed)
> {
> uint8_t buff[SIZEOF_QUERY] = { };
> struct drm_i915_query_engine_info *query_engine =
> - (struct drm_i915_query_engine_info *) buff;
> + (struct drm_i915_query_engine_info *)buff;
> int i;
>
> query_engines(fd, query_engine, SIZEOF_QUERY);
> @@ -149,41 +148,6 @@ static void query_engine_list(int fd, struct intel_engine_data *ed)
> ed->nengines = query_engine->num_engines;
> }
>
> -struct intel_execution_engine2 *
> -intel_get_current_engine(struct intel_engine_data *ed)
> -{
> - if (!ed->n)
> - ed->current_engine = &ed->engines[0];
> - else if (ed->n >= ed->nengines)
> - ed->current_engine = NULL;
> -
> - return ed->current_engine;
> -}
> -
> -void intel_next_engine(struct intel_engine_data *ed)
> -{
> - if (ed->n + 1 < ed->nengines) {
> - ed->n++;
> - ed->current_engine = &ed->engines[ed->n];
> - } else {
> - ed->n = ed->nengines;
> - ed->current_engine = NULL;
> - }
> -}
> -
> -struct intel_execution_engine2 *
> -intel_get_current_physical_engine(struct intel_engine_data *ed)
> -{
> - struct intel_execution_engine2 *e;
> -
> - for (e = intel_get_current_engine(ed);
> - e && e->is_virtual;
> - intel_next_engine(ed))
> - ;
> -
> - return e;
> -}
> -
> static int gem_topology_get_param(int fd,
> struct drm_i915_gem_context_param *p)
> {
> @@ -197,10 +161,9 @@ static int gem_topology_get_param(int fd,
> return 0;
>
> /* size will store the engine count */
> - p->size = (p->size - sizeof(struct i915_context_param_engines)) /
> - (offsetof(struct i915_context_param_engines,
> - engines[1]) -
> - sizeof(struct i915_context_param_engines));
> + igt_assert(p->size >= sizeof(struct i915_context_param_engines));
> + p->size -= sizeof(struct i915_context_param_engines);
> + p->size /= sizeof(struct i915_engine_class_instance);
>
> igt_assert_f(p->size <= GEM_MAX_ENGINES, "unsupported engine count\n");
>
> diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
> index 2415fd1e3..a1018afca 100644
> --- a/lib/i915/gem_engine_topology.h
> +++ b/lib/i915/gem_engine_topology.h
> @@ -30,9 +30,7 @@
> #define GEM_MAX_ENGINES I915_EXEC_RING_MASK + 1
>
> struct intel_engine_data {
> - uint32_t nengines;
> - uint32_t n;
> - struct intel_execution_engine2 *current_engine;
> + uint32_t nengines, cur;
> struct intel_execution_engine2 engines[GEM_MAX_ENGINES];
> };
>
> @@ -40,31 +38,43 @@ bool gem_has_engine_topology(int fd);
> struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id);
>
> /* iteration functions */
> -struct intel_execution_engine2 *
> -intel_get_current_engine(struct intel_engine_data *ed);
> -
> -struct intel_execution_engine2 *
> -intel_get_current_physical_engine(struct intel_engine_data *ed);
> -
> -void intel_next_engine(struct intel_engine_data *ed);
> -
> int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
> struct intel_execution_engine2 *e);
>
> void gem_context_set_all_engines(int fd, uint32_t ctx);
>
> +static inline struct intel_execution_engine2 *
> +intel_get_current_engine(struct intel_engine_data *ed)
> +{
> + if (ed->cur >= ed->nengines)
> + return NULL;
> +
> + return &ed->engines[ed->cur];
> +}
> +
> +static inline struct intel_execution_engine2 *
> +intel_get_current_physical_engine(struct intel_engine_data *ed)
> +{
> + struct intel_execution_engine2 *e;
> +
> + for (; (e = intel_get_current_engine(ed)) && e->is_virtual; ed->cur++)
> + ;
> +
> + return e;
> +}
> +
> #define __for_each_static_engine(e__) \
> for ((e__) = intel_execution_engines2; (e__)->name; (e__)++)
>
> #define for_each_context_engine(fd__, ctx__, e__) \
> for (struct intel_engine_data i__ = intel_init_engine_list(fd__, ctx__); \
> ((e__) = intel_get_current_engine(&i__)); \
> - intel_next_engine(&i__))
> + i__.cur++)
>
> /* needs to replace "for_each_physical_engine" when conflicts are fixed */
> #define __for_each_physical_engine(fd__, e__) \
> for (struct intel_engine_data i__ = intel_init_engine_list(fd__, 0); \
> ((e__) = intel_get_current_physical_engine(&i__)); \
> - intel_next_engine(&i__))
> + i__.cur++)
>
> #endif /* GEM_ENGINE_TOPOLOGY_H */
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [igt-dev] [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-06-03 10:19 ` [igt-dev] " Tvrtko Ursulin
@ 2019-06-03 10:32 ` Petri Latvala
2019-06-03 10:39 ` Tvrtko Ursulin
0 siblings, 1 reply; 7+ messages in thread
From: Petri Latvala @ 2019-06-03 10:32 UTC (permalink / raw)
To: Tvrtko Ursulin; +Cc: igt-dev, intel-gfx
On Mon, Jun 03, 2019 at 11:19:48AM +0100, Tvrtko Ursulin wrote:
>
> On 29/05/2019 14:24, Chris Wilson wrote:
> > If we run out of engines, intel_get_current_physical_engine() degrades
> > into an infinite loop as although it advanced the iterator, it did not
> > update its local engine pointer.
>
> We had one infinite loop in there already.. AFAIR it was on one engine
> platforms. Does the new incarnation happen actually via the
> __for_each_physical_engine iterator or perhaps only when calling
> intel_get_current_physical_engine after loop end? Why it wasn't seen in
> testing?
The new incarnation happens with a wedged GPU. That's a case that's
hard to come by in testing.
--
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [igt-dev] [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-06-03 10:32 ` Petri Latvala
@ 2019-06-03 10:39 ` Tvrtko Ursulin
2019-06-03 11:19 ` Petri Latvala
0 siblings, 1 reply; 7+ messages in thread
From: Tvrtko Ursulin @ 2019-06-03 10:39 UTC (permalink / raw)
To: Chris Wilson, intel-gfx, igt-dev
On 03/06/2019 11:32, Petri Latvala wrote:
> On Mon, Jun 03, 2019 at 11:19:48AM +0100, Tvrtko Ursulin wrote:
>>
>> On 29/05/2019 14:24, Chris Wilson wrote:
>>> If we run out of engines, intel_get_current_physical_engine() degrades
>>> into an infinite loop as although it advanced the iterator, it did not
>>> update its local engine pointer.
>>
>> We had one infinite loop in there already.. AFAIR it was on one engine
>> platforms. Does the new incarnation happen actually via the
>> __for_each_physical_engine iterator or perhaps only when calling
>> intel_get_current_physical_engine after loop end? Why it wasn't seen in
>> testing?
>
>
> The new incarnation happens with a wedged GPU. That's a case that's
> hard to come by in testing.
1.
Colour me confused. :) How does a wedged GPU affect this loop?
2.
Are we missing a test? Wedge-do-stuff-unwedge should be easy to write.
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [igt-dev] [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-06-03 10:39 ` Tvrtko Ursulin
@ 2019-06-03 11:19 ` Petri Latvala
2019-06-03 12:47 ` Tvrtko Ursulin
0 siblings, 1 reply; 7+ messages in thread
From: Petri Latvala @ 2019-06-03 11:19 UTC (permalink / raw)
To: Tvrtko Ursulin; +Cc: igt-dev, intel-gfx
On Mon, Jun 03, 2019 at 11:39:25AM +0100, Tvrtko Ursulin wrote:
>
> On 03/06/2019 11:32, Petri Latvala wrote:
> > On Mon, Jun 03, 2019 at 11:19:48AM +0100, Tvrtko Ursulin wrote:
> > >
> > > On 29/05/2019 14:24, Chris Wilson wrote:
> > > > If we run out of engines, intel_get_current_physical_engine() degrades
> > > > into an infinite loop as although it advanced the iterator, it did not
> > > > update its local engine pointer.
> > >
> > > We had one infinite loop in there already.. AFAIR it was on one engine
> > > platforms. Does the new incarnation happen actually via the
> > > __for_each_physical_engine iterator or perhaps only when calling
> > > intel_get_current_physical_engine after loop end? Why it wasn't seen in
> > > testing?
> >
> >
> > The new incarnation happens with a wedged GPU. That's a case that's
> > hard to come by in testing.
>
> 1.
> Colour me confused. :) How does a wedged GPU affect this loop?
Wedging could be a red herring, but regardless the GPU was in a funky
state.
An easy reproduction method is just
# ./perf_pmu
(as normal user, not root!)
--
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [igt-dev] [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator
2019-06-03 11:19 ` Petri Latvala
@ 2019-06-03 12:47 ` Tvrtko Ursulin
0 siblings, 0 replies; 7+ messages in thread
From: Tvrtko Ursulin @ 2019-06-03 12:47 UTC (permalink / raw)
To: Chris Wilson, intel-gfx, igt-dev
On 03/06/2019 12:19, Petri Latvala wrote:
> On Mon, Jun 03, 2019 at 11:39:25AM +0100, Tvrtko Ursulin wrote:
>>
>> On 03/06/2019 11:32, Petri Latvala wrote:
>>> On Mon, Jun 03, 2019 at 11:19:48AM +0100, Tvrtko Ursulin wrote:
>>>>
>>>> On 29/05/2019 14:24, Chris Wilson wrote:
>>>>> If we run out of engines, intel_get_current_physical_engine() degrades
>>>>> into an infinite loop as although it advanced the iterator, it did not
>>>>> update its local engine pointer.
>>>>
>>>> We had one infinite loop in there already.. AFAIR it was on one engine
>>>> platforms. Does the new incarnation happen actually via the
>>>> __for_each_physical_engine iterator or perhaps only when calling
>>>> intel_get_current_physical_engine after loop end? Why it wasn't seen in
>>>> testing?
>>>
>>>
>>> The new incarnation happens with a wedged GPU. That's a case that's
>>> hard to come by in testing.
>>
>> 1.
>> Colour me confused. :) How does a wedged GPU affect this loop?
>
> Wedging could be a red herring, but regardless the GPU was in a funky
> state.
>
> An easy reproduction method is just
>
> # ./perf_pmu
>
> (as normal user, not root!)
See it now, so the problem is that the code is not capable of handling zero
engines (which happens when there is no access to drm master, like in your example).
As such, Chris' patch looks okay to me.
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2019-06-03 12:47 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-05-29 13:24 [PATCH i-g-t] lib: Fix intel_get_current_physical_engine() iterator Chris Wilson
2019-05-29 14:33 ` Andi Shyti
2019-06-03 10:19 ` [igt-dev] " Tvrtko Ursulin
2019-06-03 10:32 ` Petri Latvala
2019-06-03 10:39 ` Tvrtko Ursulin
2019-06-03 11:19 ` Petri Latvala
2019-06-03 12:47 ` Tvrtko Ursulin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox