All of lore.kernel.org
 help / color / mirror / Atom feed
From: Boris Brezillon <boris.brezillon@collabora.com>
To: "Christian König" <christian.koenig@amd.com>, donald.robson@imgtec.com
Cc: robdclark@chromium.org, sarah.walker@imgtec.com,
	ketil.johnsen@arm.com, lina@asahilina.net, Liviu.Dudau@arm.com,
	dri-devel@lists.freedesktop.org, luben.tuikov@amd.com,
	Danilo Krummrich <dakr@redhat.com>,
	intel-xe@lists.freedesktop.org, faith.ekstrand@collabora.com
Subject: Re: [Intel-xe] [PATCH v2 4/9] drm/sched: Split free_job into own work item
Date: Tue, 12 Sep 2023 12:13:57 +0200	[thread overview]
Message-ID: <20230912121357.4cc10dec@collabora.com> (raw)
In-Reply-To: <ee56b9ee-36c7-5935-c319-c8d1ad412c7c@amd.com>

On Fri, 25 Aug 2023 15:45:49 +0200
Christian König <christian.koenig@amd.com> wrote:

> Am 25.08.23 um 15:36 schrieb Matthew Brost:
> > On Fri, Aug 25, 2023 at 10:02:32AM +0200, Christian König wrote:  
> >> Am 25.08.23 um 04:58 schrieb Matthew Brost:  
> >>> On Fri, Aug 25, 2023 at 01:04:10AM +0200, Danilo Krummrich wrote:  
> >>>> On Thu, Aug 10, 2023 at 07:31:32PM -0700, Matthew Brost wrote:  
> >>>>> Rather than call free_job and run_job in same work item have a dedicated
> >>>>> work item for each. This aligns with the design and intended use of work
> >>>>> queues.
> >>>>>
> >>>>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> >>>>> ---
> >>>>>    drivers/gpu/drm/scheduler/sched_main.c | 137 ++++++++++++++++++-------
> >>>>>    include/drm/gpu_scheduler.h            |   8 +-
> >>>>>    2 files changed, 106 insertions(+), 39 deletions(-)
> >>>>>
> >>>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> index cede47afc800..b67469eac179 100644
> >>>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> @@ -213,11 +213,12 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> >>>>>     * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
> >>>>>     *
> >>>>>     * @rq: scheduler run queue to check.
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Try to find a ready entity, returns NULL if none found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>> +drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq, bool dequeue)
> >>>>>    {
> >>>>>    	struct drm_sched_entity *entity;
> >>>>> @@ -227,8 +228,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>    	if (entity) {
> >>>>>    		list_for_each_entry_continue(entity, &rq->entities, list) {
> >>>>>    			if (drm_sched_entity_is_ready(entity)) {
> >>>>> -				rq->current_entity = entity;
> >>>>> -				reinit_completion(&entity->entity_idle);
> >>>>> +				if (dequeue) {
> >>>>> +					rq->current_entity = entity;
> >>>>> +					reinit_completion(&entity->entity_idle);
> >>>>> +				}
> >>>>>    				spin_unlock(&rq->lock);
> >>>>>    				return entity;
> >>>>>    			}
> >>>>> @@ -238,8 +241,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>    	list_for_each_entry(entity, &rq->entities, list) {
> >>>>>    		if (drm_sched_entity_is_ready(entity)) {
> >>>>> -			rq->current_entity = entity;
> >>>>> -			reinit_completion(&entity->entity_idle);
> >>>>> +			if (dequeue) {
> >>>>> +				rq->current_entity = entity;
> >>>>> +				reinit_completion(&entity->entity_idle);
> >>>>> +			}
> >>>>>    			spin_unlock(&rq->lock);
> >>>>>    			return entity;
> >>>>>    		}
> >>>>> @@ -257,11 +262,12 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>     * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
> >>>>>     *
> >>>>>     * @rq: scheduler run queue to check.
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Find oldest waiting ready entity, returns NULL if none found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>> +drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq, bool dequeue)
> >>>>>    {
> >>>>>    	struct rb_node *rb;
> >>>>> @@ -271,8 +277,10 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>>    		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
> >>>>>    		if (drm_sched_entity_is_ready(entity)) {
> >>>>> -			rq->current_entity = entity;
> >>>>> -			reinit_completion(&entity->entity_idle);
> >>>>> +			if (dequeue) {
> >>>>> +				rq->current_entity = entity;
> >>>>> +				reinit_completion(&entity->entity_idle);
> >>>>> +			}
> >>>>>    			break;
> >>>>>    		}
> >>>>>    	}
> >>>>> @@ -282,13 +290,54 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>>    }
> >>>>>    /**
> >>>>> - * drm_sched_submit_queue - scheduler queue submission
> >>>>> + * drm_sched_run_job_queue - queue job submission
> >>>>>     * @sched: scheduler instance
> >>>>>     */
> >>>>> -static void drm_sched_submit_queue(struct drm_gpu_scheduler *sched)
> >>>>> +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
> >>>>>    {
> >>>>>    	if (!READ_ONCE(sched->pause_submit))
> >>>>> -		queue_work(sched->submit_wq, &sched->work_submit);
> >>>>> +		queue_work(sched->submit_wq, &sched->work_run_job);
> >>>>> +}
> >>>>> +
> >>>>> +static struct drm_sched_entity *
> >>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue);
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_run_job_queue_if_ready - queue job submission if ready
> >>>>> + * @sched: scheduler instance
> >>>>> + */
> >>>>> +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	if (drm_sched_select_entity(sched, false))
> >>>>> +		drm_sched_run_job_queue(sched);
> >>>>> +}
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_free_job_queue - queue free job
> >>>>> + *
> >>>>> + * @sched: scheduler instance to queue free job
> >>>>> + */
> >>>>> +static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	if (!READ_ONCE(sched->pause_submit))
> >>>>> +		queue_work(sched->submit_wq, &sched->work_free_job);
> >>>>> +}
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_free_job_queue_if_ready - queue free job if ready
> >>>>> + *
> >>>>> + * @sched: scheduler instance to queue free job
> >>>>> + */
> >>>>> +static void drm_sched_free_job_queue_if_ready(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	struct drm_sched_job *job;
> >>>>> +
> >>>>> +	spin_lock(&sched->job_list_lock);
> >>>>> +	job = list_first_entry_or_null(&sched->pending_list,
> >>>>> +				       struct drm_sched_job, list);
> >>>>> +	if (job && dma_fence_is_signaled(&job->s_fence->finished))
> >>>>> +		drm_sched_free_job_queue(sched);
> >>>>> +	spin_unlock(&sched->job_list_lock);
> >>>>>    }
> >>>>>    /**
> >>>>> @@ -310,7 +359,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
> >>>>>    	dma_fence_get(&s_fence->finished);
> >>>>>    	drm_sched_fence_finished(s_fence, result);
> >>>>>    	dma_fence_put(&s_fence->finished);
> >>>>> -	drm_sched_submit_queue(sched);
> >>>>> +	drm_sched_free_job_queue(sched);
> >>>>>    }
> >>>>>    /**
> >>>>> @@ -906,18 +955,19 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
> >>>>>    void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
> >>>>>    {
> >>>>>    	if (drm_sched_can_queue(sched))
> >>>>> -		drm_sched_submit_queue(sched);
> >>>>> +		drm_sched_run_job_queue(sched);
> >>>>>    }
> >>>>>    /**
> >>>>>     * drm_sched_select_entity - Select next entity to process
> >>>>>     *
> >>>>>     * @sched: scheduler instance
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Returns the entity to process or NULL if none are found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_select_entity(struct drm_gpu_scheduler *sched)
> >>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue)
> >>>>>    {
> >>>>>    	struct drm_sched_entity *entity;
> >>>>>    	int i;
> >>>>> @@ -935,8 +985,10 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
> >>>>>    	/* Kernel run queue has higher priority than normal run queue*/
> >>>>>    	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
> >>>>>    		entity = sched->sched_policy == DRM_SCHED_POLICY_FIFO ?
> >>>>> -			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
> >>>>> -			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
> >>>>> +			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i],
> >>>>> +							dequeue) :
> >>>>> +			drm_sched_rq_select_entity_rr(&sched->sched_rq[i],
> >>>>> +						      dequeue);
> >>>>>    		if (entity)
> >>>>>    			break;
> >>>>>    	}
> >>>>> @@ -1024,30 +1076,44 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
> >>>>>    EXPORT_SYMBOL(drm_sched_pick_best);
> >>>>>    /**
> >>>>> - * drm_sched_main - main scheduler thread
> >>>>> + * drm_sched_free_job_work - worker to call free_job
> >>>>>     *
> >>>>> - * @param: scheduler instance
> >>>>> + * @w: free job work
> >>>>>     */
> >>>>> -static void drm_sched_main(struct work_struct *w)
> >>>>> +static void drm_sched_free_job_work(struct work_struct *w)
> >>>>>    {
> >>>>>    	struct drm_gpu_scheduler *sched =
> >>>>> -		container_of(w, struct drm_gpu_scheduler, work_submit);
> >>>>> -	struct drm_sched_entity *entity;
> >>>>> +		container_of(w, struct drm_gpu_scheduler, work_free_job);
> >>>>>    	struct drm_sched_job *cleanup_job;
> >>>>> -	int r;
> >>>>>    	if (READ_ONCE(sched->pause_submit))
> >>>>>    		return;
> >>>>>    	cleanup_job = drm_sched_get_cleanup_job(sched);  
> >>>> I tried this patch with Nouveau and found a race condition:
> >>>>
> >>>> In drm_sched_run_job_work() the job is added to the pending_list via
> >>>> drm_sched_job_begin(), then the run_job() callback is called and the scheduled
> >>>> fence is signaled.
> >>>>
> >>>> However, in parallel drm_sched_get_cleanup_job() might be called from
> >>>> drm_sched_free_job_work(), which picks the first job from the pending_list and
> >>>> for the next job on the pending_list sets the scheduled fence' timestamp field.  
> >> Well why can this happen in parallel? Either the work items are scheduled to
> >> a single threaded work queue or you have protected the pending list with
> >> some locks.
> >>  
> > Xe uses a single-threaded work queue, Nouveau does not (desired
> > behavior).

I'm a bit worried that leaving this single vs multi-threaded wq
decision to drivers is going to cause unnecessary pain, because what
was previously a given in terms of run/cleanup execution order (thanks
to the kthread+static-drm_sched_main-workflow approach) is now subject
to the wq ordering guarantees, which depend on the wq type picked by
the driver.

> >
> > The list of pending jobs is protected by a lock (safe), the race is:
> >
> > add job to pending list
> > run_job
> > signal scheduled fence
> >
> > dequeue from pending list
> > free_job
> > update timestamp
> >
> > Once a job is on the pending list its timestamp can be accessed which
> > can blow up if scheduled fence isn't signaled or more specifically unless
> > DMA_FENCE_FLAG_TIMESTAMP_BIT is set. 

Ah, so that's the reason for the TIMESTAMP test added in v3. Sorry for
the noise in my v3 review, but I still think it'd be beneficial to have
that change moved to its own commit.

WARNING: multiple messages have this Message-ID (diff)
From: Boris Brezillon <boris.brezillon@collabora.com>
To: "Christian König" <christian.koenig@amd.com>, donald.robson@imgtec.com
Cc: Matthew Brost <matthew.brost@intel.com>,
	robdclark@chromium.org, sarah.walker@imgtec.com,
	thomas.hellstrom@linux.intel.com, ketil.johnsen@arm.com,
	lina@asahilina.net, Liviu.Dudau@arm.com,
	dri-devel@lists.freedesktop.org, luben.tuikov@amd.com,
	Danilo Krummrich <dakr@redhat.com>,
	intel-xe@lists.freedesktop.org, faith.ekstrand@collabora.com
Subject: Re: [PATCH v2 4/9] drm/sched: Split free_job into own work item
Date: Tue, 12 Sep 2023 12:13:57 +0200	[thread overview]
Message-ID: <20230912121357.4cc10dec@collabora.com> (raw)
In-Reply-To: <ee56b9ee-36c7-5935-c319-c8d1ad412c7c@amd.com>

On Fri, 25 Aug 2023 15:45:49 +0200
Christian König <christian.koenig@amd.com> wrote:

> Am 25.08.23 um 15:36 schrieb Matthew Brost:
> > On Fri, Aug 25, 2023 at 10:02:32AM +0200, Christian König wrote:  
> >> Am 25.08.23 um 04:58 schrieb Matthew Brost:  
> >>> On Fri, Aug 25, 2023 at 01:04:10AM +0200, Danilo Krummrich wrote:  
> >>>> On Thu, Aug 10, 2023 at 07:31:32PM -0700, Matthew Brost wrote:  
> >>>>> Rather than call free_job and run_job in same work item have a dedicated
> >>>>> work item for each. This aligns with the design and intended use of work
> >>>>> queues.
> >>>>>
> >>>>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> >>>>> ---
> >>>>>    drivers/gpu/drm/scheduler/sched_main.c | 137 ++++++++++++++++++-------
> >>>>>    include/drm/gpu_scheduler.h            |   8 +-
> >>>>>    2 files changed, 106 insertions(+), 39 deletions(-)
> >>>>>
> >>>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> index cede47afc800..b67469eac179 100644
> >>>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> >>>>> @@ -213,11 +213,12 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> >>>>>     * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
> >>>>>     *
> >>>>>     * @rq: scheduler run queue to check.
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Try to find a ready entity, returns NULL if none found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>> +drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq, bool dequeue)
> >>>>>    {
> >>>>>    	struct drm_sched_entity *entity;
> >>>>> @@ -227,8 +228,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>    	if (entity) {
> >>>>>    		list_for_each_entry_continue(entity, &rq->entities, list) {
> >>>>>    			if (drm_sched_entity_is_ready(entity)) {
> >>>>> -				rq->current_entity = entity;
> >>>>> -				reinit_completion(&entity->entity_idle);
> >>>>> +				if (dequeue) {
> >>>>> +					rq->current_entity = entity;
> >>>>> +					reinit_completion(&entity->entity_idle);
> >>>>> +				}
> >>>>>    				spin_unlock(&rq->lock);
> >>>>>    				return entity;
> >>>>>    			}
> >>>>> @@ -238,8 +241,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>    	list_for_each_entry(entity, &rq->entities, list) {
> >>>>>    		if (drm_sched_entity_is_ready(entity)) {
> >>>>> -			rq->current_entity = entity;
> >>>>> -			reinit_completion(&entity->entity_idle);
> >>>>> +			if (dequeue) {
> >>>>> +				rq->current_entity = entity;
> >>>>> +				reinit_completion(&entity->entity_idle);
> >>>>> +			}
> >>>>>    			spin_unlock(&rq->lock);
> >>>>>    			return entity;
> >>>>>    		}
> >>>>> @@ -257,11 +262,12 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
> >>>>>     * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
> >>>>>     *
> >>>>>     * @rq: scheduler run queue to check.
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Find oldest waiting ready entity, returns NULL if none found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>> +drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq, bool dequeue)
> >>>>>    {
> >>>>>    	struct rb_node *rb;
> >>>>> @@ -271,8 +277,10 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>>    		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
> >>>>>    		if (drm_sched_entity_is_ready(entity)) {
> >>>>> -			rq->current_entity = entity;
> >>>>> -			reinit_completion(&entity->entity_idle);
> >>>>> +			if (dequeue) {
> >>>>> +				rq->current_entity = entity;
> >>>>> +				reinit_completion(&entity->entity_idle);
> >>>>> +			}
> >>>>>    			break;
> >>>>>    		}
> >>>>>    	}
> >>>>> @@ -282,13 +290,54 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> >>>>>    }
> >>>>>    /**
> >>>>> - * drm_sched_submit_queue - scheduler queue submission
> >>>>> + * drm_sched_run_job_queue - queue job submission
> >>>>>     * @sched: scheduler instance
> >>>>>     */
> >>>>> -static void drm_sched_submit_queue(struct drm_gpu_scheduler *sched)
> >>>>> +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
> >>>>>    {
> >>>>>    	if (!READ_ONCE(sched->pause_submit))
> >>>>> -		queue_work(sched->submit_wq, &sched->work_submit);
> >>>>> +		queue_work(sched->submit_wq, &sched->work_run_job);
> >>>>> +}
> >>>>> +
> >>>>> +static struct drm_sched_entity *
> >>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue);
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_run_job_queue_if_ready - queue job submission if ready
> >>>>> + * @sched: scheduler instance
> >>>>> + */
> >>>>> +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	if (drm_sched_select_entity(sched, false))
> >>>>> +		drm_sched_run_job_queue(sched);
> >>>>> +}
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_free_job_queue - queue free job
> >>>>> + *
> >>>>> + * @sched: scheduler instance to queue free job
> >>>>> + */
> >>>>> +static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	if (!READ_ONCE(sched->pause_submit))
> >>>>> +		queue_work(sched->submit_wq, &sched->work_free_job);
> >>>>> +}
> >>>>> +
> >>>>> +/**
> >>>>> + * drm_sched_free_job_queue_if_ready - queue free job if ready
> >>>>> + *
> >>>>> + * @sched: scheduler instance to queue free job
> >>>>> + */
> >>>>> +static void drm_sched_free_job_queue_if_ready(struct drm_gpu_scheduler *sched)
> >>>>> +{
> >>>>> +	struct drm_sched_job *job;
> >>>>> +
> >>>>> +	spin_lock(&sched->job_list_lock);
> >>>>> +	job = list_first_entry_or_null(&sched->pending_list,
> >>>>> +				       struct drm_sched_job, list);
> >>>>> +	if (job && dma_fence_is_signaled(&job->s_fence->finished))
> >>>>> +		drm_sched_free_job_queue(sched);
> >>>>> +	spin_unlock(&sched->job_list_lock);
> >>>>>    }
> >>>>>    /**
> >>>>> @@ -310,7 +359,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
> >>>>>    	dma_fence_get(&s_fence->finished);
> >>>>>    	drm_sched_fence_finished(s_fence, result);
> >>>>>    	dma_fence_put(&s_fence->finished);
> >>>>> -	drm_sched_submit_queue(sched);
> >>>>> +	drm_sched_free_job_queue(sched);
> >>>>>    }
> >>>>>    /**
> >>>>> @@ -906,18 +955,19 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
> >>>>>    void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
> >>>>>    {
> >>>>>    	if (drm_sched_can_queue(sched))
> >>>>> -		drm_sched_submit_queue(sched);
> >>>>> +		drm_sched_run_job_queue(sched);
> >>>>>    }
> >>>>>    /**
> >>>>>     * drm_sched_select_entity - Select next entity to process
> >>>>>     *
> >>>>>     * @sched: scheduler instance
> >>>>> + * @dequeue: dequeue selected entity
> >>>>>     *
> >>>>>     * Returns the entity to process or NULL if none are found.
> >>>>>     */
> >>>>>    static struct drm_sched_entity *
> >>>>> -drm_sched_select_entity(struct drm_gpu_scheduler *sched)
> >>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue)
> >>>>>    {
> >>>>>    	struct drm_sched_entity *entity;
> >>>>>    	int i;
> >>>>> @@ -935,8 +985,10 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
> >>>>>    	/* Kernel run queue has higher priority than normal run queue*/
> >>>>>    	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
> >>>>>    		entity = sched->sched_policy == DRM_SCHED_POLICY_FIFO ?
> >>>>> -			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
> >>>>> -			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
> >>>>> +			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i],
> >>>>> +							dequeue) :
> >>>>> +			drm_sched_rq_select_entity_rr(&sched->sched_rq[i],
> >>>>> +						      dequeue);
> >>>>>    		if (entity)
> >>>>>    			break;
> >>>>>    	}
> >>>>> @@ -1024,30 +1076,44 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
> >>>>>    EXPORT_SYMBOL(drm_sched_pick_best);
> >>>>>    /**
> >>>>> - * drm_sched_main - main scheduler thread
> >>>>> + * drm_sched_free_job_work - worker to call free_job
> >>>>>     *
> >>>>> - * @param: scheduler instance
> >>>>> + * @w: free job work
> >>>>>     */
> >>>>> -static void drm_sched_main(struct work_struct *w)
> >>>>> +static void drm_sched_free_job_work(struct work_struct *w)
> >>>>>    {
> >>>>>    	struct drm_gpu_scheduler *sched =
> >>>>> -		container_of(w, struct drm_gpu_scheduler, work_submit);
> >>>>> -	struct drm_sched_entity *entity;
> >>>>> +		container_of(w, struct drm_gpu_scheduler, work_free_job);
> >>>>>    	struct drm_sched_job *cleanup_job;
> >>>>> -	int r;
> >>>>>    	if (READ_ONCE(sched->pause_submit))
> >>>>>    		return;
> >>>>>    	cleanup_job = drm_sched_get_cleanup_job(sched);  
> >>>> I tried this patch with Nouveau and found a race condition:
> >>>>
> >>>> In drm_sched_run_job_work() the job is added to the pending_list via
> >>>> drm_sched_job_begin(), then the run_job() callback is called and the scheduled
> >>>> fence is signaled.
> >>>>
> >>>> However, in parallel drm_sched_get_cleanup_job() might be called from
> >>>> drm_sched_free_job_work(), which picks the first job from the pending_list and
> >>>> for the next job on the pending_list sets the scheduled fence' timestamp field.  
> >> Well why can this happen in parallel? Either the work items are scheduled to
> >> a single threaded work queue or you have protected the pending list with
> >> some locks.
> >>  
> > Xe uses a single-threaded work queue, Nouveau does not (desired
> > behavior).

I'm a bit worried that leaving this single vs multi-threaded wq
decision to drivers is going to cause unnecessary pain, because what
was previously a given in terms of run/cleanup execution order (thanks
to the kthread+static-drm_sched_main-workflow approach) is now subject
to the wq ordering guarantees, which depend on the wq type picked by
the driver.

> >
> > The list of pending jobs is protected by a lock (safe), the race is:
> >
> > add job to pending list
> > run_job
> > signal scheduled fence
> >
> > dequeue from pending list
> > free_job
> > update timestamp
> >
> > Once a job is on the pending list its timestamp can be accessed which
> > can blow up if scheduled fence isn't signaled or more specifically unless
> > DMA_FENCE_FLAG_TIMESTAMP_BIT is set. 

Ah, so that's the reason for the TIMESTAMP test added in v3. Sorry for
the noise in my v3 review, but I still think it'd be beneficial to have
that change moved to its own commit.

  reply	other threads:[~2023-09-12 10:14 UTC|newest]

Thread overview: 163+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-11  2:31 [Intel-xe] [PATCH v2 0/9] DRM scheduler changes for Xe Matthew Brost
2023-08-11  2:31 ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 1/9] drm/sched: Convert drm scheduler to use a work queue rather than kthread Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-16 11:30   ` [Intel-xe] " Danilo Krummrich
2023-08-16 11:30     ` Danilo Krummrich
2023-08-16 14:05     ` [Intel-xe] " Christian König
2023-08-16 14:05       ` Christian König
2023-08-16 12:30       ` [Intel-xe] " Danilo Krummrich
2023-08-16 12:30         ` Danilo Krummrich
2023-08-16 14:38         ` [Intel-xe] " Matthew Brost
2023-08-16 14:38           ` Matthew Brost
2023-08-16 15:40           ` [Intel-xe] " Danilo Krummrich
2023-08-16 15:40             ` Danilo Krummrich
2023-08-16 14:59         ` [Intel-xe] " Christian König
2023-08-16 14:59           ` Christian König
2023-08-16 16:33           ` [Intel-xe] " Danilo Krummrich
2023-08-16 16:33             ` Danilo Krummrich
2023-08-17  5:33             ` [Intel-xe] " Christian König
2023-08-17  5:33               ` Christian König
2023-08-17 11:13               ` [Intel-xe] " Danilo Krummrich
2023-08-17 11:13                 ` Danilo Krummrich
2023-08-17 13:35                 ` [Intel-xe] " Christian König
2023-08-17 13:35                   ` Christian König
2023-08-17 12:48                   ` [Intel-xe] " Danilo Krummrich
2023-08-17 12:48                     ` Danilo Krummrich
2023-08-17 16:17                     ` [Intel-xe] " Christian König
2023-08-17 16:17                       ` Christian König
2023-08-18 11:58                       ` [Intel-xe] " Danilo Krummrich
2023-08-18 11:58                         ` Danilo Krummrich
2023-08-21 14:07                         ` [Intel-xe] " Christian König
2023-08-21 14:07                           ` Christian König
2023-08-21 18:01                           ` [Intel-xe] " Danilo Krummrich
2023-08-21 18:01                             ` Danilo Krummrich
2023-08-21 18:12                             ` [Intel-xe] " Christian König
2023-08-21 18:12                               ` Christian König
2023-08-21 19:07                               ` [Intel-xe] " Danilo Krummrich
2023-08-21 19:07                                 ` Danilo Krummrich
2023-08-22  9:35                                 ` [Intel-xe] " Christian König
2023-08-22  9:35                                   ` Christian König
2023-08-21 19:46                               ` [Intel-xe] " Faith Ekstrand
2023-08-21 19:46                                 ` Faith Ekstrand
2023-08-22  9:51                                 ` [Intel-xe] " Christian König
2023-08-22  9:51                                   ` Christian König
2023-08-22 16:55                                   ` [Intel-xe] " Faith Ekstrand
2023-08-22 16:55                                     ` Faith Ekstrand
2023-08-24 11:50                                     ` [Intel-xe] " Bas Nieuwenhuizen
2023-08-24 11:50                                       ` Bas Nieuwenhuizen
2023-08-18  3:08                 ` [Intel-xe] " Matthew Brost
2023-08-18  3:08                   ` Matthew Brost
2023-08-18  5:40                   ` [Intel-xe] " Christian König
2023-08-18  5:40                     ` Christian König
2023-08-18 12:49                     ` [Intel-xe] " Matthew Brost
2023-08-18 12:49                       ` Matthew Brost
2023-08-18 12:06                       ` [Intel-xe] " Danilo Krummrich
2023-08-18 12:06                         ` Danilo Krummrich
2023-09-12 14:28                 ` [Intel-xe] " Boris Brezillon
2023-09-12 14:28                   ` Boris Brezillon
2023-09-12 14:33                   ` [Intel-xe] " Danilo Krummrich
2023-09-12 14:33                     ` Danilo Krummrich
2023-09-12 14:49                     ` [Intel-xe] " Boris Brezillon
2023-09-12 14:49                       ` Boris Brezillon
2023-09-12 15:13                       ` [Intel-xe] " Boris Brezillon
2023-09-12 15:13                         ` Boris Brezillon
2023-09-12 16:58                         ` [Intel-xe] " Danilo Krummrich
2023-09-12 16:58                           ` Danilo Krummrich
2023-09-12 16:52                       ` [Intel-xe] " Danilo Krummrich
2023-09-12 16:52                         ` Danilo Krummrich
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 2/9] drm/sched: Move schedule policy to scheduler / entity Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11 21:43   ` [Intel-xe] " Maira Canal
2023-08-11 21:43     ` Maira Canal
2023-08-12  3:20     ` [Intel-xe] " Matthew Brost
2023-08-12  3:20       ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 3/9] drm/sched: Add DRM_SCHED_POLICY_SINGLE_ENTITY scheduling policy Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-29 17:37   ` [Intel-xe] " Danilo Krummrich
2023-08-29 17:37     ` Danilo Krummrich
2023-09-05 11:10     ` [Intel-xe] " Danilo Krummrich
2023-09-05 11:10       ` Danilo Krummrich
2023-09-11 19:44       ` [Intel-xe] " Matthew Brost
2023-09-11 19:44         ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 4/9] drm/sched: Split free_job into own work item Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-17 13:39   ` [Intel-xe] " Christian König
2023-08-17 13:39     ` Christian König
2023-08-17 17:54     ` [Intel-xe] " Matthew Brost
2023-08-17 17:54       ` Matthew Brost
2023-08-18  5:27       ` [Intel-xe] " Christian König
2023-08-18  5:27         ` Christian König
2023-08-18 13:13         ` [Intel-xe] " Matthew Brost
2023-08-18 13:13           ` Matthew Brost
2023-08-21 13:17           ` [Intel-xe] " Christian König
2023-08-21 13:17             ` Christian König
2023-08-23  3:27             ` [Intel-xe] " Matthew Brost
2023-08-23  3:27               ` Matthew Brost
2023-08-23  7:10               ` [Intel-xe] " Christian König
2023-08-23  7:10                 ` Christian König
2023-08-23 15:24                 ` [Intel-xe] " Matthew Brost
2023-08-23 15:24                   ` Matthew Brost
2023-08-23 15:41                   ` [Intel-xe] " Alex Deucher
2023-08-23 15:41                     ` Alex Deucher
2023-08-23 17:26                     ` [Intel-xe] " Rodrigo Vivi
2023-08-23 17:26                       ` Rodrigo Vivi
2023-08-23 23:12                       ` Matthew Brost
2023-08-23 23:12                         ` Matthew Brost
2023-08-24 11:44                         ` Christian König
2023-08-24 11:44                           ` Christian König
2023-08-24 14:30                           ` Matthew Brost
2023-08-24 14:30                             ` Matthew Brost
2023-08-24 23:04   ` Danilo Krummrich
2023-08-24 23:04     ` Danilo Krummrich
2023-08-25  2:58     ` [Intel-xe] " Matthew Brost
2023-08-25  2:58       ` Matthew Brost
2023-08-25  8:02       ` [Intel-xe] " Christian König
2023-08-25  8:02         ` Christian König
2023-08-25 13:36         ` [Intel-xe] " Matthew Brost
2023-08-25 13:36           ` Matthew Brost
2023-08-25 13:45           ` [Intel-xe] " Christian König
2023-08-25 13:45             ` Christian König
2023-09-12 10:13             ` Boris Brezillon [this message]
2023-09-12 10:13               ` Boris Brezillon
2023-09-12 10:46               ` [Intel-xe] " Danilo Krummrich
2023-09-12 10:46                 ` Danilo Krummrich
2023-09-12 12:18                 ` [Intel-xe] " Boris Brezillon
2023-09-12 12:18                   ` Boris Brezillon
2023-09-12 12:56                   ` [Intel-xe] " Danilo Krummrich
2023-09-12 12:56                     ` Danilo Krummrich
2023-09-12 13:52                     ` [Intel-xe] " Boris Brezillon
2023-09-12 13:52                       ` Boris Brezillon
2023-09-12 14:10                       ` [Intel-xe] " Danilo Krummrich
2023-09-12 14:10                         ` Danilo Krummrich
2023-09-12 13:27             ` [Intel-xe] " Boris Brezillon
2023-09-12 13:27               ` Boris Brezillon
2023-09-12 13:34               ` [Intel-xe] " Danilo Krummrich
2023-09-12 13:34                 ` Danilo Krummrich
2023-09-12 13:53                 ` [Intel-xe] " Boris Brezillon
2023-09-12 13:53                   ` Boris Brezillon
2023-08-28 18:04   ` [Intel-xe] " Danilo Krummrich
2023-08-28 18:04     ` Danilo Krummrich
2023-08-28 18:41     ` [Intel-xe] " Matthew Brost
2023-08-28 18:41       ` Matthew Brost
2023-08-29  1:20       ` [Intel-xe] " Danilo Krummrich
2023-08-29  1:20         ` Danilo Krummrich
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 5/9] drm/sched: Add generic scheduler message interface Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 6/9] drm/sched: Add drm_sched_start_timeout_unlocked helper Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 7/9] drm/sched: Start run wq before TDR in drm_sched_start Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 8/9] drm/sched: Submit job before starting TDR Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11  2:31 ` [Intel-xe] [PATCH v2 9/9] drm/sched: Add helper to set TDR timeout Matthew Brost
2023-08-11  2:31   ` Matthew Brost
2023-08-11  2:34 ` [Intel-xe] ✗ CI.Patch_applied: failure for DRM scheduler changes for Xe (rev2) Patchwork
2023-08-24  0:08 ` [Intel-xe] [PATCH v2 0/9] DRM scheduler changes for Xe Danilo Krummrich
2023-08-24  0:08   ` Danilo Krummrich
2023-08-24  3:23   ` [Intel-xe] " Matthew Brost
2023-08-24  3:23     ` Matthew Brost
2023-08-24 14:51     ` [Intel-xe] " Danilo Krummrich
2023-08-24 14:51       ` Danilo Krummrich
2023-08-25  3:01 ` [Intel-xe] ✗ CI.Patch_applied: failure for DRM scheduler changes for Xe (rev3) Patchwork
2023-09-05 11:13 ` [Intel-xe] ✗ CI.Patch_applied: failure for DRM scheduler changes for Xe (rev4) Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230912121357.4cc10dec@collabora.com \
    --to=boris.brezillon@collabora.com \
    --cc=Liviu.Dudau@arm.com \
    --cc=christian.koenig@amd.com \
    --cc=dakr@redhat.com \
    --cc=donald.robson@imgtec.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=faith.ekstrand@collabora.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=ketil.johnsen@arm.com \
    --cc=lina@asahilina.net \
    --cc=luben.tuikov@amd.com \
    --cc=robdclark@chromium.org \
    --cc=sarah.walker@imgtec.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.