* Re: [PATCH v2 02/12] sched/deadline: Less agressive dl_server handling
       [not found] ` <20250702121158.465086194@infradead.org>
@ 2025-07-30  9:34   ` Geert Uytterhoeven
  2025-07-30  9:46     ` Juri Lelli
  0 siblings, 1 reply; 3+ messages in thread
From: Geert Uytterhoeven @ 2025-07-30  9:34 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, juri.lelli, vincent.guittot, dietmar.eggemann, rostedt,
	bsegall, mgorman, vschneid, clm, linux-kernel, linux-m68k

Hi Peter,

On Wed, 2 Jul 2025 at 14:19, Peter Zijlstra <peterz@infradead.org> wrote:
> Chris reported that commit 5f6bd380c7bd ("sched/rt: Remove default
> bandwidth control") caused a significant dip in his favourite
> benchmark of the day. Simply disabling dl_server cured things.
>
> His workload hammers the 0->1, 1->0 transitions, and the
> dl_server_{start,stop}() overhead kills it -- fairly obviously a bad
> idea in hind sight and all that.
>
> Change things around to only disable the dl_server when there has not
> been a fair task around for a whole period. Since the default period
> is 1 second, this ensures the benchmark never trips this, overhead
> gone.
>
> Fixes: 557a6bfc662c ("sched/fair: Add trivial fair server")
> Reported-by: Chris Mason <clm@meta.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Link: https://lkml.kernel.org/r/20250520101727.507378961@infradead.org

Thanks for your patch, which is now commit cccb45d7c4295bbf
("sched/deadline: Less agressive dl_server handling") upstream.

This commit causes

    sched: DL replenish lagged too much

to be printed after full user-space (Debian) start-up on m68k
(atari_defconfig running on ARAnyM).  Reverting this commit and fixing
the small conflict gets rid of the message.
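
For anyone unfamiliar with that message: it is printed from
replenish_dl_entity() in kernel/sched/deadline.c when an entity's deadline
is still in the past even after its runtime has been topped up period by
period. A rough sketch of that tail of the function (paraphrased, not the
literal upstream code; the real function also handles PI-boosted parameters,
the deferred-server case, and may use a different printk variant and a
helper for the reset):

	/* sketch only: dl_se and rq as in the upstream function */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += dl_se->dl_period;
		dl_se->runtime  += dl_se->dl_runtime;
	}

	/*
	 * The deadline should now be in the future w.r.t. the rq clock.
	 * If it is not, the entity lagged too much: warn and restart
	 * with a fresh period from "now".
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk(KERN_WARNING "sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
		dl_se->runtime  = dl_se->dl_runtime;
	}

Roughly speaking, seeing it for the fair server means the server fell a
full period (or more) behind before it was replenished.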

> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -701,6 +701,7 @@ struct sched_dl_entity {
>         unsigned int                    dl_defer          : 1;
>         unsigned int                    dl_defer_armed    : 1;
>         unsigned int                    dl_defer_running  : 1;
> +       unsigned int                    dl_server_idle    : 1;
>
>         /*
>          * Bandwidth enforcement timer. Each -deadline task has its
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -1215,6 +1215,8 @@ static void __push_dl_task(struct rq *rq
>  /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
>  static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
>
> +static bool dl_server_stopped(struct sched_dl_entity *dl_se);
> +
>  static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
>  {
>         struct rq *rq = rq_of_dl_se(dl_se);
> @@ -1234,6 +1236,7 @@ static enum hrtimer_restart dl_server_ti
>
>                 if (!dl_se->server_has_tasks(dl_se)) {
>                         replenish_dl_entity(dl_se);
> +                       dl_server_stopped(dl_se);
>                         return HRTIMER_NORESTART;
>                 }
>
> @@ -1639,8 +1642,10 @@ void dl_server_update_idle_time(struct r
>  void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
>  {
>         /* 0 runtime = fair server disabled */
> -       if (dl_se->dl_runtime)
> +       if (dl_se->dl_runtime) {
> +               dl_se->dl_server_idle = 0;
>                 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
> +       }
>  }
>
>  void dl_server_start(struct sched_dl_entity *dl_se)
> @@ -1663,7 +1668,7 @@ void dl_server_start(struct sched_dl_ent
>                 setup_new_dl_entity(dl_se);
>         }
>
> -       if (!dl_se->dl_runtime)
> +       if (!dl_se->dl_runtime || dl_se->dl_server_active)
>                 return;
>
>         dl_se->dl_server_active = 1;
> @@ -1684,6 +1689,20 @@ void dl_server_stop(struct sched_dl_enti
>         dl_se->dl_server_active = 0;
>  }
>
> +static bool dl_server_stopped(struct sched_dl_entity *dl_se)
> +{
> +       if (!dl_se->dl_server_active)
> +               return false;
> +
> +       if (dl_se->dl_server_idle) {
> +               dl_server_stop(dl_se);
> +               return true;
> +       }
> +
> +       dl_se->dl_server_idle = 1;
> +       return false;
> +}
> +
>  void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
>                     dl_server_has_tasks_f has_tasks,
>                     dl_server_pick_f pick_task)
> @@ -2435,7 +2454,7 @@ static struct task_struct *__pick_task_d
>         if (dl_server(dl_se)) {
>                 p = dl_se->server_pick_task(dl_se);
>                 if (!p) {
> -                       if (dl_server_active(dl_se)) {
> +                       if (!dl_server_stopped(dl_se)) {
>                                 dl_se->dl_yielded = 1;
>                                 update_curr_dl_se(rq, dl_se, 0);
>                         }
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5879,7 +5879,6 @@ static bool throttle_cfs_rq(struct cfs_r
>         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
>         struct sched_entity *se;
>         long queued_delta, runnable_delta, idle_delta, dequeue = 1;
> -       long rq_h_nr_queued = rq->cfs.h_nr_queued;
>
>         raw_spin_lock(&cfs_b->lock);
>         /* This will start the period timer if necessary */
> @@ -5963,10 +5962,6 @@ static bool throttle_cfs_rq(struct cfs_r
>
>         /* At this point se is NULL and we are at root level*/
>         sub_nr_running(rq, queued_delta);
> -
> -       /* Stop the fair server if throttling resulted in no runnable tasks */
> -       if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
> -               dl_server_stop(&rq->fair_server);
>  done:
>         /*
>          * Note: distribution will already see us throttled via the
> @@ -7060,7 +7055,6 @@ static void set_next_buddy(struct sched_
>  static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
>  {
>         bool was_sched_idle = sched_idle_rq(rq);
> -       int rq_h_nr_queued = rq->cfs.h_nr_queued;
>         bool task_sleep = flags & DEQUEUE_SLEEP;
>         bool task_delayed = flags & DEQUEUE_DELAYED;
>         struct task_struct *p = NULL;
> @@ -7144,9 +7138,6 @@ static int dequeue_entities(struct rq *r
>
>         sub_nr_running(rq, h_nr_queued);
>
> -       if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
> -               dl_server_stop(&rq->fair_server);
> -
>         /* balance early to pull high priority tasks */
>         if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
>                 rq->next_balance = jiffies;
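
To summarise the hunks above in one place (a commented restatement of the
patch, not additional code): dl_server_update() now clears the new
dl_server_idle flag whenever a fair task actually consumes runtime, and
dl_server_stopped(), called from __pick_task_dl() and from the
replenishment timer when no fair task is found, only stops the server on
the second consecutive miss, i.e. roughly one server period after the last
fair activity:

	static bool dl_server_stopped(struct sched_dl_entity *dl_se)
	{
		if (!dl_se->dl_server_active)
			return false;

		if (dl_se->dl_server_idle) {
			/* no fair task for a whole period: really stop */
			dl_server_stop(dl_se);
			return true;
		}

		/* first miss: remember it, keep the server armed */
		dl_se->dl_server_idle = 1;
		return false;
	}

Together with the early return in dl_server_start() when the server is
already active, this removes the start/stop churn on every 0->1 / 1->0
transition that hurt Chris' benchmark.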

Gr{oetje,eeting}s,

                        Geert

-- 
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds


* Re: [PATCH v2 02/12] sched/deadline: Less agressive dl_server handling
  2025-07-30  9:34   ` [PATCH v2 02/12] sched/deadline: Less agressive dl_server handling Geert Uytterhoeven
@ 2025-07-30  9:46     ` Juri Lelli
  2025-07-30 10:05       ` Geert Uytterhoeven
  0 siblings, 1 reply; 3+ messages in thread
From: Juri Lelli @ 2025-07-30  9:46 UTC (permalink / raw)
  To: Geert Uytterhoeven
  Cc: Peter Zijlstra, mingo, vincent.guittot, dietmar.eggemann, rostedt,
	bsegall, mgorman, vschneid, clm, linux-kernel, linux-m68k

Hello,

On 30/07/25 11:34, Geert Uytterhoeven wrote:
> Hi Peter,

Apologies for interjecting.

> On Wed, 2 Jul 2025 at 14:19, Peter Zijlstra <peterz@infradead.org> wrote:
> > Chris reported that commit 5f6bd380c7bd ("sched/rt: Remove default
> > bandwidth control") caused a significant dip in his favourite
> > benchmark of the day. Simply disabling dl_server cured things.
> >
> > His workload hammers the 0->1, 1->0 transitions, and the
> > dl_server_{start,stop}() overhead kills it -- fairly obviously a bad
> > idea in hind sight and all that.
> >
> > Change things around to only disable the dl_server when there has not
> > been a fair task around for a whole period. Since the default period
> > is 1 second, this ensures the benchmark never trips this, overhead
> > gone.
> >
> > Fixes: 557a6bfc662c ("sched/fair: Add trivial fair server")
> > Reported-by: Chris Mason <clm@meta.com>
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > Link: https://lkml.kernel.org/r/20250520101727.507378961@infradead.org
> 
> Thanks for your patch, which is now commit cccb45d7c4295bbf
> ("sched/deadline: Less agressive dl_server handling") upstream.
> 
> This commit causes
> 
>     sched: DL replenish lagged too much
> 
> to be printed after full user-space (Debian) start-up on m68k
> (atari_defconfig running on ARAnyM).  Reverting this commit and fixing
> the small conflict gets rid of the message.

Does
https://lore.kernel.org/lkml/20250615131129.954975-1-kuyo.chang@mediatek.com/
help already (w/o the revert)?

Best,
Juri



* Re: [PATCH v2 02/12] sched/deadline: Less agressive dl_server handling
  2025-07-30  9:46     ` Juri Lelli
@ 2025-07-30 10:05       ` Geert Uytterhoeven
  0 siblings, 0 replies; 3+ messages in thread
From: Geert Uytterhoeven @ 2025-07-30 10:05 UTC (permalink / raw)
  To: Juri Lelli
  Cc: Peter Zijlstra, mingo, vincent.guittot, dietmar.eggemann, rostedt,
	bsegall, mgorman, vschneid, clm, linux-kernel, linux-m68k

Hi Juri,

On Wed, 30 Jul 2025 at 11:46, Juri Lelli <juri.lelli@redhat.com> wrote:
> On 30/07/25 11:34, Geert Uytterhoeven wrote:
> Apologies for interjecting.

No apologies needed, much appreciated!

> > On Wed, 2 Jul 2025 at 14:19, Peter Zijlstra <peterz@infradead.org> wrote:
> > > Chris reported that commit 5f6bd380c7bd ("sched/rt: Remove default
> > > bandwidth control") caused a significant dip in his favourite
> > > benchmark of the day. Simply disabling dl_server cured things.
> > >
> > > His workload hammers the 0->1, 1->0 transitions, and the
> > > dl_server_{start,stop}() overhead kills it -- fairly obviously a bad
> > > idea in hind sight and all that.
> > >
> > > Change things around to only disable the dl_server when there has not
> > > been a fair task around for a whole period. Since the default period
> > > is 1 second, this ensures the benchmark never trips this, overhead
> > > gone.
> > >
> > > Fixes: 557a6bfc662c ("sched/fair: Add trivial fair server")
> > > Reported-by: Chris Mason <clm@meta.com>
> > > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > > Link: https://lkml.kernel.org/r/20250520101727.507378961@infradead.org
> >
> > Thanks for your patch, which is now commit cccb45d7c4295bbf
> > ("sched/deadline: Less agressive dl_server handling") upstream.
> >
> > This commit causes
> >
> >     sched: DL replenish lagged too much
> >
> > to be printed after full user-space (Debian) start-up on m68k
> > (atari_defconfig running on ARAnyM).  Reverting this commit and fixing
> > the small conflict gets rid of the message.
>
> Does
> https://lore.kernel.org/lkml/20250615131129.954975-1-kuyo.chang@mediatek.com/
> help already (w/o the revert)?

Thanks, it does!

Gr{oetje,eeting}s,

                        Geert

-- 
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds

