linux-pm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Vincent Guittot <vincent.guittot@linaro.org>
To: Steve Muckle <steve.muckle@linaro.org>, Luca Abeni <luca.abeni@unitn.it>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kernel <linux-kernel@vger.kernel.org>,
	"linux-pm@vger.kernel.org" <linux-pm@vger.kernel.org>,
	Morten Rasmussen <morten.rasmussen@arm.com>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Juri Lelli <Juri.Lelli@arm.com>,
	Patrick Bellasi <patrick.bellasi@arm.com>,
	Michael Turquette <mturquette@baylibre.com>
Subject: Re: [RFCv6 PATCH 09/10] sched: deadline: use deadline bandwidth in scale_rt_capacity
Date: Wed, 9 Dec 2015 09:50:27 +0100	[thread overview]
Message-ID: <CAKfTPtCV4d=1nSDcU8J4x=MF7HmuyRxKr-4XiYAxPZYPKd1okA@mail.gmail.com> (raw)
In-Reply-To: <1449641971-20827-10-git-send-email-smuckle@linaro.org>

adding Luca

On 9 December 2015 at 07:19, Steve Muckle <steve.muckle@linaro.org> wrote:
> From: Vincent Guittot <vincent.guittot@linaro.org>
>
> Instead of monitoring the exec time of deadline tasks to evaluate the
> CPU capacity consumed by deadline scheduler class, we can directly
> calculate it thanks to the sum of utilization of deadline tasks on the
> CPU.  We can remove deadline tasks from rt_avg metric and directly use
> the average bandwidth of deadline scheduler in scale_rt_capacity.
>
> Based in part on a similar patch from Luca Abeni <luca.abeni@unitn.it>.
>
> Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
> Signed-off-by: Steve Muckle <smuckle@linaro.org>
> ---
>  kernel/sched/deadline.c | 33 +++++++++++++++++++++++++++++++--
>  kernel/sched/fair.c     |  8 ++++++++
>  kernel/sched/sched.h    |  2 ++
>  3 files changed, 41 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 8b0a15e..9d9eb50 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -43,6 +43,24 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
>         return !RB_EMPTY_NODE(&dl_se->rb_node);
>  }
>
> +static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
> +{
> +       u64 se_bw = dl_se->dl_bw;
> +
> +       dl_rq->avg_bw += se_bw;
> +}
> +
> +static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
> +{
> +       u64 se_bw = dl_se->dl_bw;
> +
> +       dl_rq->avg_bw -= se_bw;
> +       if (dl_rq->avg_bw < 0) {
> +               WARN_ON(1);
> +               dl_rq->avg_bw = 0;
> +       }
> +}
> +
>  static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
>  {
>         struct sched_dl_entity *dl_se = &p->dl;
> @@ -494,6 +512,9 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
>         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
>         struct rq *rq = rq_of_dl_rq(dl_rq);
>
> +       if (dl_se->dl_new)
> +               add_average_bw(dl_se, dl_rq);
> +
>         /*
>          * The arrival of a new instance needs special treatment, i.e.,
>          * the actual scheduling parameters have to be "renewed".
> @@ -741,8 +762,6 @@ static void update_curr_dl(struct rq *rq)
>         curr->se.exec_start = rq_clock_task(rq);
>         cpuacct_charge(curr, delta_exec);
>
> -       sched_rt_avg_update(rq, delta_exec);
> -
>         dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
>         if (dl_runtime_exceeded(dl_se)) {
>                 dl_se->dl_throttled = 1;
> @@ -1241,6 +1260,8 @@ static void task_fork_dl(struct task_struct *p)
>  static void task_dead_dl(struct task_struct *p)
>  {
>         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
> +       struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
> +       struct rq *rq = rq_of_dl_rq(dl_rq);
>
>         /*
>          * Since we are TASK_DEAD we won't slip out of the domain!
> @@ -1249,6 +1270,8 @@ static void task_dead_dl(struct task_struct *p)
>         /* XXX we should retain the bw until 0-lag */
>         dl_b->total_bw -= p->dl.dl_bw;
>         raw_spin_unlock_irq(&dl_b->lock);
> +
> +       clear_average_bw(&p->dl, &rq->dl);
>  }
>
>  static void set_curr_task_dl(struct rq *rq)
> @@ -1556,7 +1579,9 @@ retry:
>         }
>
>         deactivate_task(rq, next_task, 0);
> +       clear_average_bw(&next_task->dl, &rq->dl);
>         set_task_cpu(next_task, later_rq->cpu);
> +       add_average_bw(&next_task->dl, &later_rq->dl);
>         activate_task(later_rq, next_task, 0);
>         ret = 1;
>
> @@ -1644,7 +1669,9 @@ static void pull_dl_task(struct rq *this_rq)
>                         resched = true;
>
>                         deactivate_task(src_rq, p, 0);
> +                       clear_average_bw(&p->dl, &src_rq->dl);
>                         set_task_cpu(p, this_cpu);
> +                       add_average_bw(&p->dl, &this_rq->dl);
>                         activate_task(this_rq, p, 0);
>                         dmin = p->dl.deadline;
>
> @@ -1750,6 +1777,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
>         if (!start_dl_timer(p))
>                 __dl_clear_params(p);
>
> +       clear_average_bw(&p->dl, &rq->dl);
> +
>         /*
>          * Since this might be the only -deadline task on the rq,
>          * this is the right place to try to pull some other one
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 4c49f76..ce05f61 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6203,6 +6203,14 @@ static unsigned long scale_rt_capacity(int cpu)
>
>         used = div_u64(avg, total);
>
> +       /*
> +        * deadline bandwidth is defined at system level so we must
> +        * weight this bandwidth with the max capacity of the system.
> +        * As a reminder, avg_bw is 20bits width and
> +        * scale_cpu_capacity is 10 bits width
> +        */
> +       used += div_u64(rq->dl.avg_bw, arch_scale_cpu_capacity(NULL, cpu));
> +
>         if (likely(used < SCHED_CAPACITY_SCALE))
>                 return SCHED_CAPACITY_SCALE - used;
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 08858d1..e44c6be 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -519,6 +519,8 @@ struct dl_rq {
>  #else
>         struct dl_bw dl_bw;
>  #endif
> +       /* This is the "average utilization" for this runqueue */
> +       s64 avg_bw;
>  };
>
>  #ifdef CONFIG_SMP
> --
> 2.4.10
>

  reply	other threads:[~2015-12-09  8:50 UTC|newest]

Thread overview: 59+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-12-09  6:19 [RFCv6 PATCH 00/10] sched: scheduler-driven CPU frequency selection Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 01/10] sched: Compute cpu capacity available at current frequency Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 02/10] cpufreq: introduce cpufreq_driver_is_slow Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 03/10] sched: scheduler-driven cpu frequency selection Steve Muckle
2015-12-11 11:04   ` Juri Lelli
2015-12-15  2:02     ` Steve Muckle
2015-12-15 10:31       ` Juri Lelli
2015-12-16  1:22         ` Steve Muckle
2015-12-16  3:48   ` Leo Yan
2015-12-17  1:24     ` Steve Muckle
2015-12-17  7:17       ` Leo Yan
2015-12-18 19:15         ` Steve Muckle
2015-12-19  5:54           ` Leo Yan
2016-01-25 12:06   ` Ricky Liang
2016-01-27  1:14     ` Steve Muckle
2016-02-01 17:10   ` Ricky Liang
2016-02-11  4:44     ` Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 04/10] sched/fair: add triggers for OPP change requests Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 05/10] sched/{core,fair}: trigger OPP change request on fork() Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 06/10] sched/fair: cpufreq_sched triggers for load balancing Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 07/10] sched/fair: jump to max OPP when crossing UP threshold Steve Muckle
2015-12-11 11:12   ` Juri Lelli
2015-12-15  2:42     ` Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 08/10] sched: remove call of sched_avg_update from sched_rt_avg_update Steve Muckle
2015-12-09  6:19 ` [RFCv6 PATCH 09/10] sched: deadline: use deadline bandwidth in scale_rt_capacity Steve Muckle
2015-12-09  8:50   ` Vincent Guittot [this message]
2015-12-10 13:27     ` Luca Abeni
2015-12-10 16:11       ` Vincent Guittot
2015-12-11  7:48         ` Luca Abeni
2015-12-14 14:02           ` Vincent Guittot
2015-12-14 14:38             ` Luca Abeni
2015-12-14 15:17   ` Peter Zijlstra
2015-12-14 15:56     ` Vincent Guittot
2015-12-14 16:07       ` Juri Lelli
2015-12-14 21:19         ` Luca Abeni
2015-12-14 16:51       ` Peter Zijlstra
2015-12-14 21:31         ` Luca Abeni
2015-12-15 12:38           ` Peter Zijlstra
2015-12-15 13:30             ` Luca Abeni
2015-12-15 13:42               ` Peter Zijlstra
2015-12-15 21:24                 ` Luca Abeni
2015-12-16  9:28                   ` Juri Lelli
2015-12-15  4:43         ` Vincent Guittot
2015-12-15 12:41           ` Peter Zijlstra
2015-12-15 12:56             ` Vincent Guittot
2015-12-14 21:12       ` Luca Abeni
2015-12-15  4:59         ` Vincent Guittot
2015-12-15  8:50           ` Luca Abeni
2015-12-15 12:20             ` Peter Zijlstra
2015-12-15 12:46               ` Vincent Guittot
2015-12-15 13:18               ` Luca Abeni
2015-12-15 12:23             ` Peter Zijlstra
2015-12-15 13:21               ` Luca Abeni
2015-12-15 12:43             ` Vincent Guittot
2015-12-15 13:39               ` Luca Abeni
2015-12-15 12:58             ` Vincent Guittot
2015-12-15 13:41               ` Luca Abeni
2015-12-09  6:19 ` [RFCv6 PATCH 10/10] sched: rt scheduler sets capacity requirement Steve Muckle
2015-12-11 11:22   ` Juri Lelli

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='CAKfTPtCV4d=1nSDcU8J4x=MF7HmuyRxKr-4XiYAxPZYPKd1okA@mail.gmail.com' \
    --to=vincent.guittot@linaro.org \
    --cc=Juri.Lelli@arm.com \
    --cc=dietmar.eggemann@arm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-pm@vger.kernel.org \
    --cc=luca.abeni@unitn.it \
    --cc=mingo@redhat.com \
    --cc=morten.rasmussen@arm.com \
    --cc=mturquette@baylibre.com \
    --cc=patrick.bellasi@arm.com \
    --cc=peterz@infradead.org \
    --cc=steve.muckle@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).