Date: Tue, 12 Oct 2010 13:21:09 +0530
From: Bharata B Rao
Reply-To: bharata@linux.vnet.ibm.com
To: linux-kernel@vger.kernel.org
Cc: Dhaval Giani, Balbir Singh, Vaidyanathan Srinivasan,
	Srivatsa Vaddagiri, Kamalesh Babulal, Ingo Molnar, Peter Zijlstra,
	Pavel Emelyanov, Herbert Poetzl, Avi Kivity, Chris Friesen,
	Paul Menage, Mike Waychison, Paul Turner, Nikhil Rao
Subject: [PATCH v3 2/7] sched: accumulate per-cfs_rq cpu usage
Message-ID: <20101012075109.GC9893@in.ibm.com>
In-Reply-To: <20101012074910.GA9893@in.ibm.com>
References: <20101012074910.GA9893@in.ibm.com>

sched: accumulate per-cfs_rq cpu usage

From: Paul Turner

Introduce account_cfs_rq_quota() to account bandwidth usage at the
cfs_rq level versus the task_groups to which bandwidth has been
assigned. This is tracked by whether the local cfs_rq->quota_assigned
is finite or infinite (RUNTIME_INF).

For cfs_rq's that belong to a bandwidth-constrained task_group we
introduce tg_request_cfs_quota(), which attempts to allocate quota
from the global pool for local use. Updates involving the global pool
are currently protected under cfs_bandwidth->lock; local pools are
protected by rq->lock.

This patch only attempts to assign and track quota; no action is taken
when cfs_rq->quota_used exceeds cfs_rq->quota_assigned.
Signed-off-by: Paul Turner
Signed-off-by: Nikhil Rao
Signed-off-by: Bharata B Rao
---
 include/linux/sched.h |    4 ++++
 kernel/sched.c        |   13 +++++++++++++
 kernel/sched_fair.c   |   50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sysctl.c       |   10 ++++++++++
 4 files changed, 77 insertions(+)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1898,6 +1898,10 @@ int sched_rt_handler(struct ctl_table *t
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1929,6 +1929,19 @@ static const struct sched_class rt_sched
  * default: 0.5s
  */
 static u64 sched_cfs_bandwidth_period = 500000000ULL;
+
+/*
+ * default slice of quota to allocate from global tg to local cfs_rq pool on
+ * each refresh
+ * default: 10ms
+ */
+unsigned int sysctl_sched_cfs_bandwidth_slice = 10000UL;
+
+static inline u64 sched_cfs_bandwidth_slice(void)
+{
+	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
+}
+
 #endif
 
 #define sched_class_highest (&rt_sched_class)
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -267,6 +267,16 @@ find_matching_se(struct sched_entity **s
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
+#ifdef CONFIG_CFS_BANDWIDTH
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+	return &tg->cfs_bandwidth;
+}
+
+static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
+		unsigned long delta_exec);
+#endif
+
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
  */
@@ -547,6 +557,9 @@ static void update_curr(struct cfs_rq *c
 		cpuacct_charge(curtask, delta_exec);
 		account_group_exec_runtime(curtask, delta_exec);
 	}
+#ifdef CONFIG_CFS_BANDWIDTH
+	account_cfs_rq_quota(cfs_rq, delta_exec);
+#endif
 }
 
 static inline void
@@ -1130,6 +1143,43 @@ static void yield_task_fair(struct rq *r
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
+static u64 tg_request_cfs_quota(struct task_group *tg)
+{
+	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	u64 delta = 0;
+
+	if (cfs_b->runtime > 0 || cfs_b->quota == RUNTIME_INF) {
+		raw_spin_lock(&cfs_b->lock);
+		/*
+		 * it's possible a bandwidth update has changed the global
+		 * pool.
+		 */
+		if (cfs_b->quota == RUNTIME_INF)
+			delta = sched_cfs_bandwidth_slice();
+		else {
+			delta = min(cfs_b->runtime,
+					sched_cfs_bandwidth_slice());
+			cfs_b->runtime -= delta;
+		}
+		raw_spin_unlock(&cfs_b->lock);
+	}
+	return delta;
+}
+
+static void account_cfs_rq_quota(struct cfs_rq *cfs_rq,
+		unsigned long delta_exec)
+{
+	if (cfs_rq->quota_assigned == RUNTIME_INF)
+		return;
+
+	cfs_rq->quota_used += delta_exec;
+
+	if (cfs_rq->quota_used < cfs_rq->quota_assigned)
+		return;
+
+	cfs_rq->quota_assigned += tg_request_cfs_quota(cfs_rq->tg);
+}
+
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 {
 	return 1;
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -384,6 +384,16 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_CFS_BANDWIDTH
+	{
+		.procname	= "sched_cfs_bandwidth_slice_us",
+		.data		= &sysctl_sched_cfs_bandwidth_slice,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",
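
To make the two-level accounting in the changelog concrete, here is a minimal
userspace sketch of the scheme: a per-cfs_rq local pool charges delta_exec
and, once quota_used catches up with quota_assigned, pulls another slice from
the global per-task_group pool. This is an illustration only, not kernel
code: struct bw, struct crq, SLICE_NS and the main() driver are invented for
the example, and the cfs_b->lock/rq->lock protection is omitted because the
sketch is single-threaded.

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF	((uint64_t)~0ULL)
#define SLICE_NS	(10000ULL * 1000ULL)	/* 10ms, the patch's default slice */

struct bw  { uint64_t quota, runtime; };	/* global (task_group) pool */
struct crq { uint64_t quota_assigned, quota_used; struct bw *bw; };	/* local pool */

/* mirrors tg_request_cfs_quota(): grab up to one slice from the global pool */
static uint64_t request_quota(struct bw *b)
{
	uint64_t delta = 0;

	if (b->runtime > 0 || b->quota == RUNTIME_INF) {
		if (b->quota == RUNTIME_INF) {
			delta = SLICE_NS;
		} else {
			delta = b->runtime < SLICE_NS ? b->runtime : SLICE_NS;
			b->runtime -= delta;
		}
	}
	return delta;
}

/* mirrors account_cfs_rq_quota(): charge runtime against the local pool */
static void account_quota(struct crq *rq, uint64_t delta_exec)
{
	if (rq->quota_assigned == RUNTIME_INF)
		return;				/* unconstrained group */

	rq->quota_used += delta_exec;
	if (rq->quota_used < rq->quota_assigned)
		return;				/* local quota still available */

	/* local pool exhausted: refill one slice from the global pool */
	rq->quota_assigned += request_quota(rq->bw);
}

int main(void)
{
	struct bw b = { .quota = 50000000ULL, .runtime = 50000000ULL };	/* 50ms */
	struct crq rq = { .quota_assigned = 0, .quota_used = 0, .bw = &b };
	int i;

	for (i = 0; i < 8; i++) {
		account_quota(&rq, 3000000ULL);		/* charge 3ms of runtime */
		printf("used=%llu assigned=%llu global runtime=%llu\n",
		       (unsigned long long)rq.quota_used,
		       (unsigned long long)rq.quota_assigned,
		       (unsigned long long)b.runtime);
	}
	return 0;
}

Running the sketch shows quota_assigned growing in 10ms slices while the
global runtime drains by the same amount. With the patch applied and
CONFIG_CFS_BANDWIDTH enabled, the slice size is tunable at runtime via
/proc/sys/kernel/sched_cfs_bandwidth_slice_us (default 10000, i.e. 10ms,
converted to nanoseconds by sched_cfs_bandwidth_slice()).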