From: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
To: Ingo Molnar <mingo@elte.hu>
Cc: dmitry.adamushko@gmail.com, a.p.zijlstra@chello.nl,
	dhaval@linux.vnet.ibm.com, linux-kernel@vger.kernel.org,
	efault@gmx.de, skumar@linux.vnet.ibm.com,
	Balbir Singh <balbir@in.ibm.com>, Dipankar <dipankar@in.ibm.com>
Subject: [PATCH 2/4] sched: minor fixes for group scheduler
Date: Mon, 26 Nov 2007 10:33:44 +0530
Message-ID: <20071126050344.GC5304@linux.vnet.ibm.com>
In-Reply-To: <20071126050044.GA5304@linux.vnet.ibm.com>


Minor bug fixes for the group scheduler:

- Use a mutex to serialize add/remove of task groups and also changes to a
  task group's cpu shares. Use the same mutex when printing cfs_rq stats for
  the various task groups.
- Use list_for_each_entry_rcu() in the for_each_leaf_cfs_rq macro (when
  walking the task-group list); see the sketch after this changelog.


Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>

---
 kernel/sched.c      |   33 +++++++++++++++++++++++++--------
 kernel/sched_fair.c |    4 +++-
 2 files changed, 28 insertions(+), 9 deletions(-)

Index: current/kernel/sched.c
===================================================================
--- current.orig/kernel/sched.c
+++ current/kernel/sched.c
@@ -169,8 +169,6 @@ struct task_group {
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
-	/* spinlock to serialize modification to shares */
-	spinlock_t lock;
 	struct rcu_head rcu;
 };
 
@@ -182,6 +180,11 @@ static DEFINE_PER_CPU(struct cfs_rq, ini
 static struct sched_entity *init_sched_entity_p[NR_CPUS];
 static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 
+/* task_group_mutex serializes add/remove of task groups and also changes to
+ * a task group's cpu shares.
+ */
+static DEFINE_MUTEX(task_group_mutex);
+
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
@@ -222,9 +225,21 @@ static inline void set_task_cfs_rq(struc
 	p->se.parent = task_group(p)->se[cpu];
 }
 
+static inline void lock_task_group_list(void)
+{
+	mutex_lock(&task_group_mutex);
+}
+
+static inline void unlock_task_group_list(void)
+{
+	mutex_unlock(&task_group_mutex);
+}
+
 #else
 
 static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
+static inline void lock_task_group_list(void) { }
+static inline void unlock_task_group_list(void) { }
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
@@ -6747,7 +6762,6 @@ void __init sched_init(void)
 			se->parent = NULL;
 		}
 		init_task_group.shares = init_task_group_load;
-		spin_lock_init(&init_task_group.lock);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -6987,14 +7001,15 @@ struct task_group *sched_create_group(vo
 		se->parent = NULL;
 	}
 
+	tg->shares = NICE_0_LOAD;
+
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 		cfs_rq = tg->cfs_rq[i];
 		list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 	}
-
-	tg->shares = NICE_0_LOAD;
-	spin_lock_init(&tg->lock);
+	unlock_task_group_list();
 
 	return tg;
 
@@ -7040,10 +7055,12 @@ void sched_destroy_group(struct task_gro
 	struct cfs_rq *cfs_rq = NULL;
 	int i;
 
+	lock_task_group_list();
 	for_each_possible_cpu(i) {
 		cfs_rq = tg->cfs_rq[i];
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 	}
+	unlock_task_group_list();
 
 	BUG_ON(!cfs_rq);
 
@@ -7117,7 +7134,7 @@ int sched_group_set_shares(struct task_g
 {
 	int i;
 
-	spin_lock(&tg->lock);
+	lock_task_group_list();
 	if (tg->shares == shares)
 		goto done;
 
@@ -7126,7 +7143,7 @@ int sched_group_set_shares(struct task_g
 		set_se_shares(tg->se[i], shares);
 
 done:
-	spin_unlock(&tg->lock);
+	unlock_task_group_list();
 	return 0;
 }
 
Index: current/kernel/sched_fair.c
===================================================================
--- current.orig/kernel/sched_fair.c
+++ current/kernel/sched_fair.c
@@ -685,7 +685,7 @@ static inline struct cfs_rq *cpu_cfs_rq(
 
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline int
@@ -1126,7 +1126,9 @@ static void print_cfs_stats(struct seq_f
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
 #endif
+	lock_task_group_list();
 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
+	unlock_task_group_list();
 }
 #endif

-- 
Regards,
vatsa

Thread overview: 29+ messages
2007-11-19 12:27 [PATCH 0/2] sched: Group scheduler related patches Srivatsa Vaddagiri
2007-11-19 12:28 ` [PATCH 1/2] sched: Minor cleanups Srivatsa Vaddagiri
2007-11-19 13:08   ` Ingo Molnar
2007-11-19 15:01     ` Srivatsa Vaddagiri
2007-11-19 12:30 ` [PATCH 2/2] sched: Improve fairness of cpu allocation for task groups Srivatsa Vaddagiri
2007-11-19 13:12   ` Ingo Molnar
2007-11-19 15:03     ` Srivatsa Vaddagiri
2007-11-19 15:22       ` Ingo Molnar
2007-11-19 16:06         ` Srivatsa Vaddagiri
2007-11-19 19:00           ` Ingo Molnar
2007-11-26  5:00             ` [PATCH 0/4] sched: group scheduler related patches (V3) Srivatsa Vaddagiri
2007-11-26  5:02               ` [PATCH 1/4] sched: code cleanup Srivatsa Vaddagiri
2007-11-26  5:03               ` Srivatsa Vaddagiri [this message]
2007-11-26  5:05               ` [Patch 3/4 v1] sched: change how cpu load is calculated Srivatsa Vaddagiri
2007-11-26  5:06               ` [Patch 3/4 v2] " Srivatsa Vaddagiri
2007-11-26  5:09               ` [Patch 4/4] sched: Improve fairness of cpu bandwidth allocation for task groups Srivatsa Vaddagiri
2007-11-26 20:28                 ` Ingo Molnar
2007-11-27  5:06                   ` [Patch 0/5] sched: group scheduler related patches (V4) Srivatsa Vaddagiri
2007-11-27  5:08                     ` [Patch 1/5] sched: code cleanup Srivatsa Vaddagiri
2007-11-27  5:09                     ` [Patch 2/5] sched: minor fixes for group scheduler Srivatsa Vaddagiri
2007-11-27  5:11                     ` [Patch 3/5 v1] sched: change how cpu load is calculated Srivatsa Vaddagiri
2007-11-27  5:12                     ` [Patch 3/5 v2] " Srivatsa Vaddagiri
2007-11-27  5:21                     ` [Patch 4/5] sched: introduce a mutex and corresponding API to serialize access to doms_cur[] array Srivatsa Vaddagiri
2007-11-27  5:27                     ` [Patch 5/5] sched: Improve fairness of cpu bandwidth allocation for task groups Srivatsa Vaddagiri
2007-11-27 11:09                     ` [Patch 0/5] sched: group scheduler related patches (V4) Ingo Molnar
2007-11-27 11:42                       ` Srivatsa Vaddagiri
2007-11-27 12:53                         ` Ingo Molnar
2007-11-27 14:32                           ` Srivatsa Vaddagiri
2007-11-26 20:29                 ` [Patch 4/4] sched: Improve fairness of cpu bandwidth allocation for task groups Ingo Molnar
