* [RFC PATCH 01/11] sched: Simple helper functions for find_busiest_group()
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
@ 2009-03-25 9:13 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 02/11] sched: Fix indentations in find_busiest_group using gotos Gautham R Shenoy
` (10 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:13 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Currently the load idx calculation code is in find_busiest_group(). Move that
to a static inline helper function.
Similarly, to find the first cpu of a sched_group we use
cpumask_first(sched_group_cpus(group))
Use a helper function for that. It improves readability in
some cases.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 55 +++++++++++++++++++++++++++++++++++++++++++------------
1 files changed, 43 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e2558c..0b65f8c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3085,6 +3085,43 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
+/********** Helpers for find_busiest_group ************************/
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
+
+/**
+ * get_sd_load_idx - Obtain the load index for a given sched domain.
+ * @sd: The sched_domain whose load_idx is to be obtained.
+ * @idle: The idle status of the CPU for whose sd load_idx is obtained.
+ */
+static inline int get_sd_load_idx(struct sched_domain *sd,
+ enum cpu_idle_type idle)
+{
+ int load_idx;
+
+ switch (idle) {
+ case CPU_NOT_IDLE:
+ load_idx = sd->busy_idx;
+ break;
+
+ case CPU_NEWLY_IDLE:
+ load_idx = sd->newidle_idx;
+ break;
+ default:
+ load_idx = sd->idle_idx;
+ break;
+ }
+
+ return load_idx;
+}
+/******* find_busiest_group() helpers end here *********************/
/*
* find_busiest_group finds and returns the busiest CPU group within the
@@ -3113,12 +3150,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
busiest_load_per_task = busiest_nr_running = 0;
this_load_per_task = this_nr_running = 0;
- if (idle == CPU_NOT_IDLE)
- load_idx = sd->busy_idx;
- else if (idle == CPU_NEWLY_IDLE)
- load_idx = sd->newidle_idx;
- else
- load_idx = sd->idle_idx;
+ load_idx = get_sd_load_idx(sd, idle);
do {
unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
@@ -3134,7 +3166,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
sched_group_cpus(group));
if (local_group)
- balance_cpu = cpumask_first(sched_group_cpus(group));
+ balance_cpu = group_first_cpu(group);
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3255,8 +3287,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
if ((sum_nr_running < min_nr_running) ||
(sum_nr_running == min_nr_running &&
- cpumask_first(sched_group_cpus(group)) >
- cpumask_first(sched_group_cpus(group_min)))) {
+ group_first_cpu(group) > group_first_cpu(group_min))) {
group_min = group;
min_nr_running = sum_nr_running;
min_load_per_task = sum_weighted_load /
@@ -3271,8 +3302,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sum_nr_running <= group_capacity - 1) {
if (sum_nr_running > leader_nr_running ||
(sum_nr_running == leader_nr_running &&
- cpumask_first(sched_group_cpus(group)) <
- cpumask_first(sched_group_cpus(group_leader)))) {
+ group_first_cpu(group) <
+ group_first_cpu(group_leader))) {
group_leader = group;
leader_nr_running = sum_nr_running;
}
@@ -3400,7 +3431,7 @@ out_balanced:
*imbalance = min_load_per_task;
if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- cpumask_first(sched_group_cpus(group_leader));
+ group_first_cpu(group_leader);
}
return group_min;
}
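For reference, a minimal standalone sketch of the load-index helper introduced above, with sched_domain reduced to just the three index fields the helper reads and everything else stubbed out. This is illustration only, not the kernel definitions; the stubbed struct, the example values and main() are made up for the sketch:

	#include <stdio.h>

	enum cpu_idle_type { CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_IDLE };

	/* Stub: only the fields get_sd_load_idx() actually touches */
	struct sched_domain {
		int busy_idx;
		int newidle_idx;
		int idle_idx;
	};

	static inline int get_sd_load_idx(struct sched_domain *sd,
					  enum cpu_idle_type idle)
	{
		switch (idle) {
		case CPU_NOT_IDLE:
			return sd->busy_idx;
		case CPU_NEWLY_IDLE:
			return sd->newidle_idx;
		default:
			return sd->idle_idx;
		}
	}

	int main(void)
	{
		struct sched_domain sd = { .busy_idx = 3, .newidle_idx = 0, .idle_idx = 1 };

		printf("busy=%d newidle=%d idle=%d\n",
		       get_sd_load_idx(&sd, CPU_NOT_IDLE),
		       get_sd_load_idx(&sd, CPU_NEWLY_IDLE),
		       get_sd_load_idx(&sd, CPU_IDLE));
		return 0;
	}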
* [tip:sched/balancing] sched: Simple helper functions for find_busiest_group()
2009-03-25 9:13 ` [RFC PATCH 01/11] sched: Simple helper functions for find_busiest_group() Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: 67bb6c036d1fc3d332c8527a36a546e3e72e822c
Gitweb: http://git.kernel.org/tip/67bb6c036d1fc3d332c8527a36a546e3e72e822c
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:43:35 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:44 +0100
sched: Simple helper functions for find_busiest_group()
Impact: cleanup
Currently the load idx calculation code is in find_busiest_group().
Move that to a static inline helper function.
Similarly, to find the first cpu of a sched_group we use
cpumask_first(sched_group_cpus(group))
Use a helper function for that. It improves readability in some cases.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091335.13992.55424.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 55 +++++++++++++++++++++++++++++++++++++++++++------------
1 files changed, 43 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7b389c7..6aec1e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3189,6 +3189,43 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
+/********** Helpers for find_busiest_group ************************/
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
+
+/**
+ * get_sd_load_idx - Obtain the load index for a given sched domain.
+ * @sd: The sched_domain whose load_idx is to be obtained.
+ * @idle: The idle status of the CPU for whose sd load_idx is obtained.
+ */
+static inline int get_sd_load_idx(struct sched_domain *sd,
+ enum cpu_idle_type idle)
+{
+ int load_idx;
+
+ switch (idle) {
+ case CPU_NOT_IDLE:
+ load_idx = sd->busy_idx;
+ break;
+
+ case CPU_NEWLY_IDLE:
+ load_idx = sd->newidle_idx;
+ break;
+ default:
+ load_idx = sd->idle_idx;
+ break;
+ }
+
+ return load_idx;
+}
+/******* find_busiest_group() helpers end here *********************/
/*
* find_busiest_group finds and returns the busiest CPU group within the
@@ -3217,12 +3254,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
busiest_load_per_task = busiest_nr_running = 0;
this_load_per_task = this_nr_running = 0;
- if (idle == CPU_NOT_IDLE)
- load_idx = sd->busy_idx;
- else if (idle == CPU_NEWLY_IDLE)
- load_idx = sd->newidle_idx;
- else
- load_idx = sd->idle_idx;
+ load_idx = get_sd_load_idx(sd, idle);
do {
unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
@@ -3238,7 +3270,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
sched_group_cpus(group));
if (local_group)
- balance_cpu = cpumask_first(sched_group_cpus(group));
+ balance_cpu = group_first_cpu(group);
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3359,8 +3391,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
if ((sum_nr_running < min_nr_running) ||
(sum_nr_running == min_nr_running &&
- cpumask_first(sched_group_cpus(group)) >
- cpumask_first(sched_group_cpus(group_min)))) {
+ group_first_cpu(group) > group_first_cpu(group_min))) {
group_min = group;
min_nr_running = sum_nr_running;
min_load_per_task = sum_weighted_load /
@@ -3375,8 +3406,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sum_nr_running <= group_capacity - 1) {
if (sum_nr_running > leader_nr_running ||
(sum_nr_running == leader_nr_running &&
- cpumask_first(sched_group_cpus(group)) <
- cpumask_first(sched_group_cpus(group_leader)))) {
+ group_first_cpu(group) <
+ group_first_cpu(group_leader))) {
group_leader = group;
leader_nr_running = sum_nr_running;
}
@@ -3504,7 +3535,7 @@ out_balanced:
*imbalance = min_load_per_task;
if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- cpumask_first(sched_group_cpus(group_leader));
+ group_first_cpu(group_leader);
}
return group_min;
}
* [RFC PATCH 02/11] sched: Fix indentations in find_busiest_group using gotos.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 01/11] sched: Simple helper functions for find_busiest_group() Gautham R Shenoy
@ 2009-03-25 9:13 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] sched: Fix indentations in find_busiest_group() " Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 03/11] sched: Define structure to store the sched_group statistics for fbg() Gautham R Shenoy
` (9 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:13 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Some indentations in find_busiest_group() can be minimized by using
early exits with the help of gotos. This improves readability in a couple of
cases.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 32 +++++++++++++++++---------------
1 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0b65f8c..d95dcc0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3299,14 +3299,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* capacity but still has some space to pick up some load
* from other group and save more power
*/
- if (sum_nr_running <= group_capacity - 1) {
- if (sum_nr_running > leader_nr_running ||
- (sum_nr_running == leader_nr_running &&
- group_first_cpu(group) <
- group_first_cpu(group_leader))) {
- group_leader = group;
- leader_nr_running = sum_nr_running;
- }
+ if (sum_nr_running > group_capacity - 1)
+ goto group_next;
+
+ if (sum_nr_running > leader_nr_running ||
+ (sum_nr_running == leader_nr_running &&
+ group_first_cpu(group) < group_first_cpu(group_leader))) {
+ group_leader = group;
+ leader_nr_running = sum_nr_running;
}
group_next:
#endif
@@ -3427,14 +3427,16 @@ out_balanced:
if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
goto ret;
- if (this == group_leader && group_leader != group_min) {
- *imbalance = min_load_per_task;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(group_leader);
- }
- return group_min;
+ if (this != group_leader || group_leader == group_min)
+ goto ret;
+
+ *imbalance = min_load_per_task;
+ if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+ cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+ group_first_cpu(group_leader);
}
+ return group_min;
+
#endif
ret:
*imbalance = 0;
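The transformation above is the usual early-exit idiom: invert the guard and jump to the continuation label, so the common path loses one level of nesting. A self-contained schematic of the pattern (not kernel code; the function and variable names are made up for the example, and the ">= capacity" test is the wrap-safe equivalent of the patch's "> capacity - 1" for capacity >= 1):

	static unsigned long pick_leader(unsigned long nr_running,
					 unsigned long capacity,
					 unsigned long leader_nr_running)
	{
		/* Early exit: the group is already at capacity, skip it */
		if (nr_running >= capacity)
			goto out;

		/* Main path now stays at a single indentation level */
		if (nr_running > leader_nr_running)
			leader_nr_running = nr_running;
	out:
		return leader_nr_running;
	}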
* [tip:sched/balancing] sched: Fix indentations in find_busiest_group() using gotos
2009-03-25 9:13 ` [RFC PATCH 02/11] sched: Fix indentations in find_busiest_group using gotos Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: 6dfdb0629019f307ab18864b1fd3e5dbb02f383c
Gitweb: http://git.kernel.org/tip/6dfdb0629019f307ab18864b1fd3e5dbb02f383c
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:43:40 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:44 +0100
sched: Fix indentations in find_busiest_group() using gotos
Impact: cleanup
Some indentations in find_busiest_group() can be minimized by using
early exits with the help of gotos. This improves readability in
a couple of cases.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091340.13992.45062.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 32 +++++++++++++++++---------------
1 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6aec1e7..f87adbe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3403,14 +3403,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* capacity but still has some space to pick up some load
* from other group and save more power
*/
- if (sum_nr_running <= group_capacity - 1) {
- if (sum_nr_running > leader_nr_running ||
- (sum_nr_running == leader_nr_running &&
- group_first_cpu(group) <
- group_first_cpu(group_leader))) {
- group_leader = group;
- leader_nr_running = sum_nr_running;
- }
+ if (sum_nr_running > group_capacity - 1)
+ goto group_next;
+
+ if (sum_nr_running > leader_nr_running ||
+ (sum_nr_running == leader_nr_running &&
+ group_first_cpu(group) < group_first_cpu(group_leader))) {
+ group_leader = group;
+ leader_nr_running = sum_nr_running;
}
group_next:
#endif
@@ -3531,14 +3531,16 @@ out_balanced:
if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
goto ret;
- if (this == group_leader && group_leader != group_min) {
- *imbalance = min_load_per_task;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(group_leader);
- }
- return group_min;
+ if (this != group_leader || group_leader == group_min)
+ goto ret;
+
+ *imbalance = min_load_per_task;
+ if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+ cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+ group_first_cpu(group_leader);
}
+ return group_min;
+
#endif
ret:
*imbalance = 0;
* [RFC PATCH 03/11] sched: Define structure to store the sched_group statistics for fbg()
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 01/11] sched: Simple helper functions for find_busiest_group() Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 02/11] sched: Fix indentations in find_busiest_group using gotos Gautham R Shenoy
@ 2009-03-25 9:13 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 04/11] sched: Create a helper function to calculate sched_group stats " Gautham R Shenoy
` (8 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:13 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Currently a whole bunch of variables are used to store the various statistics
pertaining to the groups we iterate over in find_busiest_group().
Group them together in a single data structure and add appropriate comments.
This will be useful later on when we create helper functions to calculate the
sched_group statistics.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 79 +++++++++++++++++++++++++++++++++-----------------------
1 files changed, 46 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d95dcc0..6da9939 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3088,6 +3088,18 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
/********** Helpers for find_busiest_group ************************/
/**
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+ unsigned long avg_load; /*Avg load across the CPUs of the group */
+ unsigned long group_load; /* Total load over the CPUs of the group */
+ unsigned long sum_nr_running; /* Nr tasks running in the group */
+ unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+ unsigned long group_capacity;
+ int group_imb; /* Is there an imbalance in the group ? */
+};
+
+/**
* group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
* @group: The group whose first cpu is to be returned.
*/
@@ -3153,23 +3165,22 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = get_sd_load_idx(sd, idle);
do {
- unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
+ struct sg_lb_stats sgs;
+ unsigned long load, max_cpu_load, min_cpu_load;
int local_group;
int i;
- int __group_imb = 0;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long sum_nr_running, sum_weighted_load;
unsigned long sum_avg_load_per_task;
unsigned long avg_load_per_task;
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
+ memset(&sgs, 0, sizeof(sgs));
if (local_group)
balance_cpu = group_first_cpu(group);
/* Tally up the load of all CPUs in the group */
- sum_weighted_load = sum_nr_running = avg_load = 0;
sum_avg_load_per_task = avg_load_per_task = 0;
max_cpu_load = 0;
@@ -3197,9 +3208,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
min_cpu_load = load;
}
- avg_load += load;
- sum_nr_running += rq->nr_running;
- sum_weighted_load += weighted_cpuload(i);
+ sgs.group_load += load;
+ sgs.sum_nr_running += rq->nr_running;
+ sgs.sum_weighted_load += weighted_cpuload(i);
sum_avg_load_per_task += cpu_avg_load_per_task(i);
}
@@ -3216,12 +3227,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
goto ret;
}
- total_load += avg_load;
+ total_load += sgs.group_load;
total_pwr += group->__cpu_power;
/* Adjust by relative CPU power of the group */
- avg_load = sg_div_cpu_power(group,
- avg_load * SCHED_LOAD_SCALE);
+ sgs.avg_load = sg_div_cpu_power(group,
+ sgs.group_load * SCHED_LOAD_SCALE);
/*
@@ -3237,22 +3248,23 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
sum_avg_load_per_task * SCHED_LOAD_SCALE);
if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
- __group_imb = 1;
+ sgs.group_imb = 1;
- group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+ sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
if (local_group) {
- this_load = avg_load;
+ this_load = sgs.avg_load;
this = group;
- this_nr_running = sum_nr_running;
- this_load_per_task = sum_weighted_load;
- } else if (avg_load > max_load &&
- (sum_nr_running > group_capacity || __group_imb)) {
- max_load = avg_load;
+ this_nr_running = sgs.sum_nr_running;
+ this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > max_load &&
+ (sgs.sum_nr_running > sgs.group_capacity ||
+ sgs.group_imb)) {
+ max_load = sgs.avg_load;
busiest = group;
- busiest_nr_running = sum_nr_running;
- busiest_load_per_task = sum_weighted_load;
- group_imb = __group_imb;
+ busiest_nr_running = sgs.sum_nr_running;
+ busiest_load_per_task = sgs.sum_weighted_load;
+ group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3268,7 +3280,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If the local group is idle or completely loaded
* no need to do power savings balance at this domain
*/
- if (local_group && (this_nr_running >= group_capacity ||
+ if (local_group && (this_nr_running >= sgs.group_capacity ||
!this_nr_running))
power_savings_balance = 0;
@@ -3276,8 +3288,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!power_savings_balance || sum_nr_running >= group_capacity
- || !sum_nr_running)
+ if (!power_savings_balance ||
+ sgs.sum_nr_running >= sgs.group_capacity ||
+ !sgs.sum_nr_running)
goto group_next;
/*
@@ -3285,13 +3298,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sum_nr_running < min_nr_running) ||
- (sum_nr_running == min_nr_running &&
+ if ((sgs.sum_nr_running < min_nr_running) ||
+ (sgs.sum_nr_running == min_nr_running &&
group_first_cpu(group) > group_first_cpu(group_min))) {
group_min = group;
- min_nr_running = sum_nr_running;
- min_load_per_task = sum_weighted_load /
- sum_nr_running;
+ min_nr_running = sgs.sum_nr_running;
+ min_load_per_task = sgs.sum_weighted_load /
+ sgs.sum_nr_running;
}
/*
@@ -3299,14 +3312,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* capacity but still has some space to pick up some load
* from other group and save more power
*/
- if (sum_nr_running > group_capacity - 1)
+ if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sum_nr_running > leader_nr_running ||
- (sum_nr_running == leader_nr_running &&
+ if (sgs.sum_nr_running > leader_nr_running ||
+ (sgs.sum_nr_running == leader_nr_running &&
group_first_cpu(group) < group_first_cpu(group_leader))) {
group_leader = group;
- leader_nr_running = sum_nr_running;
+ leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
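The usage pattern the patch establishes is: zero an sg_lb_stats on the stack, accumulate into it while walking the group's CPUs, then read the totals when classifying the group. A toy, self-contained version of that accumulation step (the struct fields follow the patch; the per-CPU arrays and the tally_group() name are stand-ins for the real runqueue walk):

	#include <string.h>

	struct sg_lb_stats {
		unsigned long avg_load;		/* Avg load across the CPUs of the group */
		unsigned long group_load;	/* Total load over the CPUs of the group */
		unsigned long sum_nr_running;	/* Nr tasks running in the group */
		unsigned long sum_weighted_load;/* Weighted load of group's tasks */
		unsigned long group_capacity;
		int group_imb;			/* Is there an imbalance in the group? */
	};

	/* Toy stand-in for the per-CPU tally loop in find_busiest_group() */
	static void tally_group(struct sg_lb_stats *sgs,
				const unsigned long *cpu_load,
				const unsigned int *cpu_nr_running, int nr_cpus)
	{
		int i;

		memset(sgs, 0, sizeof(*sgs));
		for (i = 0; i < nr_cpus; i++) {
			sgs->group_load += cpu_load[i];
			sgs->sum_nr_running += cpu_nr_running[i];
			sgs->sum_weighted_load += cpu_load[i];
		}
	}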
* [tip:sched/balancing] sched: Define structure to store the sched_group statistics for fbg()
2009-03-25 9:13 ` [RFC PATCH 03/11] sched: Define structure to store the sched_group statistics for fbg() Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, nickpiggin
Commit-ID: 381be78fdc829a22f6327a0ed09f54b6270a976d
Gitweb: http://git.kernel.org/tip/381be78fdc829a22f6327a0ed09f54b6270a976d
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:43:46 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:45 +0100
sched: Define structure to store the sched_group statistics for fbg()
Impact: cleanup
Currently a whole bunch of variables are used to store the
various statistics pertaining to the groups we iterate over
in find_busiest_group().
Group them together in a single data structure and add
appropriate comments.
This will be useful later on when we create helper functions
to calculate the sched_group statistics.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091345.13992.20099.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 79 ++++++++++++++++++++++++++++++++-----------------------
1 files changed, 46 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f87adbe..109db12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3192,6 +3192,18 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
/********** Helpers for find_busiest_group ************************/
/**
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+ unsigned long avg_load; /*Avg load across the CPUs of the group */
+ unsigned long group_load; /* Total load over the CPUs of the group */
+ unsigned long sum_nr_running; /* Nr tasks running in the group */
+ unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+ unsigned long group_capacity;
+ int group_imb; /* Is there an imbalance in the group ? */
+};
+
+/**
* group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
* @group: The group whose first cpu is to be returned.
*/
@@ -3257,23 +3269,22 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = get_sd_load_idx(sd, idle);
do {
- unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
+ struct sg_lb_stats sgs;
+ unsigned long load, max_cpu_load, min_cpu_load;
int local_group;
int i;
- int __group_imb = 0;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long sum_nr_running, sum_weighted_load;
unsigned long sum_avg_load_per_task;
unsigned long avg_load_per_task;
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
+ memset(&sgs, 0, sizeof(sgs));
if (local_group)
balance_cpu = group_first_cpu(group);
/* Tally up the load of all CPUs in the group */
- sum_weighted_load = sum_nr_running = avg_load = 0;
sum_avg_load_per_task = avg_load_per_task = 0;
max_cpu_load = 0;
@@ -3301,9 +3312,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
min_cpu_load = load;
}
- avg_load += load;
- sum_nr_running += rq->nr_running;
- sum_weighted_load += weighted_cpuload(i);
+ sgs.group_load += load;
+ sgs.sum_nr_running += rq->nr_running;
+ sgs.sum_weighted_load += weighted_cpuload(i);
sum_avg_load_per_task += cpu_avg_load_per_task(i);
}
@@ -3320,12 +3331,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
goto ret;
}
- total_load += avg_load;
+ total_load += sgs.group_load;
total_pwr += group->__cpu_power;
/* Adjust by relative CPU power of the group */
- avg_load = sg_div_cpu_power(group,
- avg_load * SCHED_LOAD_SCALE);
+ sgs.avg_load = sg_div_cpu_power(group,
+ sgs.group_load * SCHED_LOAD_SCALE);
/*
@@ -3341,22 +3352,23 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
sum_avg_load_per_task * SCHED_LOAD_SCALE);
if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
- __group_imb = 1;
+ sgs.group_imb = 1;
- group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+ sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
if (local_group) {
- this_load = avg_load;
+ this_load = sgs.avg_load;
this = group;
- this_nr_running = sum_nr_running;
- this_load_per_task = sum_weighted_load;
- } else if (avg_load > max_load &&
- (sum_nr_running > group_capacity || __group_imb)) {
- max_load = avg_load;
+ this_nr_running = sgs.sum_nr_running;
+ this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > max_load &&
+ (sgs.sum_nr_running > sgs.group_capacity ||
+ sgs.group_imb)) {
+ max_load = sgs.avg_load;
busiest = group;
- busiest_nr_running = sum_nr_running;
- busiest_load_per_task = sum_weighted_load;
- group_imb = __group_imb;
+ busiest_nr_running = sgs.sum_nr_running;
+ busiest_load_per_task = sgs.sum_weighted_load;
+ group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3372,7 +3384,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If the local group is idle or completely loaded
* no need to do power savings balance at this domain
*/
- if (local_group && (this_nr_running >= group_capacity ||
+ if (local_group && (this_nr_running >= sgs.group_capacity ||
!this_nr_running))
power_savings_balance = 0;
@@ -3380,8 +3392,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!power_savings_balance || sum_nr_running >= group_capacity
- || !sum_nr_running)
+ if (!power_savings_balance ||
+ sgs.sum_nr_running >= sgs.group_capacity ||
+ !sgs.sum_nr_running)
goto group_next;
/*
@@ -3389,13 +3402,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sum_nr_running < min_nr_running) ||
- (sum_nr_running == min_nr_running &&
+ if ((sgs.sum_nr_running < min_nr_running) ||
+ (sgs.sum_nr_running == min_nr_running &&
group_first_cpu(group) > group_first_cpu(group_min))) {
group_min = group;
- min_nr_running = sum_nr_running;
- min_load_per_task = sum_weighted_load /
- sum_nr_running;
+ min_nr_running = sgs.sum_nr_running;
+ min_load_per_task = sgs.sum_weighted_load /
+ sgs.sum_nr_running;
}
/*
@@ -3403,14 +3416,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* capacity but still has some space to pick up some load
* from other group and save more power
*/
- if (sum_nr_running > group_capacity - 1)
+ if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sum_nr_running > leader_nr_running ||
- (sum_nr_running == leader_nr_running &&
+ if (sgs.sum_nr_running > leader_nr_running ||
+ (sgs.sum_nr_running == leader_nr_running &&
group_first_cpu(group) < group_first_cpu(group_leader))) {
group_leader = group;
- leader_nr_running = sum_nr_running;
+ leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
* [RFC PATCH 04/11] sched: Create a helper function to calculate sched_group stats for fbg()
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (2 preceding siblings ...)
2009-03-25 9:13 ` [RFC PATCH 03/11] sched: Define structure to store the sched_group statistics for fbg() Gautham R Shenoy
@ 2009-03-25 9:13 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:13 ` [RFC PATCH 05/11] sched: Define structure to store the sched_domain statistics " Gautham R Shenoy
` (7 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:13 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Create a helper function named update_sg_lb_stats() which can be invoked to
calculate the individual group's statistics in find_busiest_group().
This reduces the length of find_busiest_group() considerably.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 175 ++++++++++++++++++++++++++++++++------------------------
1 files changed, 100 insertions(+), 75 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6da9939..d2e9b8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3133,6 +3133,103 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
return load_idx;
}
+
+
+/**
+ * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @group: sched_group whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @load_idx: Load index of sched_domain of this_cpu for load calc.
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @local_group: Does group contain this_cpu.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sgs: variable to hold the statistics for this group.
+ */
+static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+ enum cpu_idle_type idle, int load_idx, int *sd_idle,
+ int local_group, const struct cpumask *cpus,
+ int *balance, struct sg_lb_stats *sgs)
+{
+ unsigned long load, max_cpu_load, min_cpu_load;
+ int i;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+ unsigned long sum_avg_load_per_task;
+ unsigned long avg_load_per_task;
+
+ if (local_group)
+ balance_cpu = group_first_cpu(group);
+
+ /* Tally up the load of all CPUs in the group */
+ sum_avg_load_per_task = avg_load_per_task = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
+
+ for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+ struct rq *rq = cpu_rq(i);
+
+ if (*sd_idle && rq->nr_running)
+ *sd_idle = 0;
+
+ /* Bias balancing toward cpus of our domain */
+ if (local_group) {
+ if (idle_cpu(i) && !first_idle_cpu) {
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
+ load = target_load(i, load_idx);
+ } else {
+ load = source_load(i, load_idx);
+ if (load > max_cpu_load)
+ max_cpu_load = load;
+ if (min_cpu_load > load)
+ min_cpu_load = load;
+ }
+
+ sgs->group_load += load;
+ sgs->sum_nr_running += rq->nr_running;
+ sgs->sum_weighted_load += weighted_cpuload(i);
+
+ sum_avg_load_per_task += cpu_avg_load_per_task(i);
+ }
+
+ /*
+ * First idle cpu or the first cpu(busiest) in this sched group
+ * is eligible for doing load balancing at this and above
+ * domains. In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
+ */
+ if (idle != CPU_NEWLY_IDLE && local_group &&
+ balance_cpu != this_cpu && balance) {
+ *balance = 0;
+ return;
+ }
+
+ /* Adjust by relative CPU power of the group */
+ sgs->avg_load = sg_div_cpu_power(group,
+ sgs->group_load * SCHED_LOAD_SCALE);
+
+
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of two tasks.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ avg_load_per_task = sg_div_cpu_power(group,
+ sum_avg_load_per_task * SCHED_LOAD_SCALE);
+
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ sgs->group_imb = 1;
+
+ sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3166,92 +3263,20 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
do {
struct sg_lb_stats sgs;
- unsigned long load, max_cpu_load, min_cpu_load;
int local_group;
- int i;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long sum_avg_load_per_task;
- unsigned long avg_load_per_task;
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
memset(&sgs, 0, sizeof(sgs));
+ update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+ local_group, cpus, balance, &sgs);
- if (local_group)
- balance_cpu = group_first_cpu(group);
-
- /* Tally up the load of all CPUs in the group */
- sum_avg_load_per_task = avg_load_per_task = 0;
-
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
-
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
- struct rq *rq = cpu_rq(i);
-
- if (*sd_idle && rq->nr_running)
- *sd_idle = 0;
-
- /* Bias balancing toward cpus of our domain */
- if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
-
- load = target_load(i, load_idx);
- } else {
- load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
- }
-
- sgs.group_load += load;
- sgs.sum_nr_running += rq->nr_running;
- sgs.sum_weighted_load += weighted_cpuload(i);
-
- sum_avg_load_per_task += cpu_avg_load_per_task(i);
- }
-
- /*
- * First idle cpu or the first cpu(busiest) in this sched group
- * is eligible for doing load balancing at this and above
- * domains. In the newly idle case, we will allow all the cpu's
- * to do the newly idle load balance.
- */
- if (idle != CPU_NEWLY_IDLE && local_group &&
- balance_cpu != this_cpu && balance) {
- *balance = 0;
+ if (balance && !(*balance))
goto ret;
- }
total_load += sgs.group_load;
total_pwr += group->__cpu_power;
- /* Adjust by relative CPU power of the group */
- sgs.avg_load = sg_div_cpu_power(group,
- sgs.group_load * SCHED_LOAD_SCALE);
-
-
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of two tasks.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
- avg_load_per_task = sg_div_cpu_power(group,
- sum_avg_load_per_task * SCHED_LOAD_SCALE);
-
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
- sgs.group_imb = 1;
-
- sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
if (local_group) {
this_load = sgs.avg_load;
this = group;
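The interface detail worth noting is that the extracted helper reports "this cpu should not continue balancing here" through the *balance out-parameter rather than a return value, so the caller's per-group body reduces to one call plus one check. A toy, self-contained sketch of that calling convention (names and values are invented for the example and do not come from the patch):

	#include <string.h>

	/* Toy stats; stands in for struct sg_lb_stats */
	struct toy_stats {
		unsigned long load;
	};

	/*
	 * Toy helper mirroring update_sg_lb_stats(): it fills *st and, when
	 * the caller passed a non-NULL flag, may clear it to say "stop
	 * balancing here" -- the role *balance plays in the patch.
	 */
	static void toy_update_stats(int group_id, struct toy_stats *st, int *keep_going)
	{
		memset(st, 0, sizeof(*st));
		st->load = (unsigned long)group_id * 100;

		if (keep_going && group_id == 2)	/* some disqualifying condition */
			*keep_going = 0;
	}

	static unsigned long toy_walk_groups(int nr_groups, int *keep_going)
	{
		unsigned long total = 0;
		int g;

		for (g = 0; g < nr_groups; g++) {
			struct toy_stats st;

			toy_update_stats(g, &st, keep_going);
			if (keep_going && !*keep_going)
				break;	/* mirrors: if (balance && !(*balance)) goto ret; */
			total += st.load;
		}
		return total;
	}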
* [tip:sched/balancing] sched: Create a helper function to calculate sched_group stats for fbg()
2009-03-25 9:13 ` [RFC PATCH 04/11] sched: Create a helper function to calculate sched_group stats " Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, nickpiggin
Commit-ID: 1f8c553d0f11d85f7993fe21015695d266771c00
Gitweb: http://git.kernel.org/tip/1f8c553d0f11d85f7993fe21015695d266771c00
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:43:51 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:45 +0100
sched: Create a helper function to calculate sched_group stats for fbg()
Impact: cleanup
Create a helper function named update_sg_lb_stats() which
can be invoked to calculate the individual group's statistics
in find_busiest_group().
This reduces the length of find_busiest_group() considerably.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091351.13992.43461.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 175 ++++++++++++++++++++++++++++++++------------------------
1 files changed, 100 insertions(+), 75 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 109db12..1893d55 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3237,6 +3237,103 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
return load_idx;
}
+
+
+/**
+ * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @group: sched_group whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @load_idx: Load index of sched_domain of this_cpu for load calc.
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @local_group: Does group contain this_cpu.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sgs: variable to hold the statistics for this group.
+ */
+static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+ enum cpu_idle_type idle, int load_idx, int *sd_idle,
+ int local_group, const struct cpumask *cpus,
+ int *balance, struct sg_lb_stats *sgs)
+{
+ unsigned long load, max_cpu_load, min_cpu_load;
+ int i;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+ unsigned long sum_avg_load_per_task;
+ unsigned long avg_load_per_task;
+
+ if (local_group)
+ balance_cpu = group_first_cpu(group);
+
+ /* Tally up the load of all CPUs in the group */
+ sum_avg_load_per_task = avg_load_per_task = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
+
+ for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+ struct rq *rq = cpu_rq(i);
+
+ if (*sd_idle && rq->nr_running)
+ *sd_idle = 0;
+
+ /* Bias balancing toward cpus of our domain */
+ if (local_group) {
+ if (idle_cpu(i) && !first_idle_cpu) {
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
+ load = target_load(i, load_idx);
+ } else {
+ load = source_load(i, load_idx);
+ if (load > max_cpu_load)
+ max_cpu_load = load;
+ if (min_cpu_load > load)
+ min_cpu_load = load;
+ }
+
+ sgs->group_load += load;
+ sgs->sum_nr_running += rq->nr_running;
+ sgs->sum_weighted_load += weighted_cpuload(i);
+
+ sum_avg_load_per_task += cpu_avg_load_per_task(i);
+ }
+
+ /*
+ * First idle cpu or the first cpu(busiest) in this sched group
+ * is eligible for doing load balancing at this and above
+ * domains. In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
+ */
+ if (idle != CPU_NEWLY_IDLE && local_group &&
+ balance_cpu != this_cpu && balance) {
+ *balance = 0;
+ return;
+ }
+
+ /* Adjust by relative CPU power of the group */
+ sgs->avg_load = sg_div_cpu_power(group,
+ sgs->group_load * SCHED_LOAD_SCALE);
+
+
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of two tasks.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ avg_load_per_task = sg_div_cpu_power(group,
+ sum_avg_load_per_task * SCHED_LOAD_SCALE);
+
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ sgs->group_imb = 1;
+
+ sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3270,92 +3367,20 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
do {
struct sg_lb_stats sgs;
- unsigned long load, max_cpu_load, min_cpu_load;
int local_group;
- int i;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long sum_avg_load_per_task;
- unsigned long avg_load_per_task;
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
memset(&sgs, 0, sizeof(sgs));
+ update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+ local_group, cpus, balance, &sgs);
- if (local_group)
- balance_cpu = group_first_cpu(group);
-
- /* Tally up the load of all CPUs in the group */
- sum_avg_load_per_task = avg_load_per_task = 0;
-
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
-
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
- struct rq *rq = cpu_rq(i);
-
- if (*sd_idle && rq->nr_running)
- *sd_idle = 0;
-
- /* Bias balancing toward cpus of our domain */
- if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
-
- load = target_load(i, load_idx);
- } else {
- load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
- }
-
- sgs.group_load += load;
- sgs.sum_nr_running += rq->nr_running;
- sgs.sum_weighted_load += weighted_cpuload(i);
-
- sum_avg_load_per_task += cpu_avg_load_per_task(i);
- }
-
- /*
- * First idle cpu or the first cpu(busiest) in this sched group
- * is eligible for doing load balancing at this and above
- * domains. In the newly idle case, we will allow all the cpu's
- * to do the newly idle load balance.
- */
- if (idle != CPU_NEWLY_IDLE && local_group &&
- balance_cpu != this_cpu && balance) {
- *balance = 0;
+ if (balance && !(*balance))
goto ret;
- }
total_load += sgs.group_load;
total_pwr += group->__cpu_power;
- /* Adjust by relative CPU power of the group */
- sgs.avg_load = sg_div_cpu_power(group,
- sgs.group_load * SCHED_LOAD_SCALE);
-
-
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of two tasks.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
- avg_load_per_task = sg_div_cpu_power(group,
- sum_avg_load_per_task * SCHED_LOAD_SCALE);
-
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
- sgs.group_imb = 1;
-
- sgs.group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
if (local_group) {
this_load = sgs.avg_load;
this = group;
* [RFC PATCH 05/11] sched: Define structure to store the sched_domain statistics for fbg()
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (3 preceding siblings ...)
2009-03-25 9:13 ` [RFC PATCH 04/11] sched: Create a helper function to calculate sched_group stats " Gautham R Shenoy
@ 2009-03-25 9:13 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 06/11] sched: Create a helper function to calculate sched_domain stats " Gautham R Shenoy
` (6 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:13 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Currently we use a lot of local variables in find_busiest_group() to capture
the various statistics related to the sched_domain. Group them together into a
single data structure.
This will help us to offload the job of updating the sched_domain statistics
to a helper function.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 207 +++++++++++++++++++++++++++++++++-----------------------
1 files changed, 121 insertions(+), 86 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d2e9b8a..c1b92da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3086,6 +3086,37 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
/********** Helpers for find_busiest_group ************************/
+/**
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
+ */
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *this; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_pwr; /* Total power of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ /** Statistics of this group */
+ unsigned long this_load;
+ unsigned long this_load_per_task;
+ unsigned long this_nr_running;
+
+ /* Statistics of the busiest group */
+ unsigned long max_load;
+ unsigned long busiest_load_per_task;
+ unsigned long busiest_nr_running;
+
+ int group_imb; /* Is there imbalance in this sd */
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance; /* Is powersave balance needed for this sd */
+ struct sched_group *group_min; /* Least loaded group in sd */
+ struct sched_group *group_leader; /* Group which relieves group_min */
+ unsigned long min_load_per_task; /* load_per_task in group_min */
+ unsigned long leader_nr_running; /* Nr running of group_leader */
+ unsigned long min_nr_running; /* Nr running of group_min */
+#endif
+};
/**
* sg_lb_stats - stats of a sched_group required for load_balancing
@@ -3242,23 +3273,16 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum cpu_idle_type idle,
int *sd_idle, const struct cpumask *cpus, int *balance)
{
- struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
- unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+ struct sd_lb_stats sds;
+ struct sched_group *group = sd->groups;
unsigned long max_pull;
- unsigned long busiest_load_per_task, busiest_nr_running;
- unsigned long this_load_per_task, this_nr_running;
- int load_idx, group_imb = 0;
+ int load_idx;
+
+ memset(&sds, 0, sizeof(sds));
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- int power_savings_balance = 1;
- unsigned long leader_nr_running = 0, min_load_per_task = 0;
- unsigned long min_nr_running = ULONG_MAX;
- struct sched_group *group_min = NULL, *group_leader = NULL;
+ sds.power_savings_balance = 1;
+ sds.min_nr_running = ULONG_MAX;
#endif
-
- max_load = this_load = total_load = total_pwr = 0;
- busiest_load_per_task = busiest_nr_running = 0;
- this_load_per_task = this_nr_running = 0;
-
load_idx = get_sd_load_idx(sd, idle);
do {
@@ -3274,22 +3298,22 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (balance && !(*balance))
goto ret;
- total_load += sgs.group_load;
- total_pwr += group->__cpu_power;
+ sds.total_load += sgs.group_load;
+ sds.total_pwr += group->__cpu_power;
if (local_group) {
- this_load = sgs.avg_load;
- this = group;
- this_nr_running = sgs.sum_nr_running;
- this_load_per_task = sgs.sum_weighted_load;
- } else if (sgs.avg_load > max_load &&
+ sds.this_load = sgs.avg_load;
+ sds.this = group;
+ sds.this_nr_running = sgs.sum_nr_running;
+ sds.this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > sds.max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
- max_load = sgs.avg_load;
- busiest = group;
- busiest_nr_running = sgs.sum_nr_running;
- busiest_load_per_task = sgs.sum_weighted_load;
- group_imb = sgs.group_imb;
+ sds.max_load = sgs.avg_load;
+ sds.busiest = group;
+ sds.busiest_nr_running = sgs.sum_nr_running;
+ sds.busiest_load_per_task = sgs.sum_weighted_load;
+ sds.group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3305,15 +3329,16 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If the local group is idle or completely loaded
* no need to do power savings balance at this domain
*/
- if (local_group && (this_nr_running >= sgs.group_capacity ||
- !this_nr_running))
- power_savings_balance = 0;
+ if (local_group &&
+ (sds.this_nr_running >= sgs.group_capacity ||
+ !sds.this_nr_running))
+ sds.power_savings_balance = 0;
/*
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!power_savings_balance ||
+ if (!sds.power_savings_balance ||
sgs.sum_nr_running >= sgs.group_capacity ||
!sgs.sum_nr_running)
goto group_next;
@@ -3323,12 +3348,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sgs.sum_nr_running < min_nr_running) ||
- (sgs.sum_nr_running == min_nr_running &&
- group_first_cpu(group) > group_first_cpu(group_min))) {
- group_min = group;
- min_nr_running = sgs.sum_nr_running;
- min_load_per_task = sgs.sum_weighted_load /
+ if ((sgs.sum_nr_running < sds.min_nr_running) ||
+ (sgs.sum_nr_running == sds.min_nr_running &&
+ group_first_cpu(group) >
+ group_first_cpu(sds.group_min))) {
+ sds.group_min = group;
+ sds.min_nr_running = sgs.sum_nr_running;
+ sds.min_load_per_task = sgs.sum_weighted_load /
sgs.sum_nr_running;
}
@@ -3340,29 +3366,32 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sgs.sum_nr_running > leader_nr_running ||
- (sgs.sum_nr_running == leader_nr_running &&
- group_first_cpu(group) < group_first_cpu(group_leader))) {
- group_leader = group;
- leader_nr_running = sgs.sum_nr_running;
+ if (sgs.sum_nr_running > sds.leader_nr_running ||
+ (sgs.sum_nr_running == sds.leader_nr_running &&
+ group_first_cpu(group) <
+ group_first_cpu(sds.group_leader))) {
+ sds.group_leader = group;
+ sds.leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load || busiest_nr_running == 0)
+ if (!sds.busiest || sds.this_load >= sds.max_load
+ || sds.busiest_nr_running == 0)
goto out_balanced;
- avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
+ sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (this_load >= avg_load ||
- 100*max_load <= sd->imbalance_pct*this_load)
+ if (sds.this_load >= sds.avg_load ||
+ 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
- busiest_load_per_task /= busiest_nr_running;
- if (group_imb)
- busiest_load_per_task = min(busiest_load_per_task, avg_load);
+ sds.busiest_load_per_task /= sds.busiest_nr_running;
+ if (sds.group_imb)
+ sds.busiest_load_per_task =
+ min(sds.busiest_load_per_task, sds.avg_load);
/*
* We're trying to get all the cpus to the average_load, so we don't
@@ -3375,7 +3404,7 @@ group_next:
* by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
- if (max_load <= busiest_load_per_task)
+ if (sds.max_load <= sds.busiest_load_per_task)
goto out_balanced;
/*
@@ -3383,17 +3412,18 @@ group_next:
* max load less than avg load(as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (max_load < avg_load) {
+ if (sds.max_load < sds.avg_load) {
*imbalance = 0;
goto small_imbalance;
}
/* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
+ max_pull = min(sds.max_load - sds.avg_load,
+ sds.max_load - sds.busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * busiest->__cpu_power,
- (avg_load - this_load) * this->__cpu_power)
+ *imbalance = min(max_pull * sds.busiest->__cpu_power,
+ (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
/ SCHED_LOAD_SCALE;
/*
@@ -3402,24 +3432,27 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < busiest_load_per_task) {
+ if (*imbalance < sds.busiest_load_per_task) {
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
small_imbalance:
pwr_move = pwr_now = 0;
imbn = 2;
- if (this_nr_running) {
- this_load_per_task /= this_nr_running;
- if (busiest_load_per_task > this_load_per_task)
+ if (sds.this_nr_running) {
+ sds.this_load_per_task /= sds.this_nr_running;
+ if (sds.busiest_load_per_task >
+ sds.this_load_per_task)
imbn = 1;
} else
- this_load_per_task = cpu_avg_load_per_task(this_cpu);
-
- if (max_load - this_load + busiest_load_per_task >=
- busiest_load_per_task * imbn) {
- *imbalance = busiest_load_per_task;
- return busiest;
+ sds.this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+ if (sds.max_load - sds.this_load +
+ sds.busiest_load_per_task >=
+ sds.busiest_load_per_task * imbn) {
+ *imbalance = sds.busiest_load_per_task;
+ return sds.busiest;
}
/*
@@ -3428,52 +3461,54 @@ small_imbalance:
* moving them.
*/
- pwr_now += busiest->__cpu_power *
- min(busiest_load_per_task, max_load);
- pwr_now += this->__cpu_power *
- min(this_load_per_task, this_load);
+ pwr_now += sds.busiest->__cpu_power *
+ min(sds.busiest_load_per_task, sds.max_load);
+ pwr_now += sds.this->__cpu_power *
+ min(sds.this_load_per_task, sds.this_load);
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(busiest,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- if (max_load > tmp)
- pwr_move += busiest->__cpu_power *
- min(busiest_load_per_task, max_load - tmp);
+ tmp = sg_div_cpu_power(sds.busiest,
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds.max_load > tmp)
+ pwr_move += sds.busiest->__cpu_power *
+ min(sds.busiest_load_per_task,
+ sds.max_load - tmp);
/* Amount of load we'd add */
- if (max_load * busiest->__cpu_power <
- busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(this,
- max_load * busiest->__cpu_power);
+ if (sds.max_load * sds.busiest->__cpu_power <
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds.this,
+ sds.max_load * sds.busiest->__cpu_power);
else
- tmp = sg_div_cpu_power(this,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += this->__cpu_power *
- min(this_load_per_task, this_load + tmp);
+ tmp = sg_div_cpu_power(sds.this,
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds.this->__cpu_power *
+ min(sds.this_load_per_task,
+ sds.this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
if (pwr_move > pwr_now)
- *imbalance = busiest_load_per_task;
+ *imbalance = sds.busiest_load_per_task;
}
- return busiest;
+ return sds.busiest;
out_balanced:
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
goto ret;
- if (this != group_leader || group_leader == group_min)
+ if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
goto ret;
- *imbalance = min_load_per_task;
+ *imbalance = sds.min_load_per_task;
if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(group_leader);
+ group_first_cpu(sds.group_leader);
}
- return group_min;
+ return sds.group_min;
#endif
ret:
* [tip:sched/balancing] sched: Define structure to store the sched_domain statistics for fbg()
2009-03-25 9:13 ` [RFC PATCH 05/11] sched: Define structure to store the sched_domain statistics " Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, nickpiggin
Commit-ID: 222d656dea57e4e084fbd1e9383e6fed2ca9fa61
Gitweb: http://git.kernel.org/tip/222d656dea57e4e084fbd1e9383e6fed2ca9fa61
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:43:56 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:46 +0100
sched: Define structure to store the sched_domain statistics for fbg()
Impact: cleanup
Currently we use a lot of local variables in find_busiest_group()
to capture the various statistics related to the sched_domain.
Group them together into a single data structure.
This will help us to offload the job of updating the sched_domain
statistics to a helper function.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091356.13992.25970.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 207 +++++++++++++++++++++++++++++++++-----------------------
1 files changed, 121 insertions(+), 86 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1893d55..8198dbe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3190,6 +3190,37 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
/********** Helpers for find_busiest_group ************************/
+/**
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
+ */
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *this; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_pwr; /* Total power of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ /** Statistics of this group */
+ unsigned long this_load;
+ unsigned long this_load_per_task;
+ unsigned long this_nr_running;
+
+ /* Statistics of the busiest group */
+ unsigned long max_load;
+ unsigned long busiest_load_per_task;
+ unsigned long busiest_nr_running;
+
+ int group_imb; /* Is there imbalance in this sd */
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance; /* Is powersave balance needed for this sd */
+ struct sched_group *group_min; /* Least loaded group in sd */
+ struct sched_group *group_leader; /* Group which relieves group_min */
+ unsigned long min_load_per_task; /* load_per_task in group_min */
+ unsigned long leader_nr_running; /* Nr running of group_leader */
+ unsigned long min_nr_running; /* Nr running of group_min */
+#endif
+};
/**
* sg_lb_stats - stats of a sched_group required for load_balancing
@@ -3346,23 +3377,16 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum cpu_idle_type idle,
int *sd_idle, const struct cpumask *cpus, int *balance)
{
- struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
- unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+ struct sd_lb_stats sds;
+ struct sched_group *group = sd->groups;
unsigned long max_pull;
- unsigned long busiest_load_per_task, busiest_nr_running;
- unsigned long this_load_per_task, this_nr_running;
- int load_idx, group_imb = 0;
+ int load_idx;
+
+ memset(&sds, 0, sizeof(sds));
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- int power_savings_balance = 1;
- unsigned long leader_nr_running = 0, min_load_per_task = 0;
- unsigned long min_nr_running = ULONG_MAX;
- struct sched_group *group_min = NULL, *group_leader = NULL;
+ sds.power_savings_balance = 1;
+ sds.min_nr_running = ULONG_MAX;
#endif
-
- max_load = this_load = total_load = total_pwr = 0;
- busiest_load_per_task = busiest_nr_running = 0;
- this_load_per_task = this_nr_running = 0;
-
load_idx = get_sd_load_idx(sd, idle);
do {
@@ -3378,22 +3402,22 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (balance && !(*balance))
goto ret;
- total_load += sgs.group_load;
- total_pwr += group->__cpu_power;
+ sds.total_load += sgs.group_load;
+ sds.total_pwr += group->__cpu_power;
if (local_group) {
- this_load = sgs.avg_load;
- this = group;
- this_nr_running = sgs.sum_nr_running;
- this_load_per_task = sgs.sum_weighted_load;
- } else if (sgs.avg_load > max_load &&
+ sds.this_load = sgs.avg_load;
+ sds.this = group;
+ sds.this_nr_running = sgs.sum_nr_running;
+ sds.this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > sds.max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
- max_load = sgs.avg_load;
- busiest = group;
- busiest_nr_running = sgs.sum_nr_running;
- busiest_load_per_task = sgs.sum_weighted_load;
- group_imb = sgs.group_imb;
+ sds.max_load = sgs.avg_load;
+ sds.busiest = group;
+ sds.busiest_nr_running = sgs.sum_nr_running;
+ sds.busiest_load_per_task = sgs.sum_weighted_load;
+ sds.group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3409,15 +3433,16 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* If the local group is idle or completely loaded
* no need to do power savings balance at this domain
*/
- if (local_group && (this_nr_running >= sgs.group_capacity ||
- !this_nr_running))
- power_savings_balance = 0;
+ if (local_group &&
+ (sds.this_nr_running >= sgs.group_capacity ||
+ !sds.this_nr_running))
+ sds.power_savings_balance = 0;
/*
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!power_savings_balance ||
+ if (!sds.power_savings_balance ||
sgs.sum_nr_running >= sgs.group_capacity ||
!sgs.sum_nr_running)
goto group_next;
@@ -3427,12 +3452,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sgs.sum_nr_running < min_nr_running) ||
- (sgs.sum_nr_running == min_nr_running &&
- group_first_cpu(group) > group_first_cpu(group_min))) {
- group_min = group;
- min_nr_running = sgs.sum_nr_running;
- min_load_per_task = sgs.sum_weighted_load /
+ if ((sgs.sum_nr_running < sds.min_nr_running) ||
+ (sgs.sum_nr_running == sds.min_nr_running &&
+ group_first_cpu(group) >
+ group_first_cpu(sds.group_min))) {
+ sds.group_min = group;
+ sds.min_nr_running = sgs.sum_nr_running;
+ sds.min_load_per_task = sgs.sum_weighted_load /
sgs.sum_nr_running;
}
@@ -3444,29 +3470,32 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sgs.sum_nr_running > leader_nr_running ||
- (sgs.sum_nr_running == leader_nr_running &&
- group_first_cpu(group) < group_first_cpu(group_leader))) {
- group_leader = group;
- leader_nr_running = sgs.sum_nr_running;
+ if (sgs.sum_nr_running > sds.leader_nr_running ||
+ (sgs.sum_nr_running == sds.leader_nr_running &&
+ group_first_cpu(group) <
+ group_first_cpu(sds.group_leader))) {
+ sds.group_leader = group;
+ sds.leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load || busiest_nr_running == 0)
+ if (!sds.busiest || sds.this_load >= sds.max_load
+ || sds.busiest_nr_running == 0)
goto out_balanced;
- avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
+ sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (this_load >= avg_load ||
- 100*max_load <= sd->imbalance_pct*this_load)
+ if (sds.this_load >= sds.avg_load ||
+ 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
- busiest_load_per_task /= busiest_nr_running;
- if (group_imb)
- busiest_load_per_task = min(busiest_load_per_task, avg_load);
+ sds.busiest_load_per_task /= sds.busiest_nr_running;
+ if (sds.group_imb)
+ sds.busiest_load_per_task =
+ min(sds.busiest_load_per_task, sds.avg_load);
/*
* We're trying to get all the cpus to the average_load, so we don't
@@ -3479,7 +3508,7 @@ group_next:
* by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
- if (max_load <= busiest_load_per_task)
+ if (sds.max_load <= sds.busiest_load_per_task)
goto out_balanced;
/*
@@ -3487,17 +3516,18 @@ group_next:
* max load less than avg load(as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (max_load < avg_load) {
+ if (sds.max_load < sds.avg_load) {
*imbalance = 0;
goto small_imbalance;
}
/* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
+ max_pull = min(sds.max_load - sds.avg_load,
+ sds.max_load - sds.busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * busiest->__cpu_power,
- (avg_load - this_load) * this->__cpu_power)
+ *imbalance = min(max_pull * sds.busiest->__cpu_power,
+ (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
/ SCHED_LOAD_SCALE;
/*
@@ -3506,24 +3536,27 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < busiest_load_per_task) {
+ if (*imbalance < sds.busiest_load_per_task) {
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
small_imbalance:
pwr_move = pwr_now = 0;
imbn = 2;
- if (this_nr_running) {
- this_load_per_task /= this_nr_running;
- if (busiest_load_per_task > this_load_per_task)
+ if (sds.this_nr_running) {
+ sds.this_load_per_task /= sds.this_nr_running;
+ if (sds.busiest_load_per_task >
+ sds.this_load_per_task)
imbn = 1;
} else
- this_load_per_task = cpu_avg_load_per_task(this_cpu);
-
- if (max_load - this_load + busiest_load_per_task >=
- busiest_load_per_task * imbn) {
- *imbalance = busiest_load_per_task;
- return busiest;
+ sds.this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+ if (sds.max_load - sds.this_load +
+ sds.busiest_load_per_task >=
+ sds.busiest_load_per_task * imbn) {
+ *imbalance = sds.busiest_load_per_task;
+ return sds.busiest;
}
/*
@@ -3532,52 +3565,54 @@ small_imbalance:
* moving them.
*/
- pwr_now += busiest->__cpu_power *
- min(busiest_load_per_task, max_load);
- pwr_now += this->__cpu_power *
- min(this_load_per_task, this_load);
+ pwr_now += sds.busiest->__cpu_power *
+ min(sds.busiest_load_per_task, sds.max_load);
+ pwr_now += sds.this->__cpu_power *
+ min(sds.this_load_per_task, sds.this_load);
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(busiest,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- if (max_load > tmp)
- pwr_move += busiest->__cpu_power *
- min(busiest_load_per_task, max_load - tmp);
+ tmp = sg_div_cpu_power(sds.busiest,
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds.max_load > tmp)
+ pwr_move += sds.busiest->__cpu_power *
+ min(sds.busiest_load_per_task,
+ sds.max_load - tmp);
/* Amount of load we'd add */
- if (max_load * busiest->__cpu_power <
- busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(this,
- max_load * busiest->__cpu_power);
+ if (sds.max_load * sds.busiest->__cpu_power <
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds.this,
+ sds.max_load * sds.busiest->__cpu_power);
else
- tmp = sg_div_cpu_power(this,
- busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += this->__cpu_power *
- min(this_load_per_task, this_load + tmp);
+ tmp = sg_div_cpu_power(sds.this,
+ sds.busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds.this->__cpu_power *
+ min(sds.this_load_per_task,
+ sds.this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
if (pwr_move > pwr_now)
- *imbalance = busiest_load_per_task;
+ *imbalance = sds.busiest_load_per_task;
}
- return busiest;
+ return sds.busiest;
out_balanced:
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
goto ret;
- if (this != group_leader || group_leader == group_min)
+ if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
goto ret;
- *imbalance = min_load_per_task;
+ *imbalance = sds.min_load_per_task;
if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(group_leader);
+ group_first_cpu(sds.group_leader);
}
- return group_min;
+ return sds.group_min;
#endif
ret:
* [RFC PATCH 06/11] sched: Create a helper function to calculate sched_domain stats for fbg()
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (4 preceding siblings ...)
2009-03-25 9:13 ` [RFC PATCH 05/11] sched: Define structure to store the sched_domain statistics " Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 07/11] sched: Create helper to calculate small_imbalance in find_busiest_group Gautham R Shenoy
` (5 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Create a helper function named update_sd_lb_stats() to update the various
sched_domain related statistics in find_busiest_group(). With this
we would have moved all the statistics computation out of
find_busiest_group().
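Concretely, the statistics gathering now sits behind a single call of the following shape (a condensed sketch; the full helper definition and call site are in the diff below):
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/* Walks sd->groups once and fills sds with the this-group,
	 * busiest-group and power-savings statistics;
	 * find_busiest_group() afterwards only interprets the result. */
	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus, balance, &sds);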
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 117 +++++++++++++++++++++++++++++++++++---------------------
1 files changed, 73 insertions(+), 44 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c1b92da..5e01162 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3261,32 +3261,33 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
}
-/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: sched_domain whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sds: variable to hold the statistics for this sched_domain.
*/
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
- unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, const struct cpumask *cpus, int *balance)
+static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+ enum cpu_idle_type idle, int *sd_idle,
+ const struct cpumask *cpus, int *balance,
+ struct sd_lb_stats *sds)
{
- struct sd_lb_stats sds;
struct sched_group *group = sd->groups;
- unsigned long max_pull;
+ struct sg_lb_stats sgs;
int load_idx;
- memset(&sds, 0, sizeof(sds));
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sds.power_savings_balance = 1;
- sds.min_nr_running = ULONG_MAX;
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
#endif
load_idx = get_sd_load_idx(sd, idle);
do {
- struct sg_lb_stats sgs;
int local_group;
local_group = cpumask_test_cpu(this_cpu,
@@ -3295,25 +3296,25 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
local_group, cpus, balance, &sgs);
- if (balance && !(*balance))
- goto ret;
+ if (local_group && balance && !(*balance))
+ return;
- sds.total_load += sgs.group_load;
- sds.total_pwr += group->__cpu_power;
+ sds->total_load += sgs.group_load;
+ sds->total_pwr += group->__cpu_power;
if (local_group) {
- sds.this_load = sgs.avg_load;
- sds.this = group;
- sds.this_nr_running = sgs.sum_nr_running;
- sds.this_load_per_task = sgs.sum_weighted_load;
- } else if (sgs.avg_load > sds.max_load &&
+ sds->this_load = sgs.avg_load;
+ sds->this = group;
+ sds->this_nr_running = sgs.sum_nr_running;
+ sds->this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > sds->max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
- sds.max_load = sgs.avg_load;
- sds.busiest = group;
- sds.busiest_nr_running = sgs.sum_nr_running;
- sds.busiest_load_per_task = sgs.sum_weighted_load;
- sds.group_imb = sgs.group_imb;
+ sds->max_load = sgs.avg_load;
+ sds->busiest = group;
+ sds->busiest_nr_running = sgs.sum_nr_running;
+ sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3330,15 +3331,15 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* no need to do power savings balance at this domain
*/
if (local_group &&
- (sds.this_nr_running >= sgs.group_capacity ||
- !sds.this_nr_running))
- sds.power_savings_balance = 0;
+ (sds->this_nr_running >= sgs.group_capacity ||
+ !sds->this_nr_running))
+ sds->power_savings_balance = 0;
/*
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!sds.power_savings_balance ||
+ if (!sds->power_savings_balance ||
sgs.sum_nr_running >= sgs.group_capacity ||
!sgs.sum_nr_running)
goto group_next;
@@ -3348,13 +3349,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sgs.sum_nr_running < sds.min_nr_running) ||
- (sgs.sum_nr_running == sds.min_nr_running &&
+ if ((sgs.sum_nr_running < sds->min_nr_running) ||
+ (sgs.sum_nr_running == sds->min_nr_running &&
group_first_cpu(group) >
- group_first_cpu(sds.group_min))) {
- sds.group_min = group;
- sds.min_nr_running = sgs.sum_nr_running;
- sds.min_load_per_task = sgs.sum_weighted_load /
+ group_first_cpu(sds->group_min))) {
+ sds->group_min = group;
+ sds->min_nr_running = sgs.sum_nr_running;
+ sds->min_load_per_task = sgs.sum_weighted_load /
sgs.sum_nr_running;
}
@@ -3366,18 +3367,46 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sgs.sum_nr_running > sds.leader_nr_running ||
- (sgs.sum_nr_running == sds.leader_nr_running &&
+ if (sgs.sum_nr_running > sds->leader_nr_running ||
+ (sgs.sum_nr_running == sds->leader_nr_running &&
group_first_cpu(group) <
- group_first_cpu(sds.group_leader))) {
- sds.group_leader = group;
- sds.leader_nr_running = sgs.sum_nr_running;
+ group_first_cpu(sds->group_leader))) {
+ sds->group_leader = group;
+ sds->leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
group = group->next;
} while (group != sd->groups);
+}
+/******* find_busiest_group() helpers end here *********************/
+
+/*
+ * find_busiest_group finds and returns the busiest CPU group within the
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+ unsigned long *imbalance, enum cpu_idle_type idle,
+ int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+ struct sd_lb_stats sds;
+ unsigned long max_pull;
+
+ memset(&sds, 0, sizeof(sds));
+
+ /*
+ * Compute the various statistics relavent for load balancing at
+ * this level.
+ */
+ update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+ balance, &sds);
+
+ if (balance && !(*balance))
+ goto ret;
+
if (!sds.busiest || sds.this_load >= sds.max_load
|| sds.busiest_nr_running == 0)
goto out_balanced;
* [tip:sched/balancing] sched: Create a helper function to calculate sched_domain stats for fbg()
2009-03-25 9:14 ` [RFC PATCH 06/11] sched: Create a helper function to calculate sched_domain stats " Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, nickpiggin
Commit-ID: 37abe198b1246ddd206319c43502a687db62d347
Gitweb: http://git.kernel.org/tip/37abe198b1246ddd206319c43502a687db62d347
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:01 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:46 +0100
sched: Create a helper function to calculate sched_domain stats for fbg()
Impact: cleanup
Create a helper function named update_sd_lb_stats() to update the
various sched_domain related statistics in find_busiest_group().
With this we would have moved all the statistics computation out of
find_busiest_group().
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091401.13992.88737.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 117 +++++++++++++++++++++++++++++++++++---------------------
1 files changed, 73 insertions(+), 44 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8198dbe..ec715f9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3365,32 +3365,33 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
}
-/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: sched_domain whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sds: variable to hold the statistics for this sched_domain.
*/
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
- unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, const struct cpumask *cpus, int *balance)
+static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+ enum cpu_idle_type idle, int *sd_idle,
+ const struct cpumask *cpus, int *balance,
+ struct sd_lb_stats *sds)
{
- struct sd_lb_stats sds;
struct sched_group *group = sd->groups;
- unsigned long max_pull;
+ struct sg_lb_stats sgs;
int load_idx;
- memset(&sds, 0, sizeof(sds));
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sds.power_savings_balance = 1;
- sds.min_nr_running = ULONG_MAX;
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
#endif
load_idx = get_sd_load_idx(sd, idle);
do {
- struct sg_lb_stats sgs;
int local_group;
local_group = cpumask_test_cpu(this_cpu,
@@ -3399,25 +3400,25 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
local_group, cpus, balance, &sgs);
- if (balance && !(*balance))
- goto ret;
+ if (local_group && balance && !(*balance))
+ return;
- sds.total_load += sgs.group_load;
- sds.total_pwr += group->__cpu_power;
+ sds->total_load += sgs.group_load;
+ sds->total_pwr += group->__cpu_power;
if (local_group) {
- sds.this_load = sgs.avg_load;
- sds.this = group;
- sds.this_nr_running = sgs.sum_nr_running;
- sds.this_load_per_task = sgs.sum_weighted_load;
- } else if (sgs.avg_load > sds.max_load &&
+ sds->this_load = sgs.avg_load;
+ sds->this = group;
+ sds->this_nr_running = sgs.sum_nr_running;
+ sds->this_load_per_task = sgs.sum_weighted_load;
+ } else if (sgs.avg_load > sds->max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
- sds.max_load = sgs.avg_load;
- sds.busiest = group;
- sds.busiest_nr_running = sgs.sum_nr_running;
- sds.busiest_load_per_task = sgs.sum_weighted_load;
- sds.group_imb = sgs.group_imb;
+ sds->max_load = sgs.avg_load;
+ sds->busiest = group;
+ sds->busiest_nr_running = sgs.sum_nr_running;
+ sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->group_imb = sgs.group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -3434,15 +3435,15 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* no need to do power savings balance at this domain
*/
if (local_group &&
- (sds.this_nr_running >= sgs.group_capacity ||
- !sds.this_nr_running))
- sds.power_savings_balance = 0;
+ (sds->this_nr_running >= sgs.group_capacity ||
+ !sds->this_nr_running))
+ sds->power_savings_balance = 0;
/*
* If a group is already running at full capacity or idle,
* don't include that group in power savings calculations
*/
- if (!sds.power_savings_balance ||
+ if (!sds->power_savings_balance ||
sgs.sum_nr_running >= sgs.group_capacity ||
!sgs.sum_nr_running)
goto group_next;
@@ -3452,13 +3453,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* This is the group from where we need to pick up the load
* for saving power
*/
- if ((sgs.sum_nr_running < sds.min_nr_running) ||
- (sgs.sum_nr_running == sds.min_nr_running &&
+ if ((sgs.sum_nr_running < sds->min_nr_running) ||
+ (sgs.sum_nr_running == sds->min_nr_running &&
group_first_cpu(group) >
- group_first_cpu(sds.group_min))) {
- sds.group_min = group;
- sds.min_nr_running = sgs.sum_nr_running;
- sds.min_load_per_task = sgs.sum_weighted_load /
+ group_first_cpu(sds->group_min))) {
+ sds->group_min = group;
+ sds->min_nr_running = sgs.sum_nr_running;
+ sds->min_load_per_task = sgs.sum_weighted_load /
sgs.sum_nr_running;
}
@@ -3470,18 +3471,46 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sgs.sum_nr_running > sgs.group_capacity - 1)
goto group_next;
- if (sgs.sum_nr_running > sds.leader_nr_running ||
- (sgs.sum_nr_running == sds.leader_nr_running &&
+ if (sgs.sum_nr_running > sds->leader_nr_running ||
+ (sgs.sum_nr_running == sds->leader_nr_running &&
group_first_cpu(group) <
- group_first_cpu(sds.group_leader))) {
- sds.group_leader = group;
- sds.leader_nr_running = sgs.sum_nr_running;
+ group_first_cpu(sds->group_leader))) {
+ sds->group_leader = group;
+ sds->leader_nr_running = sgs.sum_nr_running;
}
group_next:
#endif
group = group->next;
} while (group != sd->groups);
+}
+/******* find_busiest_group() helpers end here *********************/
+
+/*
+ * find_busiest_group finds and returns the busiest CPU group within the
+ * domain. It calculates and returns the amount of weighted load which
+ * should be moved to restore balance via the imbalance parameter.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+ unsigned long *imbalance, enum cpu_idle_type idle,
+ int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+ struct sd_lb_stats sds;
+ unsigned long max_pull;
+
+ memset(&sds, 0, sizeof(sds));
+
+ /*
+ * Compute the various statistics relavent for load balancing at
+ * this level.
+ */
+ update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+ balance, &sds);
+
+ if (balance && !(*balance))
+ goto ret;
+
if (!sds.busiest || sds.this_load >= sds.max_load
|| sds.busiest_nr_running == 0)
goto out_balanced;
* [RFC PATCH 07/11] sched: Create helper to calculate small_imbalance in find_busiest_group.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (5 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 06/11] sched: Create a helper function to calculate sched_domain stats " Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] sched: Create helper to calculate small_imbalance in fbg() Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 08/11] sched: Create a helper function to calculate imbalance Gautham R Shenoy
` (4 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
We have two places in find_busiest_group() where we need to calculate the
minor imbalance before returning the busiest group. Encapsulate this
functionality into a separate helper function.
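Condensed from the hunks below, the two call sites end up looking like this (surrounding code elided in this sketch):
	if (sds.max_load < sds.avg_load) {
		/* smp nice corner case: no regular imbalance to compute */
		*imbalance = 0;
		fix_small_imbalance(&sds, this_cpu, imbalance);
		goto ret_busiest;
	}

	/* ... regular imbalance computation ... */

	if (*imbalance < sds.busiest_load_per_task)
		fix_small_imbalance(&sds, this_cpu, imbalance);

ret_busiest:
	return sds.busiest;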
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 131 ++++++++++++++++++++++++++++++--------------------------
1 files changed, 70 insertions(+), 61 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e01162..364866f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3380,6 +3380,71 @@ group_next:
} while (group != sd->groups);
}
+
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ * amongst the groups of a sched_domain, during
+ * load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ unsigned long tmp, pwr_now = 0, pwr_move = 0;
+ unsigned int imbn = 2;
+
+ if (sds->this_nr_running) {
+ sds->this_load_per_task /= sds->this_nr_running;
+ if (sds->busiest_load_per_task >
+ sds->this_load_per_task)
+ imbn = 1;
+ } else
+ sds->this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+ if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+ sds->busiest_load_per_task * imbn) {
+ *imbalance = sds->busiest_load_per_task;
+ return;
+ }
+
+ /*
+ * OK, we don't have enough imbalance to justify moving tasks,
+ * however we may be able to increase total CPU power used by
+ * moving them.
+ */
+
+ pwr_now += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load);
+ pwr_now += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load);
+ pwr_now /= SCHED_LOAD_SCALE;
+
+ /* Amount of load we'd subtract */
+ tmp = sg_div_cpu_power(sds->busiest,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds->max_load > tmp)
+ pwr_move += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+ /* Amount of load we'd add */
+ if (sds->max_load * sds->busiest->__cpu_power <
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds->this,
+ sds->max_load * sds->busiest->__cpu_power);
+ else
+ tmp = sg_div_cpu_power(sds->this,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load + tmp);
+ pwr_move /= SCHED_LOAD_SCALE;
+
+ /* Move if we gain throughput */
+ if (pwr_move > pwr_now)
+ *imbalance = sds->busiest_load_per_task;
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3443,7 +3508,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
if (sds.max_load < sds.avg_load) {
*imbalance = 0;
- goto small_imbalance;
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ goto ret_busiest;
}
/* Don't want to pull so many tasks that a group would go idle */
@@ -3461,67 +3527,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < sds.busiest_load_per_task) {
- unsigned long tmp, pwr_now, pwr_move;
- unsigned int imbn;
-
-small_imbalance:
- pwr_move = pwr_now = 0;
- imbn = 2;
- if (sds.this_nr_running) {
- sds.this_load_per_task /= sds.this_nr_running;
- if (sds.busiest_load_per_task >
- sds.this_load_per_task)
- imbn = 1;
- } else
- sds.this_load_per_task =
- cpu_avg_load_per_task(this_cpu);
-
- if (sds.max_load - sds.this_load +
- sds.busiest_load_per_task >=
- sds.busiest_load_per_task * imbn) {
- *imbalance = sds.busiest_load_per_task;
- return sds.busiest;
- }
-
- /*
- * OK, we don't have enough imbalance to justify moving tasks,
- * however we may be able to increase total CPU power used by
- * moving them.
- */
-
- pwr_now += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task, sds.max_load);
- pwr_now += sds.this->__cpu_power *
- min(sds.this_load_per_task, sds.this_load);
- pwr_now /= SCHED_LOAD_SCALE;
-
- /* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(sds.busiest,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- if (sds.max_load > tmp)
- pwr_move += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task,
- sds.max_load - tmp);
-
- /* Amount of load we'd add */
- if (sds.max_load * sds.busiest->__cpu_power <
- sds.busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(sds.this,
- sds.max_load * sds.busiest->__cpu_power);
- else
- tmp = sg_div_cpu_power(sds.this,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += sds.this->__cpu_power *
- min(sds.this_load_per_task,
- sds.this_load + tmp);
- pwr_move /= SCHED_LOAD_SCALE;
-
- /* Move if we gain throughput */
- if (pwr_move > pwr_now)
- *imbalance = sds.busiest_load_per_task;
- }
+ if (*imbalance < sds.busiest_load_per_task)
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ret_busiest:
return sds.busiest;
out_balanced:
* [tip:sched/balancing] sched: Create helper to calculate small_imbalance in fbg()
2009-03-25 9:14 ` [RFC PATCH 07/11] sched: Create helper to calculate small_imbalance in find_busiest_group Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, nickpiggin
Commit-ID: 2e6f44aeda426054fc58464df1ad571aecca0c92
Gitweb: http://git.kernel.org/tip/2e6f44aeda426054fc58464df1ad571aecca0c92
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:06 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:47 +0100
sched: Create helper to calculate small_imbalance in fbg()
Impact: cleanup
We have two places in find_busiest_group() where we need to calculate
the minor imbalance before returning the busiest group. Encapsulate
this functionality into a separate helper function.
Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091406.13992.54316.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 131 ++++++++++++++++++++++++++++++--------------------------
1 files changed, 70 insertions(+), 61 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ec715f9..540147e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3484,6 +3484,71 @@ group_next:
} while (group != sd->groups);
}
+
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ * amongst the groups of a sched_domain, during
+ * load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ unsigned long tmp, pwr_now = 0, pwr_move = 0;
+ unsigned int imbn = 2;
+
+ if (sds->this_nr_running) {
+ sds->this_load_per_task /= sds->this_nr_running;
+ if (sds->busiest_load_per_task >
+ sds->this_load_per_task)
+ imbn = 1;
+ } else
+ sds->this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+ if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+ sds->busiest_load_per_task * imbn) {
+ *imbalance = sds->busiest_load_per_task;
+ return;
+ }
+
+ /*
+ * OK, we don't have enough imbalance to justify moving tasks,
+ * however we may be able to increase total CPU power used by
+ * moving them.
+ */
+
+ pwr_now += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load);
+ pwr_now += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load);
+ pwr_now /= SCHED_LOAD_SCALE;
+
+ /* Amount of load we'd subtract */
+ tmp = sg_div_cpu_power(sds->busiest,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds->max_load > tmp)
+ pwr_move += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+ /* Amount of load we'd add */
+ if (sds->max_load * sds->busiest->__cpu_power <
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds->this,
+ sds->max_load * sds->busiest->__cpu_power);
+ else
+ tmp = sg_div_cpu_power(sds->this,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load + tmp);
+ pwr_move /= SCHED_LOAD_SCALE;
+
+ /* Move if we gain throughput */
+ if (pwr_move > pwr_now)
+ *imbalance = sds->busiest_load_per_task;
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3547,7 +3612,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
if (sds.max_load < sds.avg_load) {
*imbalance = 0;
- goto small_imbalance;
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ goto ret_busiest;
}
/* Don't want to pull so many tasks that a group would go idle */
@@ -3565,67 +3631,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < sds.busiest_load_per_task) {
- unsigned long tmp, pwr_now, pwr_move;
- unsigned int imbn;
-
-small_imbalance:
- pwr_move = pwr_now = 0;
- imbn = 2;
- if (sds.this_nr_running) {
- sds.this_load_per_task /= sds.this_nr_running;
- if (sds.busiest_load_per_task >
- sds.this_load_per_task)
- imbn = 1;
- } else
- sds.this_load_per_task =
- cpu_avg_load_per_task(this_cpu);
-
- if (sds.max_load - sds.this_load +
- sds.busiest_load_per_task >=
- sds.busiest_load_per_task * imbn) {
- *imbalance = sds.busiest_load_per_task;
- return sds.busiest;
- }
-
- /*
- * OK, we don't have enough imbalance to justify moving tasks,
- * however we may be able to increase total CPU power used by
- * moving them.
- */
-
- pwr_now += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task, sds.max_load);
- pwr_now += sds.this->__cpu_power *
- min(sds.this_load_per_task, sds.this_load);
- pwr_now /= SCHED_LOAD_SCALE;
-
- /* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(sds.busiest,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- if (sds.max_load > tmp)
- pwr_move += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task,
- sds.max_load - tmp);
-
- /* Amount of load we'd add */
- if (sds.max_load * sds.busiest->__cpu_power <
- sds.busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(sds.this,
- sds.max_load * sds.busiest->__cpu_power);
- else
- tmp = sg_div_cpu_power(sds.this,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += sds.this->__cpu_power *
- min(sds.this_load_per_task,
- sds.this_load + tmp);
- pwr_move /= SCHED_LOAD_SCALE;
-
- /* Move if we gain throughput */
- if (pwr_move > pwr_now)
- *imbalance = sds.busiest_load_per_task;
- }
+ if (*imbalance < sds.busiest_load_per_task)
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ret_busiest:
return sds.busiest;
out_balanced:
* [RFC PATCH 08/11] sched: Create a helper function to calculate imbalance.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (6 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 07/11] sched: Create helper to calculate small_imbalance in find_busiest_group Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:46 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 09/11] sched: Optimize the !power_savings_balance during find_busiest_group Gautham R Shenoy
` (3 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Move all the imbalance calculation out of find_busiest_group() through this
helper function.
With this change, the structure of find_busiest_group() will be as follows:
- update_sched_domain_statistics.
- Check if an imbalance exists.
- Update imbalance and return busiest.
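Put as code, that structure is roughly the following (a sketch assembled from the hunks below; the balance hint and the out_balanced checks are elided):
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   int *sd_idle, const struct cpumask *cpus, int *balance)
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/* 1. Update the sched_domain statistics. */
	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus, balance, &sds);

	/* 2. Check whether an imbalance exists (checks elided here). */

	/* 3. Compute the imbalance and return the busiest group. */
	calculate_imbalance(&sds, this_cpu, imbalance);
	return sds.busiest;
}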
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 78 ++++++++++++++++++++++++++++++++------------------------
1 files changed, 45 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 364866f..b1b1b8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3383,8 +3383,8 @@ group_next:
/**
* fix_small_imbalance - Calculate the minor imbalance that exists
- * amongst the groups of a sched_domain, during
- * load balancing.
+ * amongst the groups of a sched_domain, during
+ * load balancing.
* @sds: Statistics of the sched_domain whose imbalance is to be calculated.
* @this_cpu: The cpu at whose sched_domain we're performing load-balance.
* @imbalance: Variable to store the imbalance.
@@ -3445,6 +3445,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
if (pwr_move > pwr_now)
*imbalance = sds->busiest_load_per_task;
}
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ * groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+ unsigned long *imbalance)
+{
+ unsigned long max_pull;
+ /*
+ * In the presence of smp nice balancing, certain scenarios can have
+ * max load less than avg load(as we skip the groups at or below
+ * its cpu_power, while calculating max_load..)
+ */
+ if (sds->max_load < sds->avg_load) {
+ *imbalance = 0;
+ return fix_small_imbalance(sds, this_cpu, imbalance);
+ }
+
+ /* Don't want to pull so many tasks that a group would go idle */
+ max_pull = min(sds->max_load - sds->avg_load,
+ sds->max_load - sds->busiest_load_per_task);
+
+ /* How much load to actually move to equalise the imbalance */
+ *imbalance = min(max_pull * sds->busiest->__cpu_power,
+ (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+ / SCHED_LOAD_SCALE;
+
+ /*
+ * if *imbalance is less than the average load per runnable task
+ * there is no gaurantee that any tasks will be moved so we'll have
+ * a think about bumping its value to force at least one task to be
+ * moved
+ */
+ if (*imbalance < sds->busiest_load_per_task)
+ return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3458,7 +3499,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
int *sd_idle, const struct cpumask *cpus, int *balance)
{
struct sd_lb_stats sds;
- unsigned long max_pull;
memset(&sds, 0, sizeof(sds));
@@ -3501,36 +3541,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sds.max_load <= sds.busiest_load_per_task)
goto out_balanced;
- /*
- * In the presence of smp nice balancing, certain scenarios can have
- * max load less than avg load(as we skip the groups at or below
- * its cpu_power, while calculating max_load..)
- */
- if (sds.max_load < sds.avg_load) {
- *imbalance = 0;
- fix_small_imbalance(&sds, this_cpu, imbalance);
- goto ret_busiest;
- }
-
- /* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(sds.max_load - sds.avg_load,
- sds.max_load - sds.busiest_load_per_task);
-
- /* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds.busiest->__cpu_power,
- (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
- / SCHED_LOAD_SCALE;
-
- /*
- * if *imbalance is less than the average load per runnable task
- * there is no gaurantee that any tasks will be moved so we'll have
- * a think about bumping its value to force at least one task to be
- * moved
- */
- if (*imbalance < sds.busiest_load_per_task)
- fix_small_imbalance(&sds, this_cpu, imbalance);
-
-ret_busiest:
+ /* Looks like there is an imbalance. Compute it */
+ calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
out_balanced:
* [tip:sched/balancing] sched: Create a helper function to calculate imbalance
2009-03-25 9:14 ` [RFC PATCH 08/11] sched: Create a helper function to calculate imbalance Gautham R Shenoy
@ 2009-03-25 9:46 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:46 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: dbc523a3b86f9e1765b5e70e6886913b99cc5cec
Gitweb: http://git.kernel.org/tip/dbc523a3b86f9e1765b5e70e6886913b99cc5cec
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:12 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:47 +0100
sched: Create a helper function to calculate imbalance
Move all the imbalance calculation out of find_busiest_group()
through this helper function.
With this change, the structure of find_busiest_group() will be
as follows:
- update_sched_domain_statistics.
- check if an imbalance exists.
- update imbalance and return busiest.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091411.13992.43293.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 78 ++++++++++++++++++++++++++++++++-----------------------
1 files changed, 45 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 540147e..934f615 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3487,8 +3487,8 @@ group_next:
/**
* fix_small_imbalance - Calculate the minor imbalance that exists
- * amongst the groups of a sched_domain, during
- * load balancing.
+ * amongst the groups of a sched_domain, during
+ * load balancing.
* @sds: Statistics of the sched_domain whose imbalance is to be calculated.
* @this_cpu: The cpu at whose sched_domain we're performing load-balance.
* @imbalance: Variable to store the imbalance.
@@ -3549,6 +3549,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
if (pwr_move > pwr_now)
*imbalance = sds->busiest_load_per_task;
}
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ * groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+ unsigned long *imbalance)
+{
+ unsigned long max_pull;
+ /*
+ * In the presence of smp nice balancing, certain scenarios can have
+ * max load less than avg load(as we skip the groups at or below
+ * its cpu_power, while calculating max_load..)
+ */
+ if (sds->max_load < sds->avg_load) {
+ *imbalance = 0;
+ return fix_small_imbalance(sds, this_cpu, imbalance);
+ }
+
+ /* Don't want to pull so many tasks that a group would go idle */
+ max_pull = min(sds->max_load - sds->avg_load,
+ sds->max_load - sds->busiest_load_per_task);
+
+ /* How much load to actually move to equalise the imbalance */
+ *imbalance = min(max_pull * sds->busiest->__cpu_power,
+ (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+ / SCHED_LOAD_SCALE;
+
+ /*
+ * if *imbalance is less than the average load per runnable task
+ * there is no gaurantee that any tasks will be moved so we'll have
+ * a think about bumping its value to force at least one task to be
+ * moved
+ */
+ if (*imbalance < sds->busiest_load_per_task)
+ return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3562,7 +3603,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
int *sd_idle, const struct cpumask *cpus, int *balance)
{
struct sd_lb_stats sds;
- unsigned long max_pull;
memset(&sds, 0, sizeof(sds));
@@ -3605,36 +3645,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (sds.max_load <= sds.busiest_load_per_task)
goto out_balanced;
- /*
- * In the presence of smp nice balancing, certain scenarios can have
- * max load less than avg load(as we skip the groups at or below
- * its cpu_power, while calculating max_load..)
- */
- if (sds.max_load < sds.avg_load) {
- *imbalance = 0;
- fix_small_imbalance(&sds, this_cpu, imbalance);
- goto ret_busiest;
- }
-
- /* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(sds.max_load - sds.avg_load,
- sds.max_load - sds.busiest_load_per_task);
-
- /* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds.busiest->__cpu_power,
- (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
- / SCHED_LOAD_SCALE;
-
- /*
- * if *imbalance is less than the average load per runnable task
- * there is no gaurantee that any tasks will be moved so we'll have
- * a think about bumping its value to force at least one task to be
- * moved
- */
- if (*imbalance < sds.busiest_load_per_task)
- fix_small_imbalance(&sds, this_cpu, imbalance);
-
-ret_busiest:
+ /* Looks like there is an imbalance. Compute it */
+ calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
out_balanced:
* [RFC PATCH 09/11] sched: Optimize the !power_savings_balance during find_busiest_group.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (7 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 08/11] sched: Create a helper function to calculate imbalance Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:47 ` [tip:sched/balancing] sched: Optimize the !power_savings_balance during fbg() Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 10/11] sched: Refactor the power savings balance code Gautham R Shenoy
` (2 subsequent siblings)
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
We don't need to perform power_savings balance if either the cpu is NOT_IDLE
or if the sched_domain doesn't contain the SD_POWERSAVINGS_BALANCE flag set.
Currently, we check for these conditions multiple times, even though
these variables don't change over the scope of find_busiest_group().
Check once, and store the value in the already existing "power_savings_balance"
variable.
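In code, the test moves out of the group loop and only its cached result is consulted per group (simplified from the hunks below):
	/* Evaluated once, before walking the groups: */
	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
		sds->power_savings_balance = 0;

	/* Inside the do/while loop, each group then merely tests the flag: */
	if (!sds->power_savings_balance)
		goto group_next;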
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 23 ++++++++++++++---------
1 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b1b1b8a..cb2c97b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3282,8 +3282,17 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
int load_idx;
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sds->power_savings_balance = 1;
- sds->min_nr_running = ULONG_MAX;
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ sds->power_savings_balance = 0;
+ else {
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
+ sds->leader_nr_running = 0;
+ }
#endif
load_idx = get_sd_load_idx(sd, idle);
@@ -3318,12 +3327,8 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Busy processors will not participate in power savings
- * balance.
- */
- if (idle == CPU_NOT_IDLE ||
- !(sd->flags & SD_POWERSAVINGS_BALANCE))
+
+ if (!sds->power_savings_balance)
goto group_next;
/*
@@ -3547,7 +3552,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
out_balanced:
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ if (!sds.power_savings_balance)
goto ret;
if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
* [tip:sched/balancing] sched: Optimize the !power_savings_balance during fbg()
2009-03-25 9:14 ` [RFC PATCH 09/11] sched: Optimize the !power_savings_balance during find_busiest_group Gautham R Shenoy
@ 2009-03-25 9:47 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:47 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: a021dc03376707c55a3483e32c16b8986d4414cc
Gitweb: http://git.kernel.org/tip/a021dc03376707c55a3483e32c16b8986d4414cc
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:17 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:48 +0100
sched: Optimize the !power_savings_balance during fbg()
Impact: cleanup, micro-optimization
We don't need to perform power_savings balance if either the
cpu is NOT_IDLE or if the sched_domain doesn't contain the
SD_POWERSAVINGS_BALANCE flag set.
Currently, we check for these conditions multiple
times, even though these variables don't change over the scope
of find_busiest_group().
Check once, and store the value in the already existing
"power_savings_balance" variable.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091417.13992.2657.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 23 ++++++++++++++---------
1 files changed, 14 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 934f615..71e8dca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3386,8 +3386,17 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
int load_idx;
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sds->power_savings_balance = 1;
- sds->min_nr_running = ULONG_MAX;
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ sds->power_savings_balance = 0;
+ else {
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
+ sds->leader_nr_running = 0;
+ }
#endif
load_idx = get_sd_load_idx(sd, idle);
@@ -3422,12 +3431,8 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Busy processors will not participate in power savings
- * balance.
- */
- if (idle == CPU_NOT_IDLE ||
- !(sd->flags & SD_POWERSAVINGS_BALANCE))
+
+ if (!sds->power_savings_balance)
goto group_next;
/*
@@ -3651,7 +3656,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
out_balanced:
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ if (!sds.power_savings_balance)
goto ret;
if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [RFC PATCH 10/11] sched: Refactor the power savings balance code.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (8 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 09/11] sched: Optimize the !power_savings_balance during find_busiest_group Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:47 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 9:14 ` [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function Gautham R Shenoy
2009-03-25 9:30 ` [RFC PATCH 00/11] sched: find_busiest_group() cleanup Ingo Molnar
11 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Create separate helper functions to initialize the power-savings-balance
related variables, to update them and to check if we have a scope for
performing power-savings balance.
Add no-op inline functions for the !(CONFIG_SCHED_MC || CONFIG_SCHED_SMT)
case.
This will eliminate all the #ifdef jungle in find_busiest_group() and the
other helper functions.
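As a rough sketch of the shape this takes (placeholder function name and trimmed
arguments, not the actual helpers in the diff below): the real helper is compiled in
when either config option is set, and an empty inline stub is used otherwise, so the
caller never needs an #ifdef.

/* Sketch of the "real helper vs. no-op stub" pattern. */
struct sd_lb_stats;	/* only referenced through a pointer here */

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static inline void init_sd_power_savings_stats_sketch(struct sd_lb_stats *sds)
{
	/* real initialisation of the power-savings fields would go here */
}
#else
static inline void init_sd_power_savings_stats_sketch(struct sd_lb_stats *sds)
{
	/* no-op: the call compiles away, the caller stays #ifdef-free */
}
#endif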
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 236 ++++++++++++++++++++++++++++++++++++--------------------
1 files changed, 153 insertions(+), 83 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cb2c97b..6404ddf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3166,6 +3166,151 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
}
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * init_sd_power_savings_stats - Initialize power savings statistics for
+ * the given sched_domain, during load balancing.
+ *
+ * @sd: Sched domain whose power-savings statistics are to be initialized.
+ * @sds: Variable containing the statistics for sd.
+ * @idle: Idle status of the CPU at which we're performing load-balancing.
+ */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ sds->power_savings_balance = 0;
+ else {
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
+ sds->leader_nr_running = 0;
+ }
+}
+
+/**
+ * update_sd_power_savings_stats - Update the power saving stats for a
+ * sched_domain while performing load balancing.
+ *
+ * @group: sched_group belonging to the sched_domain under consideration.
+ * @sds: Variable containing the statistics of the sched_domain
+ * @local_group: Does group contain the CPU for which we're performing
+ * load balancing ?
+ * @sgs: Variable containing the statistics of the group.
+ */
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+
+ if (!sds->power_savings_balance)
+ return;
+
+ /*
+ * If the local group is idle or completely loaded
+ * no need to do power savings balance at this domain
+ */
+ if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
+ !sds->this_nr_running))
+ sds->power_savings_balance = 0;
+
+ /*
+ * If a group is already running at full capacity or idle,
+ * don't include that group in power savings calculations
+ */
+ if (!sds->power_savings_balance ||
+ sgs->sum_nr_running >= sgs->group_capacity ||
+ !sgs->sum_nr_running)
+ return;
+
+ /*
+ * Calculate the group which has the least non-idle load.
+ * This is the group from where we need to pick up the load
+ * for saving power
+ */
+ if ((sgs->sum_nr_running < sds->min_nr_running) ||
+ (sgs->sum_nr_running == sds->min_nr_running &&
+ group_first_cpu(group) > group_first_cpu(sds->group_min))) {
+ sds->group_min = group;
+ sds->min_nr_running = sgs->sum_nr_running;
+ sds->min_load_per_task = sgs->sum_weighted_load /
+ sgs->sum_nr_running;
+ }
+
+ /*
+ * Calculate the group which is almost near its
+ * capacity but still has some space to pick up some load
+ * from other group and save more power
+ */
+ if (sgs->sum_nr_running > sgs->group_capacity - 1)
+ return;
+
+ if (sgs->sum_nr_running > sds->leader_nr_running ||
+ (sgs->sum_nr_running == sds->leader_nr_running &&
+ group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
+ sds->group_leader = group;
+ sds->leader_nr_running = sgs->sum_nr_running;
+ }
+}
+
+/**
+ * check_power_save_busiest_group - Check if we have potential to perform
+ * some power-savings balance. If yes, set the busiest group to be
+ * the least loaded group in the sched_domain, so that it's CPUs can
+ * be put to idle.
+ *
+ * @sds: Variable containing the statistics of the sched_domain
+ * under consideration.
+ * @this_cpu: Cpu at which we're currently performing load-balancing.
+ * @imbalance: Variable to store the imbalance.
+ *
+ * Returns 1 if there is potential to perform power-savings balance.
+ * Else returns 0.
+ */
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ if (!sds->power_savings_balance)
+ return 0;
+
+ if (sds->this != sds->group_leader ||
+ sds->group_leader == sds->group_min)
+ return 0;
+
+ *imbalance = sds->min_load_per_task;
+ sds->busiest = sds->group_min;
+
+ if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+ cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+ group_first_cpu(sds->group_leader);
+ }
+
+ return 1;
+
+}
+#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ return;
+}
+
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+ return;
+}
+
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
+
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @group: sched_group whose statistics are to be updated.
@@ -3281,19 +3426,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
struct sg_lb_stats sgs;
int load_idx;
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Busy processors will not participate in power savings
- * balance.
- */
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
- sds->power_savings_balance = 0;
- else {
- sds->power_savings_balance = 1;
- sds->min_nr_running = ULONG_MAX;
- sds->leader_nr_running = 0;
- }
-#endif
+ init_sd_power_savings_stats(sd, sds, idle);
load_idx = get_sd_load_idx(sd, idle);
do {
@@ -3326,61 +3459,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
sds->group_imb = sgs.group_imb;
}
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-
- if (!sds->power_savings_balance)
- goto group_next;
-
- /*
- * If the local group is idle or completely loaded
- * no need to do power savings balance at this domain
- */
- if (local_group &&
- (sds->this_nr_running >= sgs.group_capacity ||
- !sds->this_nr_running))
- sds->power_savings_balance = 0;
-
- /*
- * If a group is already running at full capacity or idle,
- * don't include that group in power savings calculations
- */
- if (!sds->power_savings_balance ||
- sgs.sum_nr_running >= sgs.group_capacity ||
- !sgs.sum_nr_running)
- goto group_next;
-
- /*
- * Calculate the group which has the least non-idle load.
- * This is the group from where we need to pick up the load
- * for saving power
- */
- if ((sgs.sum_nr_running < sds->min_nr_running) ||
- (sgs.sum_nr_running == sds->min_nr_running &&
- group_first_cpu(group) >
- group_first_cpu(sds->group_min))) {
- sds->group_min = group;
- sds->min_nr_running = sgs.sum_nr_running;
- sds->min_load_per_task = sgs.sum_weighted_load /
- sgs.sum_nr_running;
- }
-
- /*
- * Calculate the group which is almost near its
- * capacity but still has some space to pick up some load
- * from other group and save more power
- */
- if (sgs.sum_nr_running > sgs.group_capacity - 1)
- goto group_next;
-
- if (sgs.sum_nr_running > sds->leader_nr_running ||
- (sgs.sum_nr_running == sds->leader_nr_running &&
- group_first_cpu(group) <
- group_first_cpu(sds->group_leader))) {
- sds->group_leader = group;
- sds->leader_nr_running = sgs.sum_nr_running;
- }
-group_next:
-#endif
+ update_sd_power_savings_stats(group, sds, local_group, &sgs);
group = group->next;
} while (group != sd->groups);
@@ -3551,21 +3630,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
return sds.busiest;
out_balanced:
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (!sds.power_savings_balance)
- goto ret;
-
- if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
- goto ret;
-
- *imbalance = sds.min_load_per_task;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(sds.group_leader);
- }
- return sds.group_min;
-
-#endif
+ /*
+ * There is no obvious imbalance. But check if we can do some balancing
+ * to save power.
+ */
+ if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+ return sds.busiest;
ret:
*imbalance = 0;
return NULL;
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [tip:sched/balancing] sched: Refactor the power savings balance code
2009-03-25 9:14 ` [RFC PATCH 10/11] sched: Refactor the power savings balance code Gautham R Shenoy
@ 2009-03-25 9:47 ` Gautham R Shenoy
0 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:47 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: c071df18525a95b37dd5821a6dc4af83bd18675e
Gitweb: http://git.kernel.org/tip/c071df18525a95b37dd5821a6dc4af83bd18675e
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:22 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:48 +0100
sched: Refactor the power savings balance code
Impact: cleanup
Create separate helper functions to initialize the
power-savings-balance related variables, to update them and
to check if we have a scope for performing power-savings balance.
Add no-op inline functions for the !(CONFIG_SCHED_MC || CONFIG_SCHED_SMT)
case.
This will eliminate all the #ifdef jungle in find_busiest_group() and the
other helper functions.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091422.13992.73616.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 236 ++++++++++++++++++++++++++++++++++++--------------------
1 files changed, 153 insertions(+), 83 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 71e8dca..5f21658 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3270,6 +3270,151 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
}
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * init_sd_power_savings_stats - Initialize power savings statistics for
+ * the given sched_domain, during load balancing.
+ *
+ * @sd: Sched domain whose power-savings statistics are to be initialized.
+ * @sds: Variable containing the statistics for sd.
+ * @idle: Idle status of the CPU at which we're performing load-balancing.
+ */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ sds->power_savings_balance = 0;
+ else {
+ sds->power_savings_balance = 1;
+ sds->min_nr_running = ULONG_MAX;
+ sds->leader_nr_running = 0;
+ }
+}
+
+/**
+ * update_sd_power_savings_stats - Update the power saving stats for a
+ * sched_domain while performing load balancing.
+ *
+ * @group: sched_group belonging to the sched_domain under consideration.
+ * @sds: Variable containing the statistics of the sched_domain
+ * @local_group: Does group contain the CPU for which we're performing
+ * load balancing ?
+ * @sgs: Variable containing the statistics of the group.
+ */
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+
+ if (!sds->power_savings_balance)
+ return;
+
+ /*
+ * If the local group is idle or completely loaded
+ * no need to do power savings balance at this domain
+ */
+ if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
+ !sds->this_nr_running))
+ sds->power_savings_balance = 0;
+
+ /*
+ * If a group is already running at full capacity or idle,
+ * don't include that group in power savings calculations
+ */
+ if (!sds->power_savings_balance ||
+ sgs->sum_nr_running >= sgs->group_capacity ||
+ !sgs->sum_nr_running)
+ return;
+
+ /*
+ * Calculate the group which has the least non-idle load.
+ * This is the group from where we need to pick up the load
+ * for saving power
+ */
+ if ((sgs->sum_nr_running < sds->min_nr_running) ||
+ (sgs->sum_nr_running == sds->min_nr_running &&
+ group_first_cpu(group) > group_first_cpu(sds->group_min))) {
+ sds->group_min = group;
+ sds->min_nr_running = sgs->sum_nr_running;
+ sds->min_load_per_task = sgs->sum_weighted_load /
+ sgs->sum_nr_running;
+ }
+
+ /*
+ * Calculate the group which is almost near its
+ * capacity but still has some space to pick up some load
+ * from other group and save more power
+ */
+ if (sgs->sum_nr_running > sgs->group_capacity - 1)
+ return;
+
+ if (sgs->sum_nr_running > sds->leader_nr_running ||
+ (sgs->sum_nr_running == sds->leader_nr_running &&
+ group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
+ sds->group_leader = group;
+ sds->leader_nr_running = sgs->sum_nr_running;
+ }
+}
+
+/**
+ * check_power_save_busiest_group - Check if we have potential to perform
+ * some power-savings balance. If yes, set the busiest group to be
+ * the least loaded group in the sched_domain, so that it's CPUs can
+ * be put to idle.
+ *
+ * @sds: Variable containing the statistics of the sched_domain
+ * under consideration.
+ * @this_cpu: Cpu at which we're currently performing load-balancing.
+ * @imbalance: Variable to store the imbalance.
+ *
+ * Returns 1 if there is potential to perform power-savings balance.
+ * Else returns 0.
+ */
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ if (!sds->power_savings_balance)
+ return 0;
+
+ if (sds->this != sds->group_leader ||
+ sds->group_leader == sds->group_min)
+ return 0;
+
+ *imbalance = sds->min_load_per_task;
+ sds->busiest = sds->group_min;
+
+ if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+ cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+ group_first_cpu(sds->group_leader);
+ }
+
+ return 1;
+
+}
+#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+ struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+ return;
+}
+
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+ struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+ return;
+}
+
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
+
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @group: sched_group whose statistics are to be updated.
@@ -3385,19 +3530,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
struct sg_lb_stats sgs;
int load_idx;
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- /*
- * Busy processors will not participate in power savings
- * balance.
- */
- if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
- sds->power_savings_balance = 0;
- else {
- sds->power_savings_balance = 1;
- sds->min_nr_running = ULONG_MAX;
- sds->leader_nr_running = 0;
- }
-#endif
+ init_sd_power_savings_stats(sd, sds, idle);
load_idx = get_sd_load_idx(sd, idle);
do {
@@ -3430,61 +3563,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
sds->group_imb = sgs.group_imb;
}
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-
- if (!sds->power_savings_balance)
- goto group_next;
-
- /*
- * If the local group is idle or completely loaded
- * no need to do power savings balance at this domain
- */
- if (local_group &&
- (sds->this_nr_running >= sgs.group_capacity ||
- !sds->this_nr_running))
- sds->power_savings_balance = 0;
-
- /*
- * If a group is already running at full capacity or idle,
- * don't include that group in power savings calculations
- */
- if (!sds->power_savings_balance ||
- sgs.sum_nr_running >= sgs.group_capacity ||
- !sgs.sum_nr_running)
- goto group_next;
-
- /*
- * Calculate the group which has the least non-idle load.
- * This is the group from where we need to pick up the load
- * for saving power
- */
- if ((sgs.sum_nr_running < sds->min_nr_running) ||
- (sgs.sum_nr_running == sds->min_nr_running &&
- group_first_cpu(group) >
- group_first_cpu(sds->group_min))) {
- sds->group_min = group;
- sds->min_nr_running = sgs.sum_nr_running;
- sds->min_load_per_task = sgs.sum_weighted_load /
- sgs.sum_nr_running;
- }
-
- /*
- * Calculate the group which is almost near its
- * capacity but still has some space to pick up some load
- * from other group and save more power
- */
- if (sgs.sum_nr_running > sgs.group_capacity - 1)
- goto group_next;
-
- if (sgs.sum_nr_running > sds->leader_nr_running ||
- (sgs.sum_nr_running == sds->leader_nr_running &&
- group_first_cpu(group) <
- group_first_cpu(sds->group_leader))) {
- sds->group_leader = group;
- sds->leader_nr_running = sgs.sum_nr_running;
- }
-group_next:
-#endif
+ update_sd_power_savings_stats(group, sds, local_group, &sgs);
group = group->next;
} while (group != sd->groups);
@@ -3655,21 +3734,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
return sds.busiest;
out_balanced:
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (!sds.power_savings_balance)
- goto ret;
-
- if (sds.this != sds.group_leader || sds.group_leader == sds.group_min)
- goto ret;
-
- *imbalance = sds.min_load_per_task;
- if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
- cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
- group_first_cpu(sds.group_leader);
- }
- return sds.group_min;
-
-#endif
+ /*
+ * There is no obvious imbalance. But check if we can do some balancing
+ * to save power.
+ */
+ if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+ return sds.busiest;
ret:
*imbalance = 0;
return NULL;
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function.
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (9 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 10/11] sched: Refactor the power savings balance code Gautham R Shenoy
@ 2009-03-25 9:14 ` Gautham R Shenoy
2009-03-25 9:47 ` [tip:sched/balancing] " Gautham R Shenoy
` (2 more replies)
2009-03-25 9:30 ` [RFC PATCH 00/11] sched: find_busiest_group() cleanup Ingo Molnar
11 siblings, 3 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:14 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao, Gautham R Shenoy
Add /** style comments around find_busiest_group(). Also add a few explanatory
comments.
This concludes the find_busiest_group() cleanup. The function is down to 72
lines from the original 313 lines.
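The "/** style" referred to here is the kernel-doc comment format; a generic example of
its shape (not one of the comments added by this patch) looks like this:

/**
 * my_helper - One-line summary of what the function does.
 * @a: Meaning of the first argument.
 * @b: Meaning of the second argument.
 *
 * Optional longer description of behaviour and context.
 *
 * Returns: what the caller gets back.
 */
static int my_helper(int a, int b)
{
	return a + b;
}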
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6404ddf..47a1a7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3572,10 +3572,30 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
}
/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ * be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ * is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns: - the busiest group if imbalance exists.
+ * - If no imbalance and user has opted for power-savings balance,
+ * return the least loaded group whose CPUs can be
+ * put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
balance, &sds);
+ /* Cases where imbalance does not exist from POV of this_cpu */
+ /* 1) this_cpu is not the appropriate cpu to perform load balancing
+ * at this level.
+ * 2) There is no busy sibling group to pull from.
+ * 3) This group is the busiest group.
+ * 4) This group is more busy than the avg busieness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+ * 6) Any rebalance would lead to ping-pong
+ */
if (balance && !(*balance))
goto ret;
- if (!sds.busiest || sds.this_load >= sds.max_load
- || sds.busiest_nr_running == 0)
+ if (!sds.busiest || sd.busiest_nr_running == 0)
+ goto out_balanced;
+
+ if (sds.this_load >= sds.max_load)
goto out_balanced;
sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (sds.this_load >= sds.avg_load ||
- 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
+ if (sds.this_load >= sds.avg_load)
+ goto out_balanced;
+
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
sds.busiest_load_per_task /= sds.busiest_nr_running;
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [tip:sched/balancing] sched: Add comments to find_busiest_group() function
2009-03-25 9:14 ` [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function Gautham R Shenoy
@ 2009-03-25 9:47 ` Gautham R Shenoy
2009-03-25 11:43 ` [RFC PATCH 11/11] " Gautham R Shenoy
2009-03-25 12:30 ` [tip:sched/balancing] " Gautham R Shenoy
2 siblings, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 9:47 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: 7b6340ef884aff69a54f8a530c73ad9da0a7c388
Gitweb: http://git.kernel.org/tip/7b6340ef884aff69a54f8a530c73ad9da0a7c388
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:27 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 10:30:49 +0100
sched: Add comments to find_busiest_group() function
Impact: cleanup
Add /** style comments around find_busiest_group(). Also add a few
explanatory comments.
This concludes the find_busiest_group() cleanup. The function is
now down to 72 lines from the original 313 lines.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091427.13992.18933.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f21658..b7723bd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3676,10 +3676,30 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
}
/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ * be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ * is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns: - the busiest group if imbalance exists.
+ * - If no imbalance and user has opted for power-savings balance,
+ * return the least loaded group whose CPUs can be
+ * put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -3697,17 +3717,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
balance, &sds);
+ /* Cases where imbalance does not exist from POV of this_cpu */
+ /* 1) this_cpu is not the appropriate cpu to perform load balancing
+ * at this level.
+ * 2) There is no busy sibling group to pull from.
+ * 3) This group is the busiest group.
+ * 4) This group is more busy than the avg busieness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+ * 6) Any rebalance would lead to ping-pong
+ */
if (balance && !(*balance))
goto ret;
- if (!sds.busiest || sds.this_load >= sds.max_load
- || sds.busiest_nr_running == 0)
+ if (!sds.busiest || sd.busiest_nr_running == 0)
+ goto out_balanced;
+
+ if (sds.this_load >= sds.max_load)
goto out_balanced;
sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (sds.this_load >= sds.avg_load ||
- 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
+ if (sds.this_load >= sds.avg_load)
+ goto out_balanced;
+
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
sds.busiest_load_per_task /= sds.busiest_nr_running;
^ permalink raw reply related [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function.
2009-03-25 9:14 ` [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function Gautham R Shenoy
2009-03-25 9:47 ` [tip:sched/balancing] " Gautham R Shenoy
@ 2009-03-25 11:43 ` Gautham R Shenoy
2009-03-25 12:29 ` Ingo Molnar
2009-03-25 12:30 ` [tip:sched/balancing] " Gautham R Shenoy
2 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 11:43 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra, Vaidyanathan Srinivasan
Cc: linux-kernel, Suresh Siddha, Balbir Singh, Nick Piggin,
Dhaval Giani, Bharata B Rao
On Wed, Mar 25, 2009 at 02:44:27PM +0530, Gautham R Shenoy wrote:
> Add /** style comments around find_busiest_group(). Also add a few explanatory
> */
<snip>
> static struct sched_group *
> find_busiest_group(struct sched_domain *sd, int this_cpu,
> @@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
> update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
> balance, &sds);
>
> + /* Cases where imbalance does not exist from POV of this_cpu */
> + /* 1) this_cpu is not the appropriate cpu to perform load balancing
> + * at this level.
> + * 2) There is no busy sibling group to pull from.
> + * 3) This group is the busiest group.
> + * 4) This group is more busy than the avg busieness at this
> + * sched_domain.
> + * 5) The imbalance is within the specified limit.
> + * 6) Any rebalance would lead to ping-pong
> + */
> if (balance && !(*balance))
> goto ret;
>
> - if (!sds.busiest || sds.this_load >= sds.max_load
> - || sds.busiest_nr_running == 0)
> + if (!sds.busiest || sd.busiest_nr_running == 0)
^^^^^^^^^^^^^^^^^^^^
should have been sds.busiest_nr_running. Hence the build failure on tip.
I think I missed compile testing this last patch.
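For completeness, the reason gcc phrases the error that way (a standalone illustration
with stand-in type definitions; in find_busiest_group() "sd" is a struct sched_domain
pointer, while the statistics live in the local variable "sds" of type struct
sd_lb_stats):

/* Illustration of the build failure, with stand-in type definitions. */
struct sd_lb_stats { unsigned long busiest_nr_running; };
struct sched_domain { int flags; };

static int example(struct sched_domain *sd, struct sd_lb_stats sds)
{
	/*
	 * return sd.busiest_nr_running;
	 *
	 * fails because 'sd' is a pointer, so the '.' operator is applied
	 * to something that is not a structure or union (gcc's wording);
	 * the field also belongs to sd_lb_stats, not sched_domain.
	 */
	return sds.busiest_nr_running != 0;	/* the intended check */
}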
Ingo, could you revert commit 7b6340ef884aff69a54f8a530c73ad9da0a7c388 in
tip/balancing and commit the following patch instead?
--->
sched: Add comments to find_busiest_group() function.
From: Gautham R Shenoy <ego@in.ibm.com>
Add /** style comments around find_busiest_group(). Also add a few explanatory
comments.
This concludes the find_busiest_group() cleanup. The function is down to 72
lines from the original 313 lines.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
---
kernel/sched.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6404ddf..a48cf9d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3572,10 +3572,30 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
}
/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ * be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ * is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns: - the busiest group if imbalance exists.
+ * - If no imbalance and user has opted for power-savings balance,
+ * return the least loaded group whose CPUs can be
+ * put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
balance, &sds);
+ /* Cases where imbalance does not exist from POV of this_cpu */
+ /* 1) this_cpu is not the appropriate cpu to perform load balancing
+ * at this level.
+ * 2) There is no busy sibling group to pull from.
+ * 3) This group is the busiest group.
+ * 4) This group is more busy than the avg busieness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+ * 6) Any rebalance would lead to ping-pong
+ */
if (balance && !(*balance))
goto ret;
- if (!sds.busiest || sds.this_load >= sds.max_load
- || sds.busiest_nr_running == 0)
+ if (!sds.busiest || sds.busiest_nr_running == 0)
+ goto out_balanced;
+
+ if (sds.this_load >= sds.max_load)
goto out_balanced;
sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (sds.this_load >= sds.avg_load ||
- 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
+ if (sds.this_load >= sds.avg_load)
+ goto out_balanced;
+
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
sds.busiest_load_per_task /= sds.busiest_nr_running;
--
Thanks and Regards
gautham
^ permalink raw reply related [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function.
2009-03-25 11:43 ` [RFC PATCH 11/11] " Gautham R Shenoy
@ 2009-03-25 12:29 ` Ingo Molnar
2009-03-25 13:07 ` Gautham R Shenoy
0 siblings, 1 reply; 33+ messages in thread
From: Ingo Molnar @ 2009-03-25 12:29 UTC (permalink / raw)
To: Gautham R Shenoy
Cc: Peter Zijlstra, Vaidyanathan Srinivasan, linux-kernel,
Suresh Siddha, Balbir Singh, Nick Piggin, Dhaval Giani,
Bharata B Rao
* Gautham R Shenoy <ego@in.ibm.com> wrote:
> On Wed, Mar 25, 2009 at 02:44:27PM +0530, Gautham R Shenoy wrote:
> > Add /** style comments around find_busiest_group(). Also add a few explanatory
> > */
>
> <snip>
>
> > static struct sched_group *
> > find_busiest_group(struct sched_domain *sd, int this_cpu,
> > @@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
> > update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
> > balance, &sds);
> >
> > + /* Cases where imbalance does not exist from POV of this_cpu */
> > + /* 1) this_cpu is not the appropriate cpu to perform load balancing
> > + * at this level.
> > + * 2) There is no busy sibling group to pull from.
> > + * 3) This group is the busiest group.
> > + * 4) This group is more busy than the avg busieness at this
> > + * sched_domain.
> > + * 5) The imbalance is within the specified limit.
> > + * 6) Any rebalance would lead to ping-pong
> > + */
> > if (balance && !(*balance))
> > goto ret;
> >
> > - if (!sds.busiest || sds.this_load >= sds.max_load
> > - || sds.busiest_nr_running == 0)
> > + if (!sds.busiest || sd.busiest_nr_running == 0)
> ^^^^^^^^^^^^^^^^^^^^
> should have been sds.busiest_nr_running. Hence the build failure on tip.
>
> I think I missed compile testing this last patch.
>
> Ingo, could you revert commit 7b6340ef884aff69a54f8a530c73ad9da0a7c388 in
> tip/balancing and commit the following patch instead?
sure - i've amended it and started testing it locally. If it passes
testing it should show up in tip:master.
Ingo
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function.
2009-03-25 12:29 ` Ingo Molnar
@ 2009-03-25 13:07 ` Gautham R Shenoy
2009-03-25 13:10 ` Ingo Molnar
0 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 13:07 UTC (permalink / raw)
To: Ingo Molnar
Cc: Peter Zijlstra, Vaidyanathan Srinivasan, linux-kernel,
Suresh Siddha, Balbir Singh, Nick Piggin, Dhaval Giani,
Bharata B Rao
On Wed, Mar 25, 2009 at 01:29:13PM +0100, Ingo Molnar wrote:
>
> * Gautham R Shenoy <ego@in.ibm.com> wrote:
>
> > On Wed, Mar 25, 2009 at 02:44:27PM +0530, Gautham R Shenoy wrote:
> > > Add /** style comments around find_busiest_group(). Also add a few explanatory
> > > */
> >
> > <snip>
> >
> > > static struct sched_group *
> > > find_busiest_group(struct sched_domain *sd, int this_cpu,
> > > @@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
> > > update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
> > > balance, &sds);
> > >
> > > + /* Cases where imbalance does not exist from POV of this_cpu */
> > > + /* 1) this_cpu is not the appropriate cpu to perform load balancing
> > > + * at this level.
> > > + * 2) There is no busy sibling group to pull from.
> > > + * 3) This group is the busiest group.
> > > + * 4) This group is more busy than the avg busieness at this
> > > + * sched_domain.
> > > + * 5) The imbalance is within the specified limit.
> > > + * 6) Any rebalance would lead to ping-pong
> > > + */
> > > if (balance && !(*balance))
> > > goto ret;
> > >
> > > - if (!sds.busiest || sds.this_load >= sds.max_load
> > > - || sds.busiest_nr_running == 0)
> > > + if (!sds.busiest || sd.busiest_nr_running == 0)
> > ^^^^^^^^^^^^^^^^^^^^
> > should have been sds.busiest_nr_running. Hence the build failure on tip.
> >
> > I think I missed compile testing this last patch.
> >
> > Ingo, could you revert commit 7b6340ef884aff69a54f8a530c73ad9da0a7c388 in
> > tip/balancing and commit the following patch instead?
>
> sure - i've amended it and started testing it locally. If it passes
> testing it should show up in tip:master.
Thanks!
Meanwhile I'll see if there are any regressions in 2.6.29 with
this patchset.
>
> Ingo
--
Thanks and Regards
gautham
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function.
2009-03-25 13:07 ` Gautham R Shenoy
@ 2009-03-25 13:10 ` Ingo Molnar
0 siblings, 0 replies; 33+ messages in thread
From: Ingo Molnar @ 2009-03-25 13:10 UTC (permalink / raw)
To: Gautham R Shenoy
Cc: Peter Zijlstra, Vaidyanathan Srinivasan, linux-kernel,
Suresh Siddha, Balbir Singh, Nick Piggin, Dhaval Giani,
Bharata B Rao
* Gautham R Shenoy <ego@in.ibm.com> wrote:
> On Wed, Mar 25, 2009 at 01:29:13PM +0100, Ingo Molnar wrote:
> >
> > * Gautham R Shenoy <ego@in.ibm.com> wrote:
> >
> > > On Wed, Mar 25, 2009 at 02:44:27PM +0530, Gautham R Shenoy wrote:
> > > > Add /** style comments around find_busiest_group(). Also add a few explanatory
> > > > */
> > >
> > > <snip>
> > >
> > > > static struct sched_group *
> > > > find_busiest_group(struct sched_domain *sd, int this_cpu,
> > > > @@ -3593,17 +3613,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
> > > > update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
> > > > balance, &sds);
> > > >
> > > > + /* Cases where imbalance does not exist from POV of this_cpu */
> > > > + /* 1) this_cpu is not the appropriate cpu to perform load balancing
> > > > + * at this level.
> > > > + * 2) There is no busy sibling group to pull from.
> > > > + * 3) This group is the busiest group.
> > > > + * 4) This group is more busy than the avg busieness at this
> > > > + * sched_domain.
> > > > + * 5) The imbalance is within the specified limit.
> > > > + * 6) Any rebalance would lead to ping-pong
> > > > + */
> > > > if (balance && !(*balance))
> > > > goto ret;
> > > >
> > > > - if (!sds.busiest || sds.this_load >= sds.max_load
> > > > - || sds.busiest_nr_running == 0)
> > > > + if (!sds.busiest || sd.busiest_nr_running == 0)
> > > ^^^^^^^^^^^^^^^^^^^^
> > > should have been sds.busiest_nr_running. Hence the build failure on tip.
> > >
> > > I think I missed compile testing this last patch.
> > >
> > > Ingo, could you revert commit 7b6340ef884aff69a54f8a530c73ad9da0a7c388 in
> > > tip/balancing and commit the following patch instead?
> >
> > sure - i've amended it and started testing it locally. If it passes
> > testing it should show up in tip:master.
>
> Thanks!
>
> Meanwhile I'll see if there are any regressions in 2.6.29 with
> this patchset.
Just try tip:master please - there's a number of other scheduler
changes and it would be nice to validate them together.
Ingo
^ permalink raw reply [flat|nested] 33+ messages in thread
* [tip:sched/balancing] sched: Add comments to find_busiest_group() function
2009-03-25 9:14 ` [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function Gautham R Shenoy
2009-03-25 9:47 ` [tip:sched/balancing] " Gautham R Shenoy
2009-03-25 11:43 ` [RFC PATCH 11/11] " Gautham R Shenoy
@ 2009-03-25 12:30 ` Gautham R Shenoy
2009-03-25 16:04 ` Ray Lee
2 siblings, 1 reply; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 12:30 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, ego, hpa, mingo, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, mingo, svaidy, nickpiggin
Commit-ID: b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
Gitweb: http://git.kernel.org/tip/b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
Author: Gautham R Shenoy <ego@in.ibm.com>
AuthorDate: Wed, 25 Mar 2009 14:44:27 +0530
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Wed, 25 Mar 2009 13:28:30 +0100
sched: Add comments to find_busiest_group() function
Impact: cleanup
Add /** style comments around find_busiest_group(). Also add a few
explanatory comments.
This concludes the find_busiest_group() cleanup. The function is
now down to 72 lines from the original 313 lines.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091427.13992.18933.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
1 files changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f21658..9f8506d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3676,10 +3676,30 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
}
/******* find_busiest_group() helpers end here *********************/
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ * be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ * is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns: - the busiest group if imbalance exists.
+ * - If no imbalance and user has opted for power-savings balance,
+ * return the least loaded group whose CPUs can be
+ * put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
@@ -3697,17 +3717,31 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
balance, &sds);
+ /* Cases where imbalance does not exist from POV of this_cpu */
+ /* 1) this_cpu is not the appropriate cpu to perform load balancing
+ * at this level.
+ * 2) There is no busy sibling group to pull from.
+ * 3) This group is the busiest group.
+ * 4) This group is more busy than the avg busieness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+ * 6) Any rebalance would lead to ping-pong
+ */
if (balance && !(*balance))
goto ret;
- if (!sds.busiest || sds.this_load >= sds.max_load
- || sds.busiest_nr_running == 0)
+ if (!sds.busiest || sds.busiest_nr_running == 0)
+ goto out_balanced;
+
+ if (sds.this_load >= sds.max_load)
goto out_balanced;
sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
- if (sds.this_load >= sds.avg_load ||
- 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
+ if (sds.this_load >= sds.avg_load)
+ goto out_balanced;
+
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
sds.busiest_load_per_task /= sds.busiest_nr_running;
^ permalink raw reply related [flat|nested] 33+ messages in thread
* Re: [tip:sched/balancing] sched: Add comments to find_busiest_group() function
2009-03-25 12:30 ` [tip:sched/balancing] " Gautham R Shenoy
@ 2009-03-25 16:04 ` Ray Lee
2009-03-25 16:17 ` Ingo Molnar
2009-03-25 19:17 ` Gautham R Shenoy
0 siblings, 2 replies; 33+ messages in thread
From: Ray Lee @ 2009-03-25 16:04 UTC (permalink / raw)
To: mingo, hpa, ego, linux-kernel, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, nickpiggin, svaidy, mingo
Cc: linux-tip-commits
This commit says it's just adding comments, but...
On Wed, Mar 25, 2009 at 5:30 AM, Gautham R Shenoy <ego@in.ibm.com> wrote:
> Commit-ID: b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
> Gitweb: http://git.kernel.org/tip/b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
> Author: Gautham R Shenoy <ego@in.ibm.com>
> AuthorDate: Wed, 25 Mar 2009 14:44:27 +0530
> Committer: Ingo Molnar <mingo@elte.hu>
> CommitDate: Wed, 25 Mar 2009 13:28:30 +0100
>
> sched: Add comments to find_busiest_group() function
>
> Impact: cleanup
>
> Add /** style comments around find_busiest_group(). Also add a few
> explanatory comments.
...but there are actual code changes. Hard to know if you intended to
do that and forgot to changelog it, or if it's an unexpected hunk that
accidentally got included:
> if (balance && !(*balance))
> goto ret;
>
> - if (!sds.busiest || sds.this_load >= sds.max_load
> - || sds.busiest_nr_running == 0)
> + if (!sds.busiest || sds.busiest_nr_running == 0)
> + goto out_balanced;
> +
> + if (sds.this_load >= sds.max_load)
> goto out_balanced;
>
> sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
>
> - if (sds.this_load >= sds.avg_load ||
> - 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
> + if (sds.this_load >= sds.avg_load)
> + goto out_balanced;
> +
> + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
> goto out_balanced;
>
> sds.busiest_load_per_task /= sds.busiest_nr_running;
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [tip:sched/balancing] sched: Add comments to find_busiest_group() function
2009-03-25 16:04 ` Ray Lee
@ 2009-03-25 16:17 ` Ingo Molnar
2009-03-25 19:17 ` Gautham R Shenoy
1 sibling, 0 replies; 33+ messages in thread
From: Ingo Molnar @ 2009-03-25 16:17 UTC (permalink / raw)
To: Ray Lee
Cc: mingo, hpa, ego, linux-kernel, a.p.zijlstra, dhaval, balbir,
bharata, suresh.b.siddha, tglx, nickpiggin, svaidy,
linux-tip-commits
* Ray Lee <ray-lk@madrabbit.org> wrote:
> This commit says it's just adding comments, but...
>
> On Wed, Mar 25, 2009 at 5:30 AM, Gautham R Shenoy <ego@in.ibm.com> wrote:
> > Commit-ID: b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
> > Gitweb: http://git.kernel.org/tip/b7bb4c9bb01941fe8feb653f3410e7ed0c9bb786
> > Author: Gautham R Shenoy <ego@in.ibm.com>
> > AuthorDate: Wed, 25 Mar 2009 14:44:27 +0530
> > Committer: Ingo Molnar <mingo@elte.hu>
> > CommitDate: Wed, 25 Mar 2009 13:28:30 +0100
> >
> > sched: Add comments to find_busiest_group() function
> >
> > Impact: cleanup
> >
> > Add /** style comments around find_busiest_group(). Also add a few
> > explanatory comments.
>
> ...but there are actual code changes. Hard to know if you intended to
> do that and forgot to changelog it, or if it's an unexpected hunk that
> accidentally got included:
>
> > if (balance && !(*balance))
> > goto ret;
> >
> > - if (!sds.busiest || sds.this_load >= sds.max_load
> > - || sds.busiest_nr_running == 0)
> > + if (!sds.busiest || sds.busiest_nr_running == 0)
> > + goto out_balanced;
> > +
> > + if (sds.this_load >= sds.max_load)
> > goto out_balanced;
> >
> > sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
> >
> > - if (sds.this_load >= sds.avg_load ||
> > - 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
> > + if (sds.this_load >= sds.avg_load)
> > + goto out_balanced;
> > +
> > + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
> > goto out_balanced;
> >
> > sds.busiest_load_per_task /= sds.busiest_nr_running;
yeah.
Note that it does not actually change the resulting logic: it splits
out an over-long (and hard to read) series of conditions into an
equivalent set of two if() statements. [the first one changes the
order of two conditions - but that is harmless]
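Concretely (a standalone illustration, not the patch hunk itself): splitting a compound
condition into consecutive if() statements, and reordering operands that are independent
and side-effect free, leaves the control flow unchanged.

/* Both functions "bail out" for exactly the same inputs. */
static int bail_before(int a, int b, int c)
{
	if (a || b || c)
		return 1;	/* the old single compound test */
	return 0;
}

static int bail_after(int a, int b, int c)
{
	if (a || c)		/* operands reordered: harmless, no side effects */
		return 1;
	if (b)
		return 1;	/* the condition split off into its own test */
	return 0;
}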
It indeed would have been nice to declare this in the changelog
though.
Ingo
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [tip:sched/balancing] sched: Add comments to find_busiest_group() function
2009-03-25 16:04 ` Ray Lee
2009-03-25 16:17 ` Ingo Molnar
@ 2009-03-25 19:17 ` Gautham R Shenoy
1 sibling, 0 replies; 33+ messages in thread
From: Gautham R Shenoy @ 2009-03-25 19:17 UTC (permalink / raw)
To: Ray Lee
Cc: mingo, hpa, linux-kernel, a.p.zijlstra, dhaval, balbir, bharata,
suresh.b.siddha, tglx, nickpiggin, svaidy, mingo,
linux-tip-commits
Hi Ray,
On Wed, Mar 25, 2009 at 09:04:05AM -0700, Ray Lee wrote:
> > sched: Add comments to find_busiest_group() function
> >
> > Impact: cleanup
> >
> > Add /** style comments around find_busiest_group(). Also add a few
> > explanatory comments.
>
> ...but there are actual code changes. Hard to know if you intended to
> do that and forgot to changelog it, or if it's an unexpected hunk that
> accidentally got included:
These code changes are intentional. They improve readability and help us
categorize the various cases in which we are fairly sure there is no need
to calculate the imbalance.
The fact that the code was reorganized should have been mentioned in the
changelog. My bad. Sorry :(
>
> > if (balance && !(*balance))
> > goto ret;
> >
> > - if (!sds.busiest || sds.this_load >= sds.max_load
> > - || sds.busiest_nr_running == 0)
> > + if (!sds.busiest || sds.busiest_nr_running == 0)
> > + goto out_balanced;
> > +
> > + if (sds.this_load >= sds.max_load)
> > goto out_balanced;
> >
> > sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
> >
> > - if (sds.this_load >= sds.avg_load ||
> > - 100*sds.max_load <= sd->imbalance_pct * sds.this_load)
> > + if (sds.this_load >= sds.avg_load)
> > + goto out_balanced;
> > +
> > + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
> > goto out_balanced;
> >
> > sds.busiest_load_per_task /= sds.busiest_nr_running;
--
Thanks and Regards
gautham
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 00/11] sched: find_busiest_group() cleanup
2009-03-25 9:13 [RFC PATCH 00/11] sched: find_busiest_group() cleanup Gautham R Shenoy
` (10 preceding siblings ...)
2009-03-25 9:14 ` [RFC PATCH 11/11] sched: Add comments to find_busiest_group() function Gautham R Shenoy
@ 2009-03-25 9:30 ` Ingo Molnar
2009-03-25 9:42 ` Ingo Molnar
11 siblings, 1 reply; 33+ messages in thread
From: Ingo Molnar @ 2009-03-25 9:30 UTC (permalink / raw)
To: Gautham R Shenoy
Cc: Peter Zijlstra, Vaidyanathan Srinivasan, linux-kernel,
Suresh Siddha, Balbir Singh, Nick Piggin, Dhaval Giani,
Bharata B Rao
* Gautham R Shenoy <ego@in.ibm.com> wrote:
> Hi,
>
> This patchset contains the cleanup of the humongous
> find_busiest_group() function.
>
> Vaidy had tried a hand at this before. His approach can be
> found here:
> http://lkml.org/lkml/2008/9/24/201 and
> http://lkml.org/lkml/2008/10/9/176
>
> Though the code in this patchset has been written from scratch, I
> have reused some of the ideas that Vaidy had originally proposed.
> Credit has been given wherever it is due :)
>
> The patches in this series are incremental. Each one is a
> functional patch, which compiles fine.
>
> The steps followed in the cleanup are as follows:
> - Fix indentations.
>
> - Group variables that serve a common high-level purpose into a single
> structure.
>
> - Use helper functions to perform all the calculations, like calculating
> the sched_domain and sched_group statistics, calculating the imbalance, etc.
>
> - Move the power_savings_balance part, which depends on
> (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) into a different section, thereby
> eliminating the #ifdef jungle in helper functions.
>
> - Add /** style comments for all the functions, including find_busiest_group()
>
> - Add additional comments whereever appropriate.
>
> After applying all the patches, the size of find_busiest_group()
> goes down from 313 lines to 76 lines. Of course, there are the
> helpers, but effort has been put to keep all the helper functions
> within the 80 line limit.
Very nice series!
> Any feedback on the patches and how the functionality can be
> tested is greatly appreciated.
I'll try my best to get this tested.
Ingo
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [RFC PATCH 00/11] sched: find_busiest_group() cleanup
2009-03-25 9:30 ` [RFC PATCH 00/11] sched: find_busiest_group() cleanup Ingo Molnar
@ 2009-03-25 9:42 ` Ingo Molnar
0 siblings, 0 replies; 33+ messages in thread
From: Ingo Molnar @ 2009-03-25 9:42 UTC (permalink / raw)
To: Gautham R Shenoy
Cc: Peter Zijlstra, Vaidyanathan Srinivasan, linux-kernel,
Suresh Siddha, Balbir Singh, Nick Piggin, Dhaval Giani,
Bharata B Rao
[-- Attachment #1: Type: text/plain, Size: 418 bytes --]
* Ingo Molnar <mingo@elte.hu> wrote:
> > Any feedback on the patches and how the functionality can be
> > tested is greatly appreciated.
>
> I'll try my best to get this tested.
-tip testing found a build failure with the attached config:
kernel/sched.c: In function ‘find_busiest_group’:
kernel/sched.c:3798: error: request for member ‘busiest_nr_running’ in something not a structure or union
Ingo
[-- Attachment #2: config --]
[-- Type: text/plain, Size: 56176 bytes --]
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.29
# Wed Mar 25 10:34:54 2009
#
CONFIG_64BIT=y
# CONFIG_X86_32 is not set
CONFIG_X86_64=y
CONFIG_X86=y
CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_CLOCKSOURCE_WATCHDOG=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_FAST_CMPXCHG_LOCAL=y
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_IOMAP=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_GPIO=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_ARCH_HAS_CPU_RELAX=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_ZONE_DMA32=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_AUDIT_ARCH=y
CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_X86_64_SMP=y
CONFIG_X86_HT=y
CONFIG_X86_TRAMPOLINE=y
# CONFIG_KTIME_SCALAR is not set
CONFIG_BOOTPARAM_SUPPORT_NOT_WANTED=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_BOOT_ALLOWED4=y
CONFIG_BROKEN_BOOT_ALLOWED3=y
# CONFIG_BROKEN_BOOT_ALLOWED2 is not set
# CONFIG_BROKEN_BOOT_EUROPE is not set
# CONFIG_BROKEN_BOOT_TITAN is not set
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
# CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
CONFIG_KERNEL_LZMA=y
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
#
# RCU Subsystem
#
CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
# CONFIG_PREEMPT_RCU is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=20
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
CONFIG_GROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
# CONFIG_USER_SCHED is not set
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_CGROUP_NS=y
# CONFIG_CGROUP_FREEZER is not set
# CONFIG_CGROUP_DEVICE is not set
CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
# CONFIG_RESOURCE_COUNTERS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
# CONFIG_USER_NS is not set
CONFIG_PID_NS=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
CONFIG_RD_LZMA=y
# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
CONFIG_INITRAMFS_COMPRESSION_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_UID16=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
CONFIG_PCSPKR_PLATFORM=y
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
# CONFIG_EVENTFD is not set
CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_HAVE_PERF_COUNTERS=y
#
# Performance Counters
#
CONFIG_PERF_COUNTERS=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_TRACEPOINTS=y
CONFIG_MARKERS=y
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_API_DEBUG=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=1
# CONFIG_MODULES is not set
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_BSG=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_PREEMPT_NOTIFIERS=y
CONFIG_FREEZER=y
#
# Processor type and features
#
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP_SUPPORT=y
CONFIG_X86_X2APIC=y
# CONFIG_SPARSE_IRQ is not set
CONFIG_X86_MPPARSE=y
CONFIG_X86_EXTENDED_PLATFORM=y
CONFIG_X86_VSMP=y
# CONFIG_X86_UV is not set
# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
CONFIG_PARAVIRT_GUEST=y
# CONFIG_XEN is not set
# CONFIG_KVM_CLOCK is not set
CONFIG_KVM_GUEST=y
CONFIG_PARAVIRT=y
# CONFIG_PARAVIRT_CLOCK is not set
CONFIG_PARAVIRT_DEBUG=y
CONFIG_MEMTEST=y
# CONFIG_M386 is not set
# CONFIG_M486 is not set
# CONFIG_M586 is not set
# CONFIG_M586TSC is not set
# CONFIG_M586MMX is not set
# CONFIG_M686 is not set
# CONFIG_MPENTIUMII is not set
# CONFIG_MPENTIUMIII is not set
# CONFIG_MPENTIUMM is not set
# CONFIG_MPENTIUM4 is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
# CONFIG_MK8 is not set
# CONFIG_MCRUSOE is not set
# CONFIG_MEFFICEON is not set
# CONFIG_MWINCHIPC6 is not set
# CONFIG_MWINCHIP3D is not set
# CONFIG_MGEODEGX1 is not set
# CONFIG_MGEODE_LX is not set
# CONFIG_MCYRIXIII is not set
# CONFIG_MVIAC3_2 is not set
# CONFIG_MVIAC7 is not set
# CONFIG_MPSC is not set
# CONFIG_MCORE2 is not set
CONFIG_GENERIC_CPU=y
CONFIG_X86_CPU=y
CONFIG_X86_L1_CACHE_BYTES=64
CONFIG_X86_INTERNODE_CACHE_BYTES=4096
CONFIG_X86_CMPXCHG=y
CONFIG_X86_L1_CACHE_SHIFT=6
CONFIG_X86_WP_WORKS_OK=y
CONFIG_X86_TSC=y
CONFIG_X86_CMPXCHG64=y
CONFIG_X86_CMOV=y
CONFIG_X86_MINIMUM_CPU_FAMILY=64
CONFIG_X86_DEBUGCTLMSR=y
CONFIG_PROCESSOR_SELECT=y
CONFIG_CPU_SUP_INTEL=y
# CONFIG_CPU_SUP_AMD is not set
# CONFIG_CPU_SUP_CENTAUR is not set
CONFIG_X86_DS=y
CONFIG_X86_PTRACE_BTS=y
CONFIG_HPET_TIMER=y
CONFIG_DMI=y
# CONFIG_GART_IOMMU is not set
CONFIG_CALGARY_IOMMU=y
CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
# CONFIG_AMD_IOMMU is not set
CONFIG_SWIOTLB=y
CONFIG_IOMMU_HELPER=y
CONFIG_IOMMU_API=y
CONFIG_MAXSMP=y
CONFIG_NR_CPUS=4096
CONFIG_SCHED_SMT=y
# CONFIG_SCHED_MC is not set
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_X86_LOCAL_APIC=y
CONFIG_X86_IO_APIC=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
CONFIG_X86_MCE=y
# CONFIG_X86_MCE_INTEL is not set
# CONFIG_X86_MCE_AMD is not set
CONFIG_I8K=y
# CONFIG_MICROCODE is not set
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_X86_CPU_DEBUG=y
# CONFIG_UP_WANTED_1 is not set
CONFIG_SMP=y
CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
CONFIG_DIRECT_GBPAGES=y
CONFIG_NUMA=y
# CONFIG_K8_NUMA is not set
CONFIG_X86_64_ACPI_NUMA=y
CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_NUMA_EMU=y
CONFIG_NODES_SHIFT=9
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_SPARSEMEM_VMEMMAP=y
#
# Memory hotplug is currently incompatible with Software Suspend
#
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_MMU_NOTIFIER=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
# CONFIG_X86_RESERVE_LOW_64K is not set
CONFIG_MTRR=y
CONFIG_MTRR_SANITIZER=y
CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
CONFIG_X86_PAT=y
# CONFIG_EFI is not set
CONFIG_SECCOMP=y
CONFIG_CC_STACKPROTECTOR_ALL=y
CONFIG_CC_STACKPROTECTOR=y
# CONFIG_HZ_100 is not set
# CONFIG_HZ_250 is not set
CONFIG_HZ_300=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=300
CONFIG_SCHED_HRTICK=y
# CONFIG_KEXEC is not set
CONFIG_CRASH_DUMP=y
CONFIG_PHYSICAL_START=0x200000
# CONFIG_RELOCATABLE is not set
CONFIG_PHYSICAL_ALIGN=0x200000
CONFIG_HOTPLUG_CPU=y
CONFIG_COMPAT_VDSO=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
#
# Power management and ACPI options
#
CONFIG_ARCH_HIBERNATION_HEADER=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM_VERBOSE=y
CONFIG_CAN_PM_TRACE=y
CONFIG_PM_TRACE=y
CONFIG_PM_TRACE_RTC=y
CONFIG_PM_SLEEP_SMP=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_ACPI=y
CONFIG_ACPI_SLEEP=y
# CONFIG_ACPI_PROCFS is not set
# CONFIG_ACPI_PROCFS_POWER is not set
CONFIG_ACPI_SYSFS_POWER=y
CONFIG_ACPI_PROC_EVENT=y
CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
# CONFIG_ACPI_BUTTON is not set
# CONFIG_ACPI_VIDEO is not set
CONFIG_ACPI_FAN=y
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
# CONFIG_ACPI_THERMAL is not set
CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_CUSTOM_DSDT is not set
CONFIG_ACPI_BLACKLIST_YEAR=0
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_PCI_SLOT=y
CONFIG_X86_PM_TIMER=y
CONFIG_ACPI_CONTAINER=y
CONFIG_ACPI_SBS=y
#
# CPU Frequency scaling
#
# CONFIG_CPU_FREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
#
# Memory power savings
#
CONFIG_I7300_IDLE_IOAT_CHANNEL=y
CONFIG_I7300_IDLE=y
#
# Bus options (PCI etc.)
#
CONFIG_PCI=y
CONFIG_PCI_DIRECT=y
# CONFIG_PCI_MMCONFIG is not set
CONFIG_PCI_DOMAINS=y
CONFIG_DMAR=y
# CONFIG_DMAR_DEFAULT_ON is not set
# CONFIG_DMAR_GFX_WA is not set
CONFIG_DMAR_FLOPPY_WA=y
CONFIG_INTR_REMAP=y
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCIEAER=y
CONFIG_PCIEASPM=y
# CONFIG_PCIEASPM_DEBUG is not set
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
CONFIG_HT_IRQ=y
CONFIG_ISA_DMA_API=y
CONFIG_K8_NB=y
# CONFIG_PCCARD is not set
CONFIG_HOTPLUG_PCI=y
# CONFIG_HOTPLUG_PCI_FAKE is not set
CONFIG_HOTPLUG_PCI_ACPI=y
# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
CONFIG_HOTPLUG_PCI_CPCI=y
CONFIG_HOTPLUG_PCI_CPCI_ZT5550=y
CONFIG_HOTPLUG_PCI_CPCI_GENERIC=y
CONFIG_HOTPLUG_PCI_SHPC=y
#
# Executable file formats / Emulations
#
CONFIG_BINFMT_ELF=y
CONFIG_COMPAT_BINFMT_ELF=y
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_IA32_EMULATION=y
CONFIG_IA32_AOUT=y
CONFIG_COMPAT=y
CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
CONFIG_NET=y
#
# Networking options
#
CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_SUB_POLICY=y
# CONFIG_XFRM_MIGRATE is not set
CONFIG_XFRM_STATISTICS=y
CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_ASK_IP_FIB_HASH is not set
CONFIG_IP_FIB_TRIE=y
# CONFIG_IP_FIB_HASH is not set
CONFIG_IP_FIB_TRIE_STATS=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
# CONFIG_IP_PNP is not set
CONFIG_NET_IPIP=y
CONFIG_NET_IPGRE=y
# CONFIG_NET_IPGRE_BROADCAST is not set
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET_XFRM_TUNNEL=y
CONFIG_INET_TUNNEL=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INET_XFRM_MODE_BEET=y
# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=y
# CONFIG_TCP_CONG_CUBIC is not set
CONFIG_TCP_CONG_WESTWOOD=y
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_CONG_HSTCP=y
CONFIG_TCP_CONG_HYBLA=y
CONFIG_TCP_CONG_VEGAS=y
CONFIG_TCP_CONG_SCALABLE=y
CONFIG_TCP_CONG_LP=y
CONFIG_TCP_CONG_VENO=y
CONFIG_TCP_CONG_YEAH=y
CONFIG_TCP_CONG_ILLINOIS=y
# CONFIG_DEFAULT_BIC is not set
# CONFIG_DEFAULT_CUBIC is not set
# CONFIG_DEFAULT_HTCP is not set
CONFIG_DEFAULT_VEGAS=y
# CONFIG_DEFAULT_WESTWOOD is not set
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="vegas"
CONFIG_TCP_MD5SIG=y
# CONFIG_IPV6 is not set
# CONFIG_NETLABEL is not set
CONFIG_NETWORK_SECMARK=y
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
CONFIG_IP_SCTP=y
CONFIG_SCTP_DBG_MSG=y
CONFIG_SCTP_DBG_OBJCNT=y
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
# CONFIG_TIPC is not set
CONFIG_ATM=y
CONFIG_ATM_CLIP=y
CONFIG_ATM_CLIP_NO_ICMP=y
# CONFIG_ATM_LANE is not set
# CONFIG_ATM_BR2684 is not set
CONFIG_STP=y
CONFIG_GARP=y
# CONFIG_BRIDGE is not set
CONFIG_NET_DSA=y
CONFIG_NET_DSA_TAG_DSA=y
CONFIG_NET_DSA_TAG_EDSA=y
CONFIG_NET_DSA_TAG_TRAILER=y
CONFIG_NET_DSA_MV88E6XXX=y
CONFIG_NET_DSA_MV88E6060=y
CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
CONFIG_NET_DSA_MV88E6131=y
CONFIG_NET_DSA_MV88E6123_61_65=y
CONFIG_VLAN_8021Q=y
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_DECNET=y
CONFIG_DECNET_ROUTER=y
CONFIG_LLC=y
CONFIG_LLC2=y
CONFIG_IPX=y
# CONFIG_IPX_INTERN is not set
CONFIG_ATALK=y
CONFIG_DEV_APPLETALK=y
CONFIG_IPDDP=y
CONFIG_IPDDP_ENCAP=y
# CONFIG_IPDDP_DECAP is not set
CONFIG_X25=y
CONFIG_LAPB=y
CONFIG_ECONET=y
# CONFIG_ECONET_AUNUDP is not set
CONFIG_ECONET_NATIVE=y
# CONFIG_WAN_ROUTER is not set
CONFIG_NET_SCHED=y
#
# Queueing/Scheduling
#
CONFIG_NET_SCH_CBQ=y
CONFIG_NET_SCH_HTB=y
# CONFIG_NET_SCH_HFSC is not set
# CONFIG_NET_SCH_ATM is not set
# CONFIG_NET_SCH_PRIO is not set
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_RED=y
CONFIG_NET_SCH_SFQ=y
# CONFIG_NET_SCH_TEQL is not set
CONFIG_NET_SCH_TBF=y
# CONFIG_NET_SCH_GRED is not set
# CONFIG_NET_SCH_DSMARK is not set
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_DRR=y
CONFIG_NET_SCH_INGRESS=y
#
# Classification
#
CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=y
CONFIG_NET_CLS_TCINDEX=y
# CONFIG_NET_CLS_ROUTE4 is not set
CONFIG_NET_CLS_FW=y
# CONFIG_NET_CLS_U32 is not set
# CONFIG_NET_CLS_RSVP is not set
# CONFIG_NET_CLS_RSVP6 is not set
# CONFIG_NET_CLS_FLOW is not set
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_GACT=y
# CONFIG_GACT_PROB is not set
# CONFIG_NET_ACT_MIRRED is not set
# CONFIG_NET_ACT_NAT is not set
# CONFIG_NET_ACT_PEDIT is not set
CONFIG_NET_ACT_SIMP=y
CONFIG_NET_ACT_SKBEDIT=y
CONFIG_NET_CLS_IND=y
CONFIG_NET_SCH_FIFO=y
CONFIG_DCB=y
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
CONFIG_HAMRADIO=y
#
# Packet Radio protocols
#
CONFIG_AX25=y
# CONFIG_AX25_DAMA_SLAVE is not set
CONFIG_NETROM=y
# CONFIG_ROSE is not set
#
# AX.25 network device drivers
#
CONFIG_MKISS=y
CONFIG_6PACK=y
CONFIG_BPQETHER=y
CONFIG_BAYCOM_SER_FDX=y
CONFIG_BAYCOM_SER_HDX=y
CONFIG_YAM=y
# CONFIG_CAN is not set
CONFIG_IRDA=y
#
# IrDA protocols
#
CONFIG_IRLAN=y
# CONFIG_IRNET is not set
CONFIG_IRCOMM=y
CONFIG_IRDA_ULTRA=y
#
# IrDA options
#
CONFIG_IRDA_CACHE_LAST_LSAP=y
# CONFIG_IRDA_FAST_RR is not set
CONFIG_IRDA_DEBUG=y
#
# Infrared-port device drivers
#
#
# SIR device drivers
#
CONFIG_IRTTY_SIR=y
#
# Dongle support
#
CONFIG_DONGLE=y
CONFIG_ESI_DONGLE=y
CONFIG_ACTISYS_DONGLE=y
CONFIG_TEKRAM_DONGLE=y
CONFIG_TOIM3232_DONGLE=y
# CONFIG_LITELINK_DONGLE is not set
CONFIG_MA600_DONGLE=y
CONFIG_GIRBIL_DONGLE=y
CONFIG_MCP2120_DONGLE=y
CONFIG_OLD_BELKIN_DONGLE=y
CONFIG_ACT200L_DONGLE=y
# CONFIG_KINGSUN_DONGLE is not set
CONFIG_KSDAZZLE_DONGLE=y
CONFIG_KS959_DONGLE=y
#
# FIR device drivers
#
CONFIG_USB_IRDA=y
CONFIG_SIGMATEL_FIR=y
CONFIG_NSC_FIR=y
# CONFIG_WINBOND_FIR is not set
CONFIG_SMC_IRCC_FIR=y
# CONFIG_ALI_FIR is not set
# CONFIG_VLSI_FIR is not set
CONFIG_VIA_FIR=y
# CONFIG_MCS_FIR is not set
# CONFIG_BT is not set
CONFIG_AF_RXRPC=y
CONFIG_AF_RXRPC_DEBUG=y
# CONFIG_RXKAD is not set
CONFIG_PHONET=y
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
CONFIG_CFG80211=y
CONFIG_CFG80211_REG_DEBUG=y
# CONFIG_NL80211 is not set
CONFIG_WIRELESS_OLD_REGULATORY=y
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
CONFIG_LIB80211=y
CONFIG_LIB80211_CRYPT_WEP=y
CONFIG_LIB80211_CRYPT_CCMP=y
CONFIG_LIB80211_CRYPT_TKIP=y
# CONFIG_LIB80211_DEBUG is not set
# CONFIG_MAC80211 is not set
CONFIG_WIMAX=y
CONFIG_WIMAX_DEBUG_LEVEL=8
CONFIG_RFKILL=y
# CONFIG_RFKILL_INPUT is not set
CONFIG_RFKILL_LEDS=y
#
# Device Drivers
#
#
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_EXTRA_FIRMWARE=""
CONFIG_DEBUG_DRIVER=y
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
CONFIG_PNP_DEBUG_MESSAGES=y
#
# Protocols
#
CONFIG_PNPACPI=y
CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_CPQ_DA=y
CONFIG_BLK_CPQ_CISS_DA=y
CONFIG_CISS_SCSI_TAPE=y
CONFIG_BLK_DEV_DAC960=y
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_SX8=y
CONFIG_BLK_DEV_UB=y
# CONFIG_BLK_DEV_RAM is not set
CONFIG_CDROM_PKTCDVD=y
CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
# CONFIG_ATA_OVER_ETH is not set
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_HD=y
CONFIG_MISC_DEVICES=y
CONFIG_IBM_ASM=y
CONFIG_PHANTOM=y
CONFIG_SGI_IOC4=y
CONFIG_TIFM_CORE=y
# CONFIG_TIFM_7XX1 is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_HP_ILO=y
# CONFIG_DELL_LAPTOP is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
CONFIG_EEPROM_AT25=y
CONFIG_EEPROM_93CX6=y
CONFIG_HAVE_IDE=y
#
# SCSI device support
#
CONFIG_RAID_ATTRS=y
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
CONFIG_SCSI_TGT=y
CONFIG_SCSI_NETLINK=y
CONFIG_SCSI_PROC_FS=y
#
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
CONFIG_CHR_DEV_SCH=y
# CONFIG_SCSI_ENCLOSURE is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
# CONFIG_SCSI_SCAN_ASYNC is not set
#
# SCSI Transports
#
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_FC_TGT_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SAS_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
# CONFIG_SCSI_SRP_TGT_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_SCSI_AIC7XXX=y
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=y
CONFIG_SCSI_DH_HP_SW=y
CONFIG_SCSI_DH_EMC=y
CONFIG_SCSI_DH_ALUA=y
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
CONFIG_ATA_ACPI=y
# CONFIG_SATA_PMP is not set
CONFIG_SATA_AHCI=y
# CONFIG_SATA_SIL24 is not set
CONFIG_ATA_SFF=y
CONFIG_SATA_SVW=y
CONFIG_ATA_PIIX=y
# CONFIG_SATA_MV is not set
CONFIG_SATA_NV=y
CONFIG_PDC_ADMA=y
CONFIG_SATA_QSTOR=y
CONFIG_SATA_PROMISE=y
CONFIG_SATA_SX4=y
# CONFIG_SATA_SIL is not set
CONFIG_SATA_SIS=y
CONFIG_SATA_ULI=y
CONFIG_SATA_VIA=y
# CONFIG_SATA_VITESSE is not set
CONFIG_SATA_INIC162X=y
CONFIG_PATA_ACPI=y
# CONFIG_PATA_ALI is not set
CONFIG_PATA_AMD=y
CONFIG_PATA_ARTOP=y
CONFIG_PATA_ATIIXP=y
CONFIG_PATA_CMD640_PCI=y
CONFIG_PATA_CMD64X=y
CONFIG_PATA_CS5520=y
# CONFIG_PATA_CS5530 is not set
CONFIG_PATA_CYPRESS=y
CONFIG_PATA_EFAR=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_HPT366=y
# CONFIG_PATA_HPT37X is not set
CONFIG_PATA_HPT3X2N=y
# CONFIG_PATA_HPT3X3 is not set
# CONFIG_PATA_IT821X is not set
CONFIG_PATA_IT8213=y
CONFIG_PATA_JMICRON=y
# CONFIG_PATA_TRIFLEX is not set
CONFIG_PATA_MARVELL=y
CONFIG_PATA_MPIIX=y
CONFIG_PATA_OLDPIIX=y
# CONFIG_PATA_NETCELL is not set
# CONFIG_PATA_NINJA32 is not set
CONFIG_PATA_NS87410=y
CONFIG_PATA_NS87415=y
# CONFIG_PATA_OPTI is not set
CONFIG_PATA_OPTIDMA=y
CONFIG_PATA_PDC_OLD=y
CONFIG_PATA_RADISYS=y
CONFIG_PATA_RZ1000=y
# CONFIG_PATA_SC1200 is not set
CONFIG_PATA_SERVERWORKS=y
CONFIG_PATA_PDC2027X=y
CONFIG_PATA_SIL680=y
CONFIG_PATA_SIS=y
CONFIG_PATA_VIA=y
CONFIG_PATA_WINBOND=y
# CONFIG_PATA_PLATFORM is not set
CONFIG_PATA_SCH=y
# CONFIG_MD is not set
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
#
# Enable only one of the two stacks, unless you know what you are doing
#
CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_OHCI=y
CONFIG_FIREWIRE_OHCI_DEBUG=y
CONFIG_FIREWIRE_SBP2=y
CONFIG_IEEE1394=y
CONFIG_IEEE1394_OHCI1394=y
#
# PCILynx controller requires I2C
#
# CONFIG_IEEE1394_SBP2 is not set
# CONFIG_IEEE1394_ETH1394_ROM_ENTRY is not set
# CONFIG_IEEE1394_ETH1394 is not set
# CONFIG_IEEE1394_RAWIO is not set
CONFIG_IEEE1394_VIDEO1394=y
# CONFIG_IEEE1394_DV1394 is not set
CONFIG_IEEE1394_VERBOSEDEBUG=y
CONFIG_I2O=y
# CONFIG_I2O_LCT_NOTIFY_ON_CHANGES is not set
CONFIG_I2O_EXT_ADAPTEC=y
# CONFIG_I2O_EXT_ADAPTEC_DMA64 is not set
# CONFIG_I2O_BUS is not set
# CONFIG_I2O_BLOCK is not set
CONFIG_I2O_SCSI=y
# CONFIG_I2O_PROC is not set
# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
# CONFIG_IFB is not set
CONFIG_DUMMY=y
CONFIG_BONDING=y
CONFIG_MACVLAN=y
CONFIG_EQUALIZER=y
CONFIG_TUN=y
CONFIG_VETH=y
CONFIG_NET_SB1000=y
# CONFIG_ARCNET is not set
CONFIG_PHYLIB=y
#
# MII PHY device drivers
#
# CONFIG_MARVELL_PHY is not set
# CONFIG_DAVICOM_PHY is not set
# CONFIG_QSEMI_PHY is not set
CONFIG_LXT_PHY=y
CONFIG_CICADA_PHY=y
# CONFIG_VITESSE_PHY is not set
CONFIG_SMSC_PHY=y
CONFIG_BROADCOM_PHY=y
CONFIG_ICPLUS_PHY=y
# CONFIG_REALTEK_PHY is not set
CONFIG_NATIONAL_PHY=y
CONFIG_STE10XP=y
CONFIG_LSI_ET1011C_PHY=y
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
CONFIG_SUNGEM=y
CONFIG_CASSINI=y
# CONFIG_NET_VENDOR_3COM is not set
CONFIG_VORTEX=y
# CONFIG_ENC28J60 is not set
CONFIG_DNET=y
CONFIG_NET_TULIP=y
CONFIG_DE2104X=y
# CONFIG_TULIP is not set
# CONFIG_DE4X5 is not set
# CONFIG_WINBOND_840 is not set
CONFIG_DM9102=y
CONFIG_ULI526X=y
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
CONFIG_PCNET32=y
# CONFIG_AMD8111_ETH is not set
CONFIG_ADAPTEC_STARFIRE=y
CONFIG_B44=y
CONFIG_B44_PCI_AUTOSELECT=y
CONFIG_B44_PCICORE_AUTOSELECT=y
CONFIG_B44_PCI=y
CONFIG_FORCEDETH=y
CONFIG_FORCEDETH_NAPI=y
CONFIG_E100=y
# CONFIG_FEALNX is not set
CONFIG_NATSEMI=y
CONFIG_NE2K_PCI=y
# CONFIG_8139CP is not set
CONFIG_8139TOO=y
CONFIG_8139TOO_PIO=y
CONFIG_8139TOO_TUNE_TWISTER=y
CONFIG_8139TOO_8129=y
CONFIG_8139_OLD_RX_RESET=y
CONFIG_R6040=y
CONFIG_SIS900=y
# CONFIG_EPIC100 is not set
# CONFIG_SMSC9420 is not set
CONFIG_SUNDANCE=y
CONFIG_SUNDANCE_MMIO=y
CONFIG_TLAN=y
CONFIG_VIA_RHINE=y
CONFIG_VIA_RHINE_MMIO=y
# CONFIG_SC92031 is not set
CONFIG_ATL2=y
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
CONFIG_DL2K=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IP1000=y
# CONFIG_IGB is not set
CONFIG_NS83820=y
CONFIG_HAMACHI=y
CONFIG_YELLOWFIN=y
CONFIG_R8169=y
# CONFIG_R8169_VLAN is not set
CONFIG_SIS190=y
CONFIG_SKGE=y
CONFIG_SKGE_DEBUG=y
CONFIG_SKY2=y
CONFIG_SKY2_DEBUG=y
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
CONFIG_BNX2=y
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
# CONFIG_ATL1C is not set
# CONFIG_JME is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_TR=y
# CONFIG_IBMOL is not set
CONFIG_3C359=y
CONFIG_TMS380TR=y
CONFIG_TMSPCI=y
CONFIG_ABYSS=y
#
# Wireless LAN
#
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
CONFIG_LIBERTAS=y
CONFIG_LIBERTAS_USB=y
CONFIG_LIBERTAS_SDIO=y
CONFIG_LIBERTAS_DEBUG=y
CONFIG_AIRO=y
# CONFIG_HERMES is not set
# CONFIG_ATMEL is not set
# CONFIG_PRISM54 is not set
CONFIG_USB_ZD1201=y
CONFIG_USB_NET_RNDIS_WLAN=y
# CONFIG_IPW2100 is not set
# CONFIG_IPW2200 is not set
# CONFIG_IWLWIFI_LEDS is not set
CONFIG_HOSTAP=y
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=y
CONFIG_HOSTAP_PCI=y
#
# WiMAX Wireless Broadband devices
#
CONFIG_WIMAX_I2400M=y
CONFIG_WIMAX_I2400M_SDIO=y
CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
#
# USB Network Adapters
#
# CONFIG_USB_CATC is not set
CONFIG_USB_KAWETH=y
CONFIG_USB_PEGASUS=y
CONFIG_USB_RTL8150=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_CDCETHER=y
# CONFIG_USB_NET_DM9601 is not set
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_NET_GL620A=y
# CONFIG_USB_NET_NET1080 is not set
CONFIG_USB_NET_PLUSB=y
CONFIG_USB_NET_MCS7830=y
CONFIG_USB_NET_RNDIS_HOST=y
CONFIG_USB_NET_CDC_SUBSET=y
# CONFIG_USB_ALI_M5632 is not set
CONFIG_USB_AN2720=y
# CONFIG_USB_BELKIN is not set
# CONFIG_USB_ARMLINUX is not set
CONFIG_USB_EPSON2888=y
# CONFIG_USB_KC2190 is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_USB_HSO=y
CONFIG_WAN=y
# CONFIG_LANMEDIA is not set
CONFIG_HDLC=y
CONFIG_HDLC_RAW=y
# CONFIG_HDLC_RAW_ETH is not set
CONFIG_HDLC_CISCO=y
CONFIG_HDLC_FR=y
CONFIG_HDLC_PPP=y
CONFIG_HDLC_X25=y
CONFIG_PCI200SYN=y
CONFIG_WANXL=y
# CONFIG_PC300TOO is not set
# CONFIG_FARSYNC is not set
CONFIG_DLCI=y
CONFIG_DLCI_MAX=8
CONFIG_LAPBETHER=y
CONFIG_X25_ASY=y
CONFIG_SBNI=y
# CONFIG_SBNI_MULTILINE is not set
CONFIG_ATM_DRIVERS=y
CONFIG_ATM_DUMMY=y
# CONFIG_ATM_TCP is not set
CONFIG_ATM_LANAI=y
# CONFIG_ATM_ENI is not set
# CONFIG_ATM_FIRESTREAM is not set
CONFIG_ATM_ZATM=y
CONFIG_ATM_ZATM_DEBUG=y
CONFIG_ATM_IDT77252=y
CONFIG_ATM_IDT77252_DEBUG=y
CONFIG_ATM_IDT77252_RCV_ALL=y
CONFIG_ATM_IDT77252_USE_SUNI=y
CONFIG_ATM_AMBASSADOR=y
CONFIG_ATM_AMBASSADOR_DEBUG=y
CONFIG_ATM_HORIZON=y
# CONFIG_ATM_HORIZON_DEBUG is not set
# CONFIG_ATM_IA is not set
# CONFIG_ATM_FORE200E is not set
CONFIG_ATM_HE=y
CONFIG_ATM_HE_USE_SUNI=y
CONFIG_ATM_SOLOS=y
CONFIG_FDDI=y
CONFIG_DEFXX=y
CONFIG_DEFXX_MMIO=y
CONFIG_SKFP=y
CONFIG_HIPPI=y
CONFIG_ROADRUNNER=y
# CONFIG_ROADRUNNER_LARGE_RINGS is not set
CONFIG_PPP=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
CONFIG_PPP_ASYNC=y
# CONFIG_PPP_SYNC_TTY is not set
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_BSDCOMP=y
# CONFIG_PPP_MPPE is not set
# CONFIG_PPPOE is not set
# CONFIG_PPPOATM is not set
CONFIG_PPPOL2TP=y
CONFIG_SLIP=y
# CONFIG_SLIP_COMPRESSED is not set
CONFIG_SLHC=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
# CONFIG_NET_FC is not set
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_NETPOLL=y
CONFIG_NETPOLL_TRAP=y
CONFIG_NET_POLL_CONTROLLER=y
CONFIG_VIRTIO_NET=y
CONFIG_ISDN=y
CONFIG_ISDN_I4L=y
# CONFIG_ISDN_PPP is not set
CONFIG_ISDN_AUDIO=y
# CONFIG_ISDN_TTY_FAX is not set
# CONFIG_ISDN_X25 is not set
#
# ISDN feature submodules
#
# CONFIG_ISDN_DIVERSION is not set
#
# ISDN4Linux hardware drivers
#
#
# Passive cards
#
# CONFIG_ISDN_DRV_HISAX is not set
#
# Active cards
#
CONFIG_ISDN_DRV_GIGASET=y
# CONFIG_GIGASET_BASE is not set
CONFIG_GIGASET_M105=y
CONFIG_GIGASET_M101=y
CONFIG_GIGASET_DEBUG=y
CONFIG_GIGASET_UNDOCREQ=y
CONFIG_ISDN_CAPI=y
# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
# CONFIG_CAPI_TRACE is not set
CONFIG_ISDN_CAPI_MIDDLEWARE=y
# CONFIG_ISDN_CAPI_CAPI20 is not set
CONFIG_ISDN_CAPI_CAPIDRV=y
#
# CAPI hardware drivers
#
CONFIG_CAPI_AVM=y
# CONFIG_ISDN_DRV_AVMB1_B1PCI is not set
CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=y
# CONFIG_ISDN_DRV_AVMB1_T1PCI is not set
CONFIG_ISDN_DRV_AVMB1_C4=y
# CONFIG_CAPI_EICON is not set
# CONFIG_PHONE is not set
#
# Input device support
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
CONFIG_INPUT_POLLDEV=y
#
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=y
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
CONFIG_KEYBOARD_SUNKBD=y
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_KEYBOARD_NEWTON=y
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_GPIO is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_MOUSE_SERIAL is not set
CONFIG_MOUSE_APPLETOUCH=y
CONFIG_MOUSE_BCM5974=y
CONFIG_MOUSE_VSXXXAA=y
CONFIG_MOUSE_GPIO=y
# CONFIG_INPUT_JOYSTICK is not set
CONFIG_INPUT_TABLET=y
# CONFIG_TABLET_USB_ACECAD is not set
# CONFIG_TABLET_USB_AIPTEK is not set
CONFIG_TABLET_USB_GTCO=y
# CONFIG_TABLET_USB_KBTAB is not set
# CONFIG_TABLET_USB_WACOM is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
CONFIG_TOUCHSCREEN_FUJITSU=y
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
CONFIG_TOUCHSCREEN_WACOM_W8001=y
CONFIG_TOUCHSCREEN_MTOUCH=y
CONFIG_TOUCHSCREEN_INEXIO=y
CONFIG_TOUCHSCREEN_MK712=y
CONFIG_TOUCHSCREEN_PENMOUNT=y
CONFIG_TOUCHSCREEN_TOUCHRIGHT=y
CONFIG_TOUCHSCREEN_TOUCHWIN=y
CONFIG_TOUCHSCREEN_USB_COMPOSITE=y
CONFIG_TOUCHSCREEN_USB_EGALAX=y
CONFIG_TOUCHSCREEN_USB_PANJIT=y
# CONFIG_TOUCHSCREEN_USB_3M is not set
CONFIG_TOUCHSCREEN_USB_ITM=y
# CONFIG_TOUCHSCREEN_USB_ETURBO is not set
CONFIG_TOUCHSCREEN_USB_GUNZE=y
CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
# CONFIG_TOUCHSCREEN_USB_GOTOP is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=y
# CONFIG_INPUT_ATLAS_BTNS is not set
CONFIG_INPUT_ATI_REMOTE=y
CONFIG_INPUT_ATI_REMOTE2=y
# CONFIG_INPUT_KEYSPAN_REMOTE is not set
CONFIG_INPUT_POWERMATE=y
CONFIG_INPUT_YEALINK=y
CONFIG_INPUT_CM109=y
CONFIG_INPUT_UINPUT=y
#
# Hardware I/O ports
#
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_CT82C710=y
CONFIG_SERIO_PCIPS2=y
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=y
# CONFIG_GAMEPORT is not set
#
# Character devices
#
CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_COMPUTONE is not set
CONFIG_ROCKETPORT=y
CONFIG_CYCLADES=y
# CONFIG_CYZ_INTR is not set
CONFIG_DIGIEPCA=y
# CONFIG_MOXA_INTELLIO is not set
CONFIG_MOXA_SMARTIO=y
CONFIG_ISI=y
CONFIG_SYNCLINK=y
CONFIG_SYNCLINKMP=y
CONFIG_SYNCLINK_GT=y
CONFIG_N_HDLC=y
CONFIG_RISCOM8=y
CONFIG_SPECIALIX=y
CONFIG_SX=y
# CONFIG_RIO is not set
CONFIG_STALDRV=y
# CONFIG_STALLION is not set
# CONFIG_ISTALLION is not set
# CONFIG_NOZOMI is not set
#
# Serial drivers
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_FIX_EARLYCON_MEM=y
CONFIG_SERIAL_8250_PCI=y
CONFIG_SERIAL_8250_PNP=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
CONFIG_SERIAL_8250_RSA=y
#
# Non-8250 serial port support
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_CONSOLE_POLL=y
CONFIG_SERIAL_JSM=y
CONFIG_UNIX98_PTYS=y
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
CONFIG_HVC_DRIVER=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=y
CONFIG_IPMI_PANIC_EVENT=y
CONFIG_IPMI_PANIC_STRING=y
CONFIG_IPMI_DEVICE_INTERFACE=y
CONFIG_IPMI_SI=y
# CONFIG_IPMI_WATCHDOG is not set
CONFIG_IPMI_POWEROFF=y
# CONFIG_HW_RANDOM is not set
# CONFIG_NVRAM is not set
# CONFIG_R3964 is not set
CONFIG_APPLICOM=y
CONFIG_MWAVE=y
# CONFIG_PC8736x_GPIO is not set
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=256
# CONFIG_HPET is not set
CONFIG_HANGCHECK_TIMER=y
CONFIG_TCG_TPM=y
# CONFIG_TCG_TIS is not set
CONFIG_TCG_NSC=y
CONFIG_TCG_ATMEL=y
CONFIG_TCG_INFINEON=y
# CONFIG_TELCLOCK is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
#
# SPI Master Controller Drivers
#
CONFIG_SPI_BITBANG=y
CONFIG_SPI_GPIO=y
#
# SPI Protocol Masters
#
CONFIG_SPI_SPIDEV=y
CONFIG_SPI_TLE62X0=y
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
#
# Memory mapped GPIO expanders:
#
#
# I2C GPIO expanders:
#
#
# PCI GPIO expanders:
#
CONFIG_GPIO_BT8XX=y
#
# SPI GPIO expanders:
#
CONFIG_GPIO_MAX7301=y
# CONFIG_GPIO_MCP23S08 is not set
# CONFIG_W1 is not set
CONFIG_POWER_SUPPLY=y
CONFIG_POWER_SUPPLY_DEBUG=y
CONFIG_PDA_POWER=y
# CONFIG_BATTERY_DS2760 is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=y
CONFIG_SENSORS_ABITUGURU=y
CONFIG_SENSORS_ABITUGURU3=y
CONFIG_SENSORS_ADCXX=y
CONFIG_SENSORS_K8TEMP=y
CONFIG_SENSORS_I5K_AMB=y
CONFIG_SENSORS_F71805F=y
# CONFIG_SENSORS_F71882FG is not set
CONFIG_SENSORS_CORETEMP=y
CONFIG_SENSORS_IBMAEM=y
# CONFIG_SENSORS_IBMPEX is not set
CONFIG_SENSORS_IT87=y
CONFIG_SENSORS_LM70=y
CONFIG_SENSORS_MAX1111=y
# CONFIG_SENSORS_PC87360 is not set
CONFIG_SENSORS_PC87427=y
# CONFIG_SENSORS_SIS5595 is not set
CONFIG_SENSORS_SMSC47M1=y
CONFIG_SENSORS_SMSC47B397=y
CONFIG_SENSORS_VIA686A=y
# CONFIG_SENSORS_VT1211 is not set
CONFIG_SENSORS_VT8231=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_SENSORS_LIS3LV02D is not set
CONFIG_SENSORS_APPLESMC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_THERMAL=y
CONFIG_THERMAL_HWMON=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
#
# Watchdog Device Drivers
#
CONFIG_SOFT_WATCHDOG=y
CONFIG_ACQUIRE_WDT=y
CONFIG_ADVANTECH_WDT=y
# CONFIG_ALIM1535_WDT is not set
CONFIG_ALIM7101_WDT=y
CONFIG_SC520_WDT=y
CONFIG_IB700_WDT=y
# CONFIG_IBMASR is not set
CONFIG_WAFER_WDT=y
CONFIG_I6300ESB_WDT=y
# CONFIG_ITCO_WDT is not set
CONFIG_IT8712F_WDT=y
# CONFIG_IT87_WDT is not set
CONFIG_HP_WATCHDOG=y
# CONFIG_SC1200_WDT is not set
CONFIG_PC87413_WDT=y
# CONFIG_60XX_WDT is not set
# CONFIG_SBC8360_WDT is not set
CONFIG_CPU5_WDT=y
# CONFIG_SMSC_SCH311X_WDT is not set
CONFIG_SMSC37B787_WDT=y
CONFIG_W83627HF_WDT=y
CONFIG_W83877F_WDT=y
CONFIG_W83977F_WDT=y
CONFIG_MACHZ_WDT=y
CONFIG_SBC_EPX_C3_WATCHDOG=y
#
# PCI-based Watchdog Cards
#
CONFIG_PCIPCWATCHDOG=y
CONFIG_WDTPCI=y
# CONFIG_WDT_501_PCI is not set
#
# USB-based Watchdog Cards
#
CONFIG_USBPCWATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
CONFIG_SSB=y
CONFIG_SSB_SPROM=y
CONFIG_SSB_PCIHOST_POSSIBLE=y
CONFIG_SSB_PCIHOST=y
# CONFIG_SSB_B43_PCI_BRIDGE is not set
CONFIG_SSB_SILENT=y
CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
CONFIG_SSB_DRIVER_PCICORE=y
#
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
CONFIG_MFD_SM501=y
CONFIG_MFD_SM501_GPIO=y
CONFIG_HTC_PASIC3=y
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
#
# Multimedia core support
#
CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_COMMON=y
# CONFIG_VIDEO_ALLOW_V4L1 is not set
# CONFIG_VIDEO_V4L1_COMPAT is not set
CONFIG_DVB_CORE=y
CONFIG_VIDEO_MEDIA=y
#
# Multimedia drivers
#
CONFIG_VIDEO_V4L2=y
CONFIG_VIDEOBUF_GEN=y
CONFIG_VIDEOBUF_VMALLOC=y
CONFIG_VIDEO_CAPTURE_DRIVERS=y
# CONFIG_VIDEO_ADV_DEBUG is not set
# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
#
# Encoders/decoders and other helper chips
#
#
# Audio decoders
#
#
# Video decoders
#
#
# Video and audio decoders
#
#
# MPEG video encoders
#
# CONFIG_VIDEO_CX2341X is not set
#
# Video encoders
#
#
# Video improvement chips
#
CONFIG_VIDEO_VIVI=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_PLATFORM=y
# CONFIG_V4L_USB_DRIVERS is not set
CONFIG_RADIO_ADAPTERS=y
CONFIG_RADIO_GEMTEK_PCI=y
CONFIG_RADIO_MAXIRADIO=y
# CONFIG_RADIO_MAESTRO is not set
CONFIG_USB_DSBR=y
# CONFIG_USB_SI470X is not set
CONFIG_USB_MR800=y
CONFIG_DVB_DYNAMIC_MINORS=y
# CONFIG_DVB_CAPTURE_DRIVERS is not set
CONFIG_DAB=y
CONFIG_USB_DABUSB=y
#
# Graphics support
#
CONFIG_AGP=y
# CONFIG_AGP_AMD64 is not set
# CONFIG_AGP_INTEL is not set
CONFIG_AGP_SIS=y
CONFIG_AGP_VIA=y
# CONFIG_DRM is not set
CONFIG_VGASTATE=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
# CONFIG_FB_DDC is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
CONFIG_FB_SVGALIB=y
# CONFIG_FB_MACMODES is not set
CONFIG_FB_BACKLIGHT=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
#
# Frame buffer hardware drivers
#
CONFIG_FB_PM2=y
CONFIG_FB_PM2_FIFO_DISCONNECT=y
CONFIG_FB_CYBER2000=y
# CONFIG_FB_ARC is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_N411 is not set
CONFIG_FB_HGA=y
# CONFIG_FB_HGA_ACCEL is not set
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_NVIDIA=y
# CONFIG_FB_NVIDIA_I2C is not set
CONFIG_FB_NVIDIA_DEBUG=y
CONFIG_FB_NVIDIA_BACKLIGHT=y
CONFIG_FB_RIVA=y
# CONFIG_FB_RIVA_I2C is not set
CONFIG_FB_RIVA_DEBUG=y
CONFIG_FB_RIVA_BACKLIGHT=y
# CONFIG_FB_LE80578 is not set
CONFIG_FB_MATROX=y
# CONFIG_FB_MATROX_MILLENIUM is not set
CONFIG_FB_MATROX_MYSTIQUE=y
# CONFIG_FB_MATROX_G is not set
# CONFIG_FB_MATROX_I2C is not set
CONFIG_FB_MATROX_MULTIHEAD=y
# CONFIG_FB_ATY128 is not set
CONFIG_FB_ATY=y
# CONFIG_FB_ATY_CT is not set
CONFIG_FB_ATY_GX=y
CONFIG_FB_ATY_BACKLIGHT=y
CONFIG_FB_S3=y
CONFIG_FB_SAVAGE=y
# CONFIG_FB_SAVAGE_I2C is not set
# CONFIG_FB_SAVAGE_ACCEL is not set
# CONFIG_FB_SIS is not set
# CONFIG_FB_VIA is not set
CONFIG_FB_NEOMAGIC=y
CONFIG_FB_KYRO=y
CONFIG_FB_3DFX=y
CONFIG_FB_3DFX_ACCEL=y
CONFIG_FB_VOODOO1=y
CONFIG_FB_VT8623=y
CONFIG_FB_TRIDENT=y
CONFIG_FB_TRIDENT_ACCEL=y
CONFIG_FB_ARK=y
CONFIG_FB_PM3=y
CONFIG_FB_CARMINE=y
# CONFIG_FB_CARMINE_DRAM_EVAL is not set
CONFIG_CARMINE_DRAM_CUSTOM=y
CONFIG_FB_GEODE=y
# CONFIG_FB_GEODE_LX is not set
# CONFIG_FB_GEODE_GX is not set
# CONFIG_FB_GEODE_GX1 is not set
# CONFIG_FB_SM501 is not set
# CONFIG_FB_METRONOME is not set
CONFIG_FB_MB862XX=y
# CONFIG_FB_MB862XX_PCI_GDC is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
CONFIG_LCD_TDO24M=y
# CONFIG_LCD_VGG2432A4 is not set
# CONFIG_LCD_PLATFORM is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=y
CONFIG_BACKLIGHT_PROGEAR=y
# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
# CONFIG_BACKLIGHT_SAHARA is not set
#
# Display device support
#
CONFIG_DISPLAY_SUPPORT=y
#
# Display hardware drivers
#
#
# Console display driver support
#
CONFIG_VGA_CONSOLE=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
CONFIG_DUMMY_CONSOLE=y
CONFIG_LOGO=y
CONFIG_LOGO_LINUX_MONO=y
CONFIG_LOGO_LINUX_VGA16=y
CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
# CONFIG_SND is not set
CONFIG_SOUND_PRIME=y
# CONFIG_SOUND_OSS is not set
# CONFIG_HID_SUPPORT is not set
CONFIG_USB_MOUSE=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
CONFIG_USB_DEBUG=y
# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
#
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_DYNAMIC_MINORS=y
# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
CONFIG_USB_OTG_WHITELIST=y
CONFIG_USB_OTG_BLACKLIST_HUB=y
CONFIG_USB_MON=y
CONFIG_USB_WUSB=y
CONFIG_USB_WUSB_CBAF=y
CONFIG_USB_WUSB_CBAF_DEBUG=y
#
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_OXU210HP_HCD is not set
CONFIG_USB_ISP116X_HCD=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_SSB is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_U132_HCD is not set
CONFIG_USB_SL811_HCD=y
CONFIG_USB_R8A66597_HCD=y
CONFIG_USB_HWA_HCD=y
#
# USB Device Class drivers
#
CONFIG_USB_ACM=y
CONFIG_USB_PRINTER=y
# CONFIG_USB_WDM is not set
CONFIG_USB_TMC=y
#
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
CONFIG_USB_STORAGE_DATAFAB=y
# CONFIG_USB_STORAGE_FREECOM is not set
CONFIG_USB_STORAGE_ISD200=y
# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
CONFIG_USB_STORAGE_SDDR55=y
# CONFIG_USB_STORAGE_JUMPSHOT is not set
CONFIG_USB_STORAGE_ALAUDA=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_KARMA=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_LIBUSUAL=y
#
# USB Imaging devices
#
CONFIG_USB_MDC800=y
CONFIG_USB_MICROTEK=y
#
# USB port drivers
#
# CONFIG_USB_SERIAL is not set
#
# USB Miscellaneous drivers
#
# CONFIG_USB_EMI62 is not set
CONFIG_USB_EMI26=y
CONFIG_USB_ADUTUX=y
# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
CONFIG_USB_LEGOTOWER=y
# CONFIG_USB_LCD is not set
CONFIG_USB_BERRY_CHARGE=y
# CONFIG_USB_LED is not set
CONFIG_USB_CYPRESS_CY7C63=y
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
CONFIG_USB_FTDI_ELAN=y
# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_SISUSBVGA is not set
CONFIG_USB_LD=y
CONFIG_USB_TRANCEVIBRATOR=y
CONFIG_USB_IOWARRIOR=y
CONFIG_USB_TEST=y
# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_VST=y
CONFIG_USB_ATM=y
# CONFIG_USB_SPEEDTOUCH is not set
# CONFIG_USB_CXACRU is not set
CONFIG_USB_UEAGLEATM=y
CONFIG_USB_XUSBATM=y
#
# OTG and related infrastructure
#
CONFIG_USB_OTG_UTILS=y
CONFIG_USB_GPIO_VBUS=y
CONFIG_UWB=y
CONFIG_UWB_HWA=y
CONFIG_UWB_WHCI=y
CONFIG_UWB_WLP=y
CONFIG_UWB_I1480U=y
CONFIG_UWB_I1480U_WLP=y
CONFIG_MMC=y
CONFIG_MMC_DEBUG=y
CONFIG_MMC_UNSAFE_RESUME=y
#
# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
# CONFIG_SDIO_UART is not set
# CONFIG_MMC_TEST is not set
#
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
CONFIG_MMC_WBSD=y
CONFIG_MMC_TIFM_SD=y
CONFIG_MMC_SPI=y
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
#
# LED drivers
#
CONFIG_LEDS_ALIX2=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_CLEVO_MAIL=y
#
# LED Triggers
#
CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
# CONFIG_ACCESSIBILITY is not set
# CONFIG_EDAC is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
CONFIG_RTC_DEBUG=y
#
# RTC interfaces
#
CONFIG_RTC_INTF_SYSFS=y
CONFIG_RTC_INTF_PROC=y
CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_TEST=y
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
CONFIG_RTC_DRV_DS1305=y
CONFIG_RTC_DRV_DS1390=y
CONFIG_RTC_DRV_MAX6902=y
CONFIG_RTC_DRV_R9701=y
# CONFIG_RTC_DRV_RS5C348 is not set
CONFIG_RTC_DRV_DS3234=y
#
# Platform RTC drivers
#
# CONFIG_RTC_DRV_CMOS is not set
CONFIG_RTC_DRV_DS1286=y
CONFIG_RTC_DRV_DS1511=y
# CONFIG_RTC_DRV_DS1553 is not set
CONFIG_RTC_DRV_DS1742=y
CONFIG_RTC_DRV_STK17TA8=y
CONFIG_RTC_DRV_M48T86=y
CONFIG_RTC_DRV_M48T35=y
CONFIG_RTC_DRV_M48T59=y
CONFIG_RTC_DRV_BQ4802=y
CONFIG_RTC_DRV_V3020=y
#
# on-CPU RTC drivers
#
CONFIG_DMADEVICES=y
#
# DMA Devices
#
CONFIG_INTEL_IOATDMA=y
CONFIG_DMA_ENGINE=y
#
# DMA Clients
#
CONFIG_NET_DMA=y
# CONFIG_DMATEST is not set
CONFIG_DCA=y
CONFIG_UIO=y
CONFIG_UIO_CIF=y
CONFIG_UIO_PDRV=y
CONFIG_UIO_PDRV_GENIRQ=y
CONFIG_UIO_SMX=y
# CONFIG_UIO_SERCOS3 is not set
CONFIG_X86_PLATFORM_DEVICES=y
# CONFIG_ACER_WMI is not set
CONFIG_FUJITSU_LAPTOP=y
# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
CONFIG_HP_WMI=y
CONFIG_MSI_LAPTOP=y
# CONFIG_PANASONIC_LAPTOP is not set
CONFIG_COMPAL_LAPTOP=y
CONFIG_SONY_LAPTOP=y
CONFIG_SONYPI_COMPAT=y
# CONFIG_THINKPAD_ACPI is not set
CONFIG_EEEPC_LAPTOP=y
CONFIG_ACPI_WMI=y
CONFIG_ACPI_ASUS=y
CONFIG_ACPI_TOSHIBA=y
#
# Firmware Drivers
#
CONFIG_EDD=y
CONFIG_EDD_OFF=y
CONFIG_FIRMWARE_MEMMAP=y
# CONFIG_DELL_RBU is not set
CONFIG_DCDBAS=y
CONFIG_DMIID=y
CONFIG_ISCSI_IBFT_FIND=y
CONFIG_ISCSI_IBFT=y
#
# File systems
#
# CONFIG_EXT2_FS is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4DEV_COMPAT=y
CONFIG_EXT4_FS_XATTR=y
# CONFIG_EXT4_FS_POSIX_ACL is not set
CONFIG_EXT4_FS_SECURITY=y
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_FILE_LOCKING=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
# CONFIG_XFS_POSIX_ACL is not set
CONFIG_XFS_RT=y
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=y
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=y
CONFIG_OCFS2_FS_O2CB=y
CONFIG_OCFS2_FS_USERSPACE_CLUSTER=y
CONFIG_OCFS2_FS_STATS=y
CONFIG_OCFS2_DEBUG_MASKLOG=y
CONFIG_OCFS2_DEBUG_FS=y
CONFIG_OCFS2_FS_POSIX_ACL=y
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_PRINT_QUOTA_WARNING=y
CONFIG_QUOTA_TREE=y
# CONFIG_QFMT_V1 is not set
CONFIG_QFMT_V2=y
CONFIG_QUOTACTL=y
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
#
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=y
CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
#
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
# CONFIG_VFAT_FS is not set
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_NTFS_FS=y
# CONFIG_NTFS_DEBUG is not set
# CONFIG_NTFS_RW is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_VMCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
CONFIG_NFS_ACL_SUPPORT=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=y
CONFIG_RPCSEC_GSS_SPKM3=y
CONFIG_SMB_FS=y
CONFIG_SMB_NLS_DEFAULT=y
CONFIG_SMB_NLS_REMOTE="cp437"
CONFIG_CIFS=y
CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
# CONFIG_CIFS_UPCALL is not set
# CONFIG_CIFS_XATTR is not set
CONFIG_CIFS_DEBUG2=y
# CONFIG_CIFS_EXPERIMENTAL is not set
CONFIG_NCP_FS=y
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_NCPFS_IOCTL_LOCKING=y
CONFIG_NCPFS_STRONG=y
CONFIG_NCPFS_NFS_NS=y
CONFIG_NCPFS_OS2_NS=y
CONFIG_NCPFS_SMALLDOS=y
CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=y
CONFIG_AFS_FS=y
CONFIG_AFS_DEBUG=y
#
# Partition Types
#
CONFIG_PARTITION_ADVANCED=y
CONFIG_ACORN_PARTITION=y
CONFIG_ACORN_PARTITION_CUMANA=y
# CONFIG_ACORN_PARTITION_EESOX is not set
CONFIG_ACORN_PARTITION_ICS=y
CONFIG_ACORN_PARTITION_ADFS=y
CONFIG_ACORN_PARTITION_POWERTEC=y
# CONFIG_ACORN_PARTITION_RISCIX is not set
CONFIG_OSF_PARTITION=y
CONFIG_AMIGA_PARTITION=y
CONFIG_ATARI_PARTITION=y
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
CONFIG_BSD_DISKLABEL=y
# CONFIG_MINIX_SUBPARTITION is not set
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
CONFIG_SGI_PARTITION=y
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
CONFIG_KARMA_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
CONFIG_NLS_CODEPAGE_775=y
# CONFIG_NLS_CODEPAGE_850 is not set
CONFIG_NLS_CODEPAGE_852=y
CONFIG_NLS_CODEPAGE_855=y
CONFIG_NLS_CODEPAGE_857=y
# CONFIG_NLS_CODEPAGE_860 is not set
CONFIG_NLS_CODEPAGE_861=y
CONFIG_NLS_CODEPAGE_862=y
# CONFIG_NLS_CODEPAGE_863 is not set
CONFIG_NLS_CODEPAGE_864=y
CONFIG_NLS_CODEPAGE_865=y
CONFIG_NLS_CODEPAGE_866=y
CONFIG_NLS_CODEPAGE_869=y
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
CONFIG_NLS_CODEPAGE_932=y
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
CONFIG_NLS_CODEPAGE_1251=y
# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
CONFIG_NLS_ISO8859_2=y
# CONFIG_NLS_ISO8859_3 is not set
CONFIG_NLS_ISO8859_4=y
CONFIG_NLS_ISO8859_5=y
CONFIG_NLS_ISO8859_6=y
CONFIG_NLS_ISO8859_7=y
CONFIG_NLS_ISO8859_9=y
CONFIG_NLS_ISO8859_13=y
# CONFIG_NLS_ISO8859_14 is not set
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
# CONFIG_NLS_KOI8_U is not set
CONFIG_NLS_UTF8=y
CONFIG_DLM=y
CONFIG_DLM_DEBUG=y
#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_PRINTK_TIME=y
# CONFIG_ALLOW_WARNINGS is not set
CONFIG_FRAME_WARN=2048
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_OBJECTS=y
# CONFIG_DEBUG_OBJECTS_SELFTEST is not set
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_PI_LIST=y
CONFIG_RT_MUTEX_TESTER=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCKDEP=y
# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_LOCKDEP=y
CONFIG_TRACE_IRQFLAGS=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
CONFIG_STACKTRACE=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_VM=y
# CONFIG_DEBUG_VIRTUAL is not set
CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
# CONFIG_DEBUG_SG is not set
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_ARCH_WANT_FRAME_POINTERS=y
CONFIG_FRAME_POINTER=y
CONFIG_BOOT_PRINTK_DELAY=y
CONFIG_RCU_TORTURE_TEST=y
CONFIG_RCU_TORTURE_TEST_RUNNABLE=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_BACKTRACE_SELF_TEST=y
# CONFIG_FAULT_INJECTION is not set
CONFIG_LATENCYTOP=y
CONFIG_USER_STACKTRACE_SUPPORT=y
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FTRACE_NMI_ENTER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_HW_BRANCH_TRACER=y
CONFIG_HAVE_FTRACE_SYSCALLS=y
CONFIG_TRACER_MAX_TRACE=y
CONFIG_RING_BUFFER=y
CONFIG_FTRACE_NMI_ENTER=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
#
# Tracers
#
CONFIG_FUNCTION_TRACER=y
CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_IRQSOFF_TRACER=y
# CONFIG_SYSPROF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
CONFIG_CONTEXT_SWITCH_TRACER=y
# CONFIG_EVENT_TRACER is not set
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BOOT_TRACER=y
CONFIG_POWER_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_HW_BRANCH_TRACER=y
CONFIG_KMEMTRACE=y
CONFIG_WORKQUEUE_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_FTRACE_MCOUNT_RECORD=y
CONFIG_FTRACE_SELFTEST=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_MMIOTRACE is not set
CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_FIREWIRE_OHCI_REMOTE_DMA=y
CONFIG_BUILD_DOCSRC=y
CONFIG_DYNAMIC_PRINTK_DEBUG=y
CONFIG_DMA_API_DEBUG=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_KGDB=y
CONFIG_KGDB_SERIAL_CONSOLE=y
# CONFIG_KGDB_TESTS is not set
CONFIG_HAVE_ARCH_KMEMCHECK=y
CONFIG_STRICT_DEVMEM=y
CONFIG_X86_VERBOSE_BOOTUP=y
CONFIG_EARLY_PRINTK=y
CONFIG_EARLY_PRINTK_DBGP=y
# CONFIG_DEBUG_STACKOVERFLOW is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_X86_PTDUMP=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_HAVE_MMIOTRACE_SUPPORT=y
CONFIG_IO_DELAY_TYPE_0X80=0
CONFIG_IO_DELAY_TYPE_0XED=1
CONFIG_IO_DELAY_TYPE_UDELAY=2
CONFIG_IO_DELAY_TYPE_NONE=3
# CONFIG_IO_DELAY_0X80 is not set
CONFIG_IO_DELAY_0XED=y
# CONFIG_IO_DELAY_UDELAY is not set
# CONFIG_IO_DELAY_NONE is not set
CONFIG_DEFAULT_IO_DELAY_TYPE=1
# CONFIG_DEBUG_BOOT_PARAMS is not set
CONFIG_CPA_DEBUG=y
# CONFIG_OPTIMIZE_INLINING is not set
#
# Security options
#
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_NETWORK_XFRM=y
CONFIG_SECURITY_PATH=y
CONFIG_SECURITY_FILE_CAPABILITIES=y
CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set
CONFIG_SECURITY_SELINUX_DISABLE=y
# CONFIG_SECURITY_SELINUX_DEVELOP is not set
# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_GF128MUL=y
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
#
# Authenticated Encryption with Associated Data
#
# CONFIG_CRYPTO_CCM is not set
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_SEQIV=y
#
# Block modes
#
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTR=y
# CONFIG_CRYPTO_CTS is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_LRW=y
CONFIG_CRYPTO_PCBC=y
# CONFIG_CRYPTO_XTS is not set
#
# Hash modes
#
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=y
#
# Digest
#
CONFIG_CRYPTO_CRC32C=y
CONFIG_CRYPTO_CRC32C_INTEL=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_RMD128=y
# CONFIG_CRYPTO_RMD160 is not set
CONFIG_CRYPTO_RMD256=y
# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_TGR192 is not set
CONFIG_CRYPTO_WP512=y
#
# Ciphers
#
CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_AES_X86_64 is not set
CONFIG_CRYPTO_ANUBIS=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_CAMELLIA=y
CONFIG_CRYPTO_CAST5=y
CONFIG_CRYPTO_CAST6=y
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_FCRYPT is not set
CONFIG_CRYPTO_KHAZAD=y
# CONFIG_CRYPTO_SALSA20 is not set
# CONFIG_CRYPTO_SALSA20_X86_64 is not set
# CONFIG_CRYPTO_SEED is not set
CONFIG_CRYPTO_SERPENT=y
CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_TWOFISH_COMMON=y
CONFIG_CRYPTO_TWOFISH_X86_64=y
#
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
# CONFIG_KVM_INTEL is not set
CONFIG_KVM_AMD=y
# CONFIG_KVM_TRACE is not set
CONFIG_VIRTIO=y
CONFIG_VIRTIO_RING=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_FIRST_BIT=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
CONFIG_CRC7=y
CONFIG_LIBCRC32C=y
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_LZMA=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=y
CONFIG_TEXTSEARCH_BM=y
CONFIG_TEXTSEARCH_FSM=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_CPUMASK_OFFSTACK=y
CONFIG_FORCE_SUCCESSFUL_BUILD=y
CONFIG_FORCE_MINIMAL_CONFIG=y
CONFIG_FORCE_MINIMAL_CONFIG_64=y
CONFIG_FORCE_MINIMAL_CONFIG_PHYS=y
^ permalink raw reply [flat|nested] 33+ messages in thread