* [PATCH] cpuset: use rcu_read_lock() to protect task_cs()
From: Li Zefan @ 2014-03-03 8:49 UTC
To: Tejun Heo; +Cc: LKML, Cgroups, Sasha Levin, Fengguang Wu
We no longer use task_lock() to protect tsk->cgroups.
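task_cs() maps a task to its cpuset through tsk->cgroups, and that pointer is managed with RCU these days, so the read side only needs rcu_read_lock() around the dereference. As a rough sketch of the pattern the callers below are switched to (illustrative only; task_cs() and struct cpuset are local to kernel/cpuset.c, and the helper name example_read_task_cpuset is made up here, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/sched.h>

static void example_read_task_cpuset(struct task_struct *tsk)
{
	struct cpuset *cs;

	rcu_read_lock();		/* pins tsk->cgroups, and hence task_cs(tsk) */
	cs = task_cs(tsk);		/* pointer stays valid until rcu_read_unlock() */
	/* ... read fields of cs here; no sleeping inside the RCU read-side section ... */
	rcu_read_unlock();
}

The callback_mutex critical sections around these reads are unchanged; only the task_lock()/task_unlock() pairs become rcu_read_lock()/rcu_read_unlock().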
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Li Zefan <lizefan@huawei.com>
---
kernel/cpuset.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c63a0d9..4b3560d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2235,10 +2235,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
struct cpuset *cpus_cs;
mutex_lock(&callback_mutex);
- task_lock(tsk);
+ rcu_read_lock();
cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
guarantee_online_cpus(cpus_cs, pmask);
- task_unlock(tsk);
+ rcu_read_unlock();
mutex_unlock(&callback_mutex);
}
@@ -2291,10 +2291,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
mutex_lock(&callback_mutex);
- task_lock(tsk);
+ rcu_read_lock();
mems_cs = effective_nodemask_cpuset(task_cs(tsk));
guarantee_online_mems(mems_cs, &mask);
- task_unlock(tsk);
+ rcu_read_unlock();
mutex_unlock(&callback_mutex);
return mask;
@@ -2410,10 +2410,10 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
/* Not hardwall and node outside mems_allowed: scan up cpusets */
mutex_lock(&callback_mutex);
- task_lock(current);
+ rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
- task_unlock(current);
+ rcu_read_unlock();
mutex_unlock(&callback_mutex);
return allowed;
@@ -2539,24 +2539,26 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
* @task: pointer to task_struct of some task.
*
* Description: Prints @task's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log. Must hold task_lock(task) to allow
- * dereferencing task_cs(task).
+ * mems_allowed to the kernel log.
*/
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
/* Statically allocated to prevent using excess stack. */
static char cpuset_nodelist[CPUSET_NODELIST_LEN];
static DEFINE_SPINLOCK(cpuset_buffer_lock);
- struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
+ struct cgroup *cgrp;
spin_lock(&cpuset_buffer_lock);
+ rcu_read_lock();
+ cgrp = task_cs(tsk)->css.cgroup;
nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
tsk->mems_allowed);
printk(KERN_INFO "%s cpuset=", tsk->comm);
pr_cont_cgroup_name(cgrp);
pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
+ rcu_read_unlock();
spin_unlock(&cpuset_buffer_lock);
}
@@ -2588,9 +2590,9 @@ int cpuset_memory_pressure_enabled __read_mostly;
void __cpuset_memory_pressure_bump(void)
{
- task_lock(current);
+ rcu_read_lock();
fmeter_markevent(&task_cs(current)->fmeter);
- task_unlock(current);
+ rcu_read_unlock();
}
#ifdef CONFIG_PROC_PID_CPUSET
--
1.8.0.2
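The exported helpers touched above keep their signatures, so from a caller's point of view nothing changes; the switch to RCU is purely internal. A minimal, illustrative call site for cpuset_cpus_allowed() could look like the following sketch (example_query_allowed_cpus and tsk are made-up names, not taken from this patch):

#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static int example_query_allowed_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Internally this now takes rcu_read_lock() instead of task_lock(tsk). */
	cpuset_cpus_allowed(tsk, mask);

	/* ... inspect mask; sleeping is fine here, we are outside any RCU section ... */

	free_cpumask_var(mask);
	return 0;
}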
* Re: [PATCH] cpuset: use rcu_read_lock() to protect task_cs()
From: Tejun Heo @ 2014-03-03 22:31 UTC
To: Li Zefan; +Cc: LKML, Cgroups, Sasha Levin, Fengguang Wu
On Mon, Mar 03, 2014 at 04:49:39PM +0800, Li Zefan wrote:
> We no longer use task_lock() to protect tsk->cgroups.
>
> Reported-by: Fengguang Wu <fengguang.wu@intel.com>
> Signed-off-by: Li Zefan <lizefan@huawei.com>
Applied to for-3.15 (do we need this for 3.14-fixes?).
Thanks.
--
tejun