From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>,
Qais Yousef <qais.yousef@arm.com>, Scott Wood <swood@redhat.com>,
"Peter Zijlstra (Intel)" <peterz@infradead.org>,
Valentin Schneider <valentin.schneider@arm.com>,
Ingo Molnar <mingo@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Juri Lelli <juri.lelli@redhat.com>,
Vincent Guittot <vincent.guittot@linaro.org>,
Dietmar Eggemann <dietmar.eggemann@arm.com>,
Steven Rostedt <rostedt@goodmis.org>,
Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
Daniel Bristot de Oliveira <bristot@redhat.com>,
Vincent Donnefort <vincent.donnefort@arm.com>
Subject: [patch 07/10] sched/core: Add mechanism to wait for affinity setting to complete
Date: Thu, 17 Sep 2020 11:42:09 +0200 [thread overview]
Message-ID: <20200917101624.615730057@linutronix.de> (raw)
In-Reply-To: <20200917094202.301694311@linutronix.de>
RT kernels allow disabling migration while remaining preemptible. Tasks which
have migration disabled cannot be moved to a different CPU when the
affinity mask is changed until they leave the migrate disabled section.
Add a mechanism to queue the migration request in the task and wait for it
to complete. The task will handle it when it leaves the migrate disabled
section.
This ensures that __set_cpus_allowed_ptr() is guaranteed to return only after
the new affinity mask has taken effect.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 19 ++++++++++++
kernel/sched/core.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 95 insertions(+)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -629,8 +629,16 @@ struct wake_q_node {
};
#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
+struct task_migrate_data {
+ const cpumask_t *mask;
+ struct completion *done;
+ bool check;
+ int res;
+};
+
struct task_migration_ctrl {
struct mutex mutex;
+ struct task_migrate_data *pending;
int disable_cnt;
};
@@ -638,8 +646,19 @@ struct task_migration_ctrl {
{ \
.mutex = __MUTEX_INITIALIZER(init_task.migration_ctrl.mutex), \
}
+
+static inline int task_self_migrate_result(struct task_migrate_data *data)
+{
+ return data->res;
+}
+
#else /* CONFIG_PREEMPT_RT && CONFIG_SMP */
+struct task_migrate_data { };
struct task_migration_ctrl { };
+static inline int task_self_migrate_result(struct task_migrate_data *data)
+{
+ return -ENOSYS;
+}
#endif /* !(CONFIG_PREEMPT_RT && CONFIG_SMP) */
struct task_struct {
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -442,6 +442,70 @@ static inline void hrtick_rq_init(struct
}
#endif /* CONFIG_SCHED_HRTICK */
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+static inline void task_lock_migration_ctrl(struct task_struct *p)
+{
+ mutex_lock(&p->migration_ctrl.mutex);
+}
+
+static inline void task_unlock_migration_ctrl(struct task_struct *p)
+{
+ mutex_unlock(&p->migration_ctrl.mutex);
+}
+
+/*
+ * If the affinity of a task should be set and the task is in a migrate
+ * disabled region then the operation has to wait until the task leaves the
+ * migrate disabled region and takes care of setting its affinity on its
+ * own.
+ */
+static bool task_self_migration(struct task_struct *p,
+ const struct cpumask *new_mask, bool check,
+ struct rq *rq, struct rq_flags *rf,
+ struct task_migrate_data *data)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ lockdep_assert_held(&p->migration_ctrl.mutex);
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_held(&p->pi_lock);
+
+ if (!p->migration_ctrl.disable_cnt)
+ return false;
+
+ BUG_ON(p == current);
+
+ /*
+ * Store a pointer to migration data in the migration control
+ * struct, which will be used by the task to set its own affinity
+ * when it leaves the migrate disabled section. The result is
+ * returned in @data::res.
+ */
+ data->mask = new_mask;
+ data->check = check;
+ data->done = &done;
+ p->migration_ctrl.pending = data;
+
+ /* Get a reference on @p, drop the locks and wait for it to complete */
+ get_task_struct(p);
+ task_rq_unlock(rq, p, rf);
+ wait_for_completion(&done);
+ put_task_struct(p);
+ return true;
+}
+
+#else /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) */
+static inline void task_lock_migration_ctrl(struct task_struct *p) { }
+static inline void task_unlock_migration_ctrl(struct task_struct *p) { }
+static bool task_self_migration(struct task_struct *p,
+ const struct cpumask *new_mask, bool check,
+ struct rq *rq, struct rq_flags *rf,
+ struct task_migrate_data *data)
+{
+ return false;
+}
+#endif /* !(defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)) */
+
/*
* cmpxchg based fetch_or, macro so it works for different integer types
*/
@@ -1947,17 +2011,29 @@ static int set_cpus_allowed_ptr_locked(s
static int __set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask, bool check)
{
+ struct task_migrate_data sync_data;
struct rq_flags rf;
struct rq *rq;
int ret = 0;
+ /*
+ * On RT kernels the affinity setting might be delayed if the task
+ * is in a migrate disabled region. The request for changing the
+ * affinity is queued in the target task which acts upon it when
+ * leaving the migrate disabled sections. This requires
+ * serialization to protect the relevant data structures.
+ */
+ task_lock_migration_ctrl(p);
rq = task_rq_lock(p, &rf);
if (cpumask_equal(&p->cpus_mask, new_mask))
task_rq_unlock(rq, p, &rf);
+ else if (task_self_migration(p, new_mask, check, rq, &rf, &sync_data))
+ ret = task_self_migrate_result(&sync_data);
else
ret = set_cpus_allowed_ptr_locked(p, new_mask, check, rq, &rf);
+ task_unlock_migration_ctrl(p);
return ret;
}
next prev parent reply other threads:[~2020-09-17 10:51 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-09-17 9:42 [patch 00/10] sched: Migrate disable support for RT Thomas Gleixner
2020-09-17 9:42 ` [patch 01/10] sched: Fix balance_callback() Thomas Gleixner
2020-09-17 9:42 ` [patch 02/10] sched/hotplug: Ensure only per-cpu kthreads run during hotplug Thomas Gleixner
2020-09-17 9:42 ` [patch 03/10] sched/core: Wait for tasks being pushed away on hotplug Thomas Gleixner
2020-09-17 9:42 ` [patch 04/10] sched/hotplug: Consolidate task migration on CPU unplug Thomas Gleixner
2020-09-17 9:42 ` [patch 05/10] sched/core: Split __set_cpus_allowed_ptr() Thomas Gleixner
2020-09-17 9:42 ` [patch 06/10] sched: Add task components for migration control Thomas Gleixner
2020-09-17 9:42 ` Thomas Gleixner [this message]
2020-09-17 9:42 ` [patch 08/10] sched: Add update_migratory() callback to scheduler classes Thomas Gleixner
2020-09-17 9:42 ` [patch 09/10] sched/core: Add migrate_disable/enable() Thomas Gleixner
2020-09-17 14:24 ` peterz
2020-09-17 14:38 ` Sebastian Siewior
2020-09-17 14:49 ` peterz
2020-09-17 15:13 ` Sebastian Siewior
2020-09-17 15:54 ` peterz
2020-09-17 16:30 ` Sebastian Siewior
2020-09-18 8:22 ` peterz
2020-09-18 8:48 ` Sebastian Siewior
2020-09-18 7:00 ` Thomas Gleixner
2020-09-18 8:28 ` peterz
2020-09-17 9:42 ` [patch 10/10] sched/core: Make migrate disable and CPU hotplug cooperative Thomas Gleixner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200917101624.615730057@linutronix.de \
--to=tglx@linutronix.de \
--cc=bigeasy@linutronix.de \
--cc=bristot@redhat.com \
--cc=bsegall@google.com \
--cc=dietmar.eggemann@arm.com \
--cc=juri.lelli@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mgorman@suse.de \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=qais.yousef@arm.com \
--cc=rostedt@goodmis.org \
--cc=swood@redhat.com \
--cc=valentin.schneider@arm.com \
--cc=vincent.donnefort@arm.com \
--cc=vincent.guittot@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox