From: Wander Lairson Costa <wander@redhat.com>
To: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
	Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Juri Lelli <juri.lelli@redhat.com>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	Dietmar Eggemann <dietmar.eggemann@arm.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Ben Segall <bsegall@google.com>, Mel Gorman <mgorman@suse.de>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Valentin Schneider <vschneid@redhat.com>,
	"Eric W. Biederman" <ebiederm@xmission.com>,
	Wander Lairson Costa <wander@redhat.com>,
	Huacai Chen <chenhuacai@kernel.org>,
	Kefeng Wang <wangkefeng.wang@huawei.com>,
	Oleg Nesterov <oleg@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>, Andrei Vagin <avagin@gmail.com>,
	Shakeel Butt <shakeelb@google.com>,
	linux-kernel@vger.kernel.org (open list),
	linux-perf-users@vger.kernel.org (open list:PERFORMANCE EVENTS SUBSYSTEM)
Subject: [PATCH v7 3/3] treewide: replace put_task_struct() with the atomic safe version
Date: Tue, 25 Apr 2023 08:43:03 -0300	[thread overview]
Message-ID: <20230425114307.36889-4-wander@redhat.com> (raw)
In-Reply-To: <20230425114307.36889-1-wander@redhat.com>

In places where put_task_struct() is called from a non-sleepable context,
replace those calls with put_task_struct_atomic_safe().

These call sites were found by running internal regression tests and
looking for warnings generated by put_task_might_resched().
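
For reference, below is a minimal sketch of the shape such a helper could
take. The actual helper is introduced in patch 2/3 of this series and may
differ; in particular, the RCU callback name __put_task_struct_rcu_cb and
the reuse of the task's rcu_head are illustrative assumptions, not taken
from the series:

  static inline void put_task_struct_atomic_safe(struct task_struct *task)
  {
          if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
                  /*
                   * Assumption: on PREEMPT_RT the final __put_task_struct()
                   * may sleep, so the last reference drop is deferred via
                   * RCU rather than performed synchronously from atomic
                   * context.
                   */
                  if (refcount_dec_and_test(&task->usage))
                          call_rcu(&task->rcu, __put_task_struct_rcu_cb);
          } else {
                  put_task_struct(task);
          }
  }

If the helper has this shape, the replacements in this patch are
behaviorally neutral for !CONFIG_PREEMPT_RT builds, since the helper then
collapses to a plain put_task_struct().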

Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Cc: Valentin Schneider <vschneid@redhat.com>
---
 kernel/events/core.c     |  6 +++---
 kernel/locking/rtmutex.c | 10 +++++-----
 kernel/sched/core.c      |  6 +++---
 kernel/sched/deadline.c  | 16 ++++++++--------
 kernel/sched/rt.c        |  4 ++--
 5 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 435815d3be3f..8f823da02324 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1181,7 +1181,7 @@ static void put_ctx(struct perf_event_context *ctx)
 		if (ctx->parent_ctx)
 			put_ctx(ctx->parent_ctx);
 		if (ctx->task && ctx->task != TASK_TOMBSTONE)
-			put_task_struct(ctx->task);
+			put_task_struct_atomic_safe(ctx->task);
 		call_rcu(&ctx->rcu_head, free_ctx);
 	}
 }
@@ -13019,7 +13019,7 @@ static void perf_event_exit_task_context(struct task_struct *child)
 	RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
 	put_ctx(child_ctx); /* cannot be last */
 	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
-	put_task_struct(current); /* cannot be last */
+	put_task_struct_atomic_safe(current); /* cannot be last */
 
 	clone_ctx = unclone_ctx(child_ctx);
 	raw_spin_unlock_irq(&child_ctx->lock);
@@ -13124,7 +13124,7 @@ void perf_event_free_task(struct task_struct *task)
 	 */
 	RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
 	WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
-	put_task_struct(task); /* cannot be last */
+	put_task_struct_atomic_safe(task); /* cannot be last */
 	raw_spin_unlock_irq(&ctx->lock);
 
 
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 728f434de2bb..3ecb8d6ae039 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -509,7 +509,7 @@ static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
 		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
-		put_task_struct(wqh->rtlock_task);
+		put_task_struct_atomic_safe(wqh->rtlock_task);
 		wqh->rtlock_task = NULL;
 	}
 
@@ -649,7 +649,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
 			       "task: %s (%d)\n", max_lock_depth,
 			       top_task->comm, task_pid_nr(top_task));
 		}
-		put_task_struct(task);
+		put_task_struct_atomic_safe(task);
 
 		return -EDEADLK;
 	}
@@ -817,7 +817,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 * No requeue[7] here. Just release @task [8]
 		 */
 		raw_spin_unlock(&task->pi_lock);
-		put_task_struct(task);
+		put_task_struct_atomic_safe(task);
 
 		/*
 		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
@@ -886,7 +886,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 	/* [8] Release the task */
 	raw_spin_unlock(&task->pi_lock);
-	put_task_struct(task);
+	put_task_struct_atomic_safe(task);
 
 	/*
 	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
@@ -990,7 +990,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
  out_unlock_pi:
 	raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
-	put_task_struct(task);
+	put_task_struct_atomic_safe(task);
 
 	return ret;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d18c3969f90..a4783f0c9f01 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1007,7 +1007,7 @@ void wake_up_q(struct wake_q_head *head)
 		 * the queueing in wake_q_add() so as not to miss wakeups.
 		 */
 		wake_up_process(task);
-		put_task_struct(task);
+		put_task_struct_atomic_safe(task);
 	}
 }
 
@@ -2528,7 +2528,7 @@ int push_cpu_stop(void *arg)
 	raw_spin_rq_unlock(rq);
 	raw_spin_unlock_irq(&p->pi_lock);
 
-	put_task_struct(p);
+	put_task_struct_atomic_safe(p);
 	return 0;
 }
 
@@ -9316,7 +9316,7 @@ static int __balance_push_cpu_stop(void *arg)
 	rq_unlock(rq, &rf);
 	raw_spin_unlock_irq(&p->pi_lock);
 
-	put_task_struct(p);
+	put_task_struct_atomic_safe(p);
 
 	return 0;
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 71b24371a6f7..0f8b8a490dc0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -327,7 +327,7 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 		 * so we are still safe.
 		 */
 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
-			put_task_struct(p);
+			put_task_struct_atomic_safe(p);
 	}
 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
 	__add_rq_bw(new_bw, &rq->dl);
@@ -467,7 +467,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
 		 * so we are still safe.
 		 */
 		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
-			put_task_struct(dl_task_of(dl_se));
+			put_task_struct_atomic_safe(dl_task_of(dl_se));
 	} else {
 		/*
 		 * Since "dl_non_contending" is not set, the
@@ -1207,7 +1207,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	 * This can free the task_struct, including this hrtimer, do not touch
 	 * anything related to that after this.
 	 */
-	put_task_struct(p);
+	put_task_struct_atomic_safe(p);
 
 	return HRTIMER_NORESTART;
 }
@@ -1442,7 +1442,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 	dl_se->dl_non_contending = 0;
 unlock:
 	task_rq_unlock(rq, p, &rf);
-	put_task_struct(p);
+	put_task_struct_atomic_safe(p);
 
 	return HRTIMER_NORESTART;
 }
@@ -1899,7 +1899,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
 		 * so we are still safe.
 		 */
 		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
-			put_task_struct(p);
+			put_task_struct_atomic_safe(p);
 	}
 	sub_rq_bw(&p->dl, &rq->dl);
 	rq_unlock(rq, &rf);
@@ -2351,7 +2351,7 @@ static int push_dl_task(struct rq *rq)
 			/* No more tasks */
 			goto out;
 
-		put_task_struct(next_task);
+		put_task_struct_atomic_safe(next_task);
 		next_task = task;
 		goto retry;
 	}
@@ -2366,7 +2366,7 @@ static int push_dl_task(struct rq *rq)
 	double_unlock_balance(rq, later_rq);
 
 out:
-	put_task_struct(next_task);
+	put_task_struct_atomic_safe(next_task);
 
 	return ret;
 }
@@ -2633,7 +2633,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
-		put_task_struct(p);
+		put_task_struct_atomic_safe(p);
 
 	/* If p is not queued we will update its parameters at next wakeup. */
 	if (!task_on_rq_queued(p)) {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0a11f44adee5..e58a84535f61 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2150,7 +2150,7 @@ static int push_rt_task(struct rq *rq, bool pull)
 		/*
 		 * Something has shifted, try again.
 		 */
-		put_task_struct(next_task);
+		put_task_struct_atomic_safe(next_task);
 		next_task = task;
 		goto retry;
 	}
@@ -2163,7 +2163,7 @@ static int push_rt_task(struct rq *rq, bool pull)
 
 	double_unlock_balance(rq, lowest_rq);
 out:
-	put_task_struct(next_task);
+	put_task_struct_atomic_safe(next_task);
 
 	return ret;
 }
-- 
2.40.0



Thread overview: 29+ messages
2023-04-25 11:43 [PATCH v7 0/3] Introduce put_task_struct_atomic_sleep() Wander Lairson Costa
2023-04-25 11:43 ` [PATCH v7 1/3] sched/core: warn on call put_task_struct in invalid context Wander Lairson Costa
2023-04-28 16:17   ` Sebastian Andrzej Siewior
2023-05-02 14:46     ` Wander Lairson Costa
2023-04-25 11:43 ` [PATCH v7 2/3] sched/task: Add the put_task_struct_atomic_safe() function Wander Lairson Costa
2023-05-04  8:42   ` Peter Zijlstra
2023-05-04  9:32     ` Valentin Schneider
2023-05-04 12:24       ` Wander Lairson Costa
2023-05-04 12:24     ` Wander Lairson Costa
2023-05-04 12:29     ` Oleg Nesterov
2023-05-04 14:33       ` Peter Zijlstra
2023-05-04 14:55         ` Wander Lairson Costa
2023-05-04 15:23           ` Oleg Nesterov
2023-05-04 15:30             ` Peter Zijlstra
2023-05-05 13:39               ` Peter Zijlstra
2023-05-04 18:29             ` Wander Lairson Costa
2023-05-04 19:22               ` Oleg Nesterov
2023-05-04 19:38                 ` Wander Lairson Costa
2023-05-04 20:16                   ` Oleg Nesterov
2023-05-08 12:30                     ` Wander Lairson Costa
2023-05-04 15:24           ` Peter Zijlstra
2023-05-04 18:21             ` Wander Lairson Costa
2023-05-05 13:32               ` Peter Zijlstra
2023-05-05 14:26                 ` Steven Rostedt
2023-05-05 14:29                   ` Steven Rostedt
2023-05-08 12:28                 ` Wander Lairson Costa
2023-04-25 11:43 ` Wander Lairson Costa [this message]
2023-04-26 12:05 ` [PATCH v7 0/3] Introduce put_task_struct_atomic_sleep() Valentin Schneider
2023-04-26 17:44 ` Waiman Long
