From: Clark Williams <williams@redhat.com>
To: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	RT <linux-rt-users@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: [RT PATCH] cputime: remove raw locks introduced by RT patchset
Date: Thu, 14 Apr 2016 22:09:43 -0500
Message-ID: <20160414220943.3d4d7dd2@sluggy.hsv.redhat.com>

Sebastian,

This patch removes the raw spinlock operations around cputime updates
in the vtime_* functions in kernel/sched/cputime.c.

Based on Frederic's commit b7ce2277f087fd052 ("sched/cputime: Convert
vtime_seqlock to seqcount"), the raw spinlocks in the vtime_* functions
are no longer needed to guard against writer concurrency. The RT
versions of write_seqcount_begin() and write_seqcount_end() call
preempt_disable_rt() and preempt_enable_rt(), so we are already in
atomic context while updating cputime.
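
For reference, the RT write-side seqcount helpers look roughly like
this (paraphrased, not quoted verbatim from the -rt tree; the
__write_seqcount_* helpers are the plain sequence bump/barrier pair):

	static inline void write_seqcount_begin(seqcount_t *s)
	{
		preempt_disable_rt();		/* writer stays atomic on RT */
		__write_seqcount_begin(s);	/* s->sequence++; smp_wmb(); */
	}

	static inline void write_seqcount_end(seqcount_t *s)
	{
		__write_seqcount_end(s);	/* smp_wmb(); s->sequence++; */
		preempt_enable_rt();
	}

So the raw lock only duplicates the serialization that the seqcount
write side already provides.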

I've run this patch on x86_64 4.4.6-rt14 and on the RHEL-RT kernel,
doing 12h rteval runs on both untuned systems and systems tuned with
isolcpus/rcu_nocbs/nohz_full. No ill effects were seen.
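
(A 12h run boils down to something like "rteval --duration=12h";
exact options from memory, --duration being rteval's run-length
option.)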

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Clark Williams <williams@redhat.com>
---
 kernel/sched/cputime.c | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0f75a38cff96..9a823ced7e4a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -696,45 +696,37 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 
 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 
 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;
 
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	delta_cpu = get_vtime_delta(tsk);
 	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	tsk->vtime_snap_whence = VTIME_USER;
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
@@ -746,23 +738,19 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 * synchronization against the reader (task_gtime())
 	 * that can thus safely catch up with a tickless delta.
 	 */
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	current->flags |= PF_VCPU;
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
 	write_seqcount_begin(&tsk->vtime_seq);
 	__vtime_account_system(tsk);
 	current->flags &= ~PF_VCPU;
 	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
@@ -775,30 +763,24 @@ void vtime_account_idle(struct task_struct *tsk)
 
 void arch_vtime_task_switch(struct task_struct *prev)
 {
-	raw_spin_lock(&prev->vtime_lock);
 	write_seqcount_begin(&prev->vtime_seq);
 	prev->vtime_snap_whence = VTIME_SLEEPING;
 	write_seqcount_end(&prev->vtime_seq);
-	raw_spin_unlock(&prev->vtime_lock);
 
-	raw_spin_lock(&current->vtime_lock);
 	write_seqcount_begin(&current->vtime_seq);
 	current->vtime_snap_whence = VTIME_SYS;
 	current->vtime_snap = sched_clock_cpu(smp_processor_id());
 	write_seqcount_end(&current->vtime_seq);
-	raw_spin_unlock(&current->vtime_lock);
 }
 
 void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&t->vtime_lock, flags);
 	write_seqcount_begin(&t->vtime_seq);
 	t->vtime_snap_whence = VTIME_SYS;
 	t->vtime_snap = sched_clock_cpu(cpu);
 	write_seqcount_end(&t->vtime_seq);
-	raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
 }
 
 cputime_t task_gtime(struct task_struct *t)
-- 
2.5.5

