From: Jeremy Fitzhardinge <jeremy@goop.org>
To: Jan Beulich <JBeulich@novell.com>
Cc: "mingo@elte.hu" <mingo@elte.hu>,
	"tglx@linutronix.de" <tglx@linutronix.de>,
	"hpa@zytor.com" <hpa@zytor.com>,
	ksrinivasan <ksrinivasan@novell.com>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 4/4, v2] x86: enlightenment for ticket spin locks - improve yield behavior on Xen
Date: Wed, 30 Jun 2010 12:08:46 +0200
Message-ID: <4C2B17AE.8000002@goop.org>
In-Reply-To: <4C2A20E30200007800008A17@vpn.id2.novell.com>

On 06/29/2010 04:35 PM, Jan Beulich wrote:
> This optional patch improves yield behavior: the acquire function now
> checks whether the vCPU owning the lock is actually running, and
> yields immediately if it isn't.
>
> The (only) additional overhead this introduces for native execution is
> the write of the owning CPU in the lock acquire paths. If this is
> considered a problem but the patch is otherwise deemed useful, even
> that code could be eliminated for native execution (by further
> alternative-instruction patching).
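
For illustration only, a runtime-test sketch of that elimination; the
paragraph above suggests alternatives patching, and the
X86_FEATURE_HYPERVISOR check here is my assumption, not something this
series does:

	static __always_inline void __ticket_spin_set_owner(arch_spinlock_t *lock,
							    int owned)
	{
	#ifdef CONFIG_ENLIGHTEN_SPINLOCKS
		/* Hypothetical: skip the store on bare metal, where nothing
		 * ever reads lock->owner.  A patched variant would NOP out
		 * the store at boot instead of testing a feature bit on
		 * every acquire. */
		if (owned && boot_cpu_has(X86_FEATURE_HYPERVISOR))
			lock->owner = percpu_read(cpu_number);
	#endif
	}
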
>
> Signed-off-by: Jan Beulich <jbeulich@novell.com>
> Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
> Cc: KY Srinivasan <ksrinivasan@novell.com>
>
> ---
>  arch/x86/include/asm/spinlock.h       |   13 +++++++++++++
>  arch/x86/include/asm/spinlock_types.h |    5 +++++
>  arch/x86/kernel/cpu/xen.c             |    5 ++++-
>  3 files changed, 22 insertions(+), 1 deletion(-)
>
> --- 2.6.35-rc3-virt-spinlocks.orig/arch/x86/include/asm/spinlock.h
> +++ 2.6.35-rc3-virt-spinlocks/arch/x86/include/asm/spinlock.h
> @@ -85,6 +85,15 @@ extern void virt_spin_unlock_stub(void);
>  # define UNLOCK_LOCK_PREFIX
>  #endif
>  
> +static __always_inline void __ticket_spin_set_owner(arch_spinlock_t *lock,
> +                                                    int owned)
> +{
> +#ifdef CONFIG_ENLIGHTEN_SPINLOCKS
> +	if (owned)
> +		lock->owner = percpu_read(cpu_number);
>   

Why not smp_processor_id()?  Is this different in some way?
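
(For context, and from memory of the 2.6.35-era headers, so treat the
exact definitions as an assumption rather than a quote:

	/* arch/x86/include/asm/smp.h */
	#define raw_smp_processor_id()	(percpu_read(cpu_number))

	/* include/linux/smp.h */
	#ifdef CONFIG_DEBUG_PREEMPT
	# define smp_processor_id()	debug_smp_processor_id()  /* warns if preemptible */
	#else
	# define smp_processor_id()	raw_smp_processor_id()
	#endif

so the open-coded percpu_read(cpu_number) acts like
raw_smp_processor_id() and skips the CONFIG_DEBUG_PREEMPT sanity
check.)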

> +#endif
> +}
> +
>  /*
>   * Ticket locks are conceptually two parts, one indicating the current head of
>   * the queue, and the other indicating the current tail. The lock is acquired
> @@ -124,6 +133,7 @@ static __always_inline void __ticket_spi
>  		ASM_OUTPUT2("+Q" (inc), "+m" (lock->slock)),
>  		[stub] "i" (virt_spin_lock_stub)
>  		: "memory", "cc");
> +	__ticket_spin_set_owner(lock, true);
>  }
>  
>  static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
> @@ -141,6 +151,7 @@ static __always_inline int __ticket_spin
>  		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
>  		     :
>  		     : "memory", "cc");
> +	__ticket_spin_set_owner(lock, tmp);
>  
>  	return tmp;
>  }
> @@ -192,6 +203,7 @@ static __always_inline void __ticket_spi
>  		     ASM_OUTPUT2("+r" (inc), "+m" (lock->slock), "=&r" (tmp)),
>  		     [stub] "i" (virt_spin_lock_stub)
>  		     : "memory", "cc");
> +	__ticket_spin_set_owner(lock, true);
>  }
>  
>  static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
> @@ -212,6 +224,7 @@ static __always_inline int __ticket_spin
>  		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
>  		     :
>  		     : "memory", "cc");
> +	__ticket_spin_set_owner(lock, tmp);
>  
>  	return tmp;
>  }
> --- 2.6.35-rc3-virt-spinlocks.orig/arch/x86/include/asm/spinlock_types.h
> +++ 2.6.35-rc3-virt-spinlocks/arch/x86/include/asm/spinlock_types.h
> @@ -17,6 +17,11 @@ typedef struct arch_spinlock {
>  # else
>  			u16 cur, seq;
>  # endif
> +# if CONFIG_NR_CPUS <= 256
> +			u8 owner;
> +# else
> +			u16 owner;
> +# endif
>  		};
>  #endif
>  	};
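
A condensed view of the resulting layout for NR_CPUS <= 256; this is my
paraphrase, with the full-word slock member and the u8 cur/seq branch
assumed from context that the hunk does not quote:

	typedef struct arch_spinlock {
		union {
			unsigned int slock;	/* whole word, for the asm paths */
			struct {
				u8 cur, seq;	/* ticket head / tail */
				u8 owner;	/* CPU holding the lock */
			};
		};
	} arch_spinlock_t;

so the new field costs one byte here (two when NR_CPUS > 256) and still
packs into the existing word.
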
> --- 2.6.35-rc3-virt-spinlocks.orig/arch/x86/kernel/cpu/xen.c
> +++ 2.6.35-rc3-virt-spinlocks/arch/x86/kernel/cpu/xen.c
> @@ -79,7 +79,8 @@ static void xen_spin_lock(struct arch_sp
>  
>  	for (count = spin_count; ({ barrier(); lock->cur != token; }); )
>  		if (likely(cpu_online(raw_smp_processor_id()))
> -		    && unlikely(!--count)) {
> +		    && (per_cpu(runstate.state, lock->owner) != RUNSTATE_running
> +			|| unlikely(!--count))) {
>  			struct sched_poll sched_poll;
>  
>  			set_xen_guest_handle(sched_poll.ports,
> @@ -91,6 +92,8 @@ static void xen_spin_lock(struct arch_sp
>  		} else
>  			cpu_relax();
>  
> +	lock->owner = raw_smp_processor_id();
> +
>  	/*
>  	 * If we interrupted another spinlock while it was blocking, make
>  	 * sure it doesn't block (again) without re-checking the lock.
>


Thread overview: 8+ messages
2010-06-29 14:35 [PATCH 4/4, v2] x86: enlightenment for ticket spin locks - improve yield behavior on Xen Jan Beulich
2010-06-30  8:11 ` Peter Zijlstra
2010-06-30  8:49   ` Jan Beulich
2010-06-30  9:24     ` Peter Zijlstra
2010-06-30 10:10     ` Jeremy Fitzhardinge
2010-06-30 11:25       ` Jan Beulich
2010-06-30 10:08 ` Jeremy Fitzhardinge [this message]
2010-06-30 11:22   ` Jan Beulich
