From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Jan Beulich <JBeulich@suse.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>,
xen-devel <xen-devel@lists.xenproject.org>,
Keir Fraser <keir@xen.org>,
David Vrabel <david.vrabel@citrix.com>
Subject: Re: [PATCH] scheduler: adjust internal locking interface
Date: Fri, 11 Oct 2013 15:29:25 +0100 [thread overview]
Message-ID: <52580B45.3080305@citrix.com> (raw)
In-Reply-To: <525823DE02000078000FA971@nat28.tlf.novell.com>
[-- Attachment #1.1: Type: text/plain, Size: 6140 bytes --]
On 11/10/13 15:14, Jan Beulich wrote:
> --- a/xen/include/xen/sched-if.h
> +++ b/xen/include/xen/sched-if.h
> @@ -47,96 +47,70 @@ DECLARE_PER_CPU(struct schedule_data, sc
> DECLARE_PER_CPU(struct scheduler *, scheduler);
> DECLARE_PER_CPU(struct cpupool *, cpupool);
>
> -static inline spinlock_t * pcpu_schedule_lock(int cpu)
> -{
> - spinlock_t * lock=NULL;
> -
> - for ( ; ; )
> - {
> - /* The per_cpu(v->processor) may also change, if changing
> - * cpu pool also changes the scheduler lock. Retry
> - * until they match.
> - */
> - lock=per_cpu(schedule_data, cpu).schedule_lock;
> -
> - spin_lock(lock);
> - if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
> - break;
> - spin_unlock(lock);
> - }
> - return lock;
> +#define sched_lock(kind, param, cpu, irq, arg...) \
> +static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
> +{ \
> + for ( ; ; ) \
> + { \
> + spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
> + /* \
> + * v->processor may change when grabbing the lock; but \
> + * per_cpu(v->processor) may also change, if changing cpu pool \
> + * also changes the scheduler lock. Retry until they match. \
> + * \
> + * It may also be the case that v->processor may change but the \
> + * lock may be the same; this will succeed in that case. \
> + */ \
> + spin_lock##irq(lock, ## arg); \
> + if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
> + return lock; \
> + spin_unlock##irq(lock, ## arg); \
> + } \
> }
This (and the others) would be much easier to read if the '\'
were aligned on the RHS, out of view of the main body.
~Andrew
>
> -static inline int pcpu_schedule_trylock(int cpu)
> -{
> - spinlock_t * lock=NULL;
> -
> - lock=per_cpu(schedule_data, cpu).schedule_lock;
> - if ( ! spin_trylock(lock) )
> - return 0;
> - if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
> - return 1;
> - else
> - {
> - spin_unlock(lock);
> - return 0;
> - }
> +#define sched_unlock(kind, param, cpu, irq, arg...) \
> +static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
> + EXTRA_TYPE(arg), param) \
> +{ \
> + ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
> + spin_unlock##irq(lock, ## arg); \
> }
>
> -#define pcpu_schedule_lock_irq(p) \
> - do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
> -#define pcpu_schedule_lock_irqsave(p, flags) \
> - do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
> +#define EXTRA_TYPE(arg)
> +sched_lock(pcpu, unsigned int cpu, cpu, )
> +sched_lock(vcpu, const struct vcpu *v, v->processor, )
> +sched_lock(pcpu, unsigned int cpu, cpu, _irq)
> +sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
> +sched_unlock(pcpu, unsigned int cpu, cpu, )
> +sched_unlock(vcpu, const struct vcpu *v, v->processor, )
> +sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
> +sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
> +#undef EXTRA_TYPE
> +
> +#define EXTRA_TYPE(arg) , unsigned long arg
> +#define spin_unlock_irqsave spin_unlock_irqrestore
> +sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
> +sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
> +#undef spin_unlock_irqsave
> +sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
> +sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
> +#undef EXTRA_TYPE
> +
> +#undef sched_unlock
> +#undef sched_lock
>
> -static inline void pcpu_schedule_unlock(int cpu)
> +static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
> {
> - spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
> -}
> + spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
>
> -#define pcpu_schedule_unlock_irq(p) \
> - do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
> -#define pcpu_schedule_unlock_irqrestore(p, flags) \
> - do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
> -
> -static inline void vcpu_schedule_lock(struct vcpu *v)
> -{
> - spinlock_t * lock;
> -
> - for ( ; ; )
> - {
> - /* v->processor may change when grabbing the lock; but
> - * per_cpu(v->processor) may also change, if changing
> - * cpu pool also changes the scheduler lock. Retry
> - * until they match.
> - *
> - * It may also be the case that v->processor may change
> - * but the lock may be the same; this will succeed
> - * in that case.
> - */
> - lock=per_cpu(schedule_data, v->processor).schedule_lock;
> -
> - spin_lock(lock);
> - if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
> - break;
> - spin_unlock(lock);
> - }
> -}
> -
> -#define vcpu_schedule_lock_irq(v) \
> - do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
> -#define vcpu_schedule_lock_irqsave(v, flags) \
> - do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
> -
> -static inline void vcpu_schedule_unlock(struct vcpu *v)
> -{
> - spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
> + if ( !spin_trylock(lock) )
> + return NULL;
> + if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
> + return lock;
> + spin_unlock(lock);
> + return NULL;
> }
>
> -#define vcpu_schedule_unlock_irq(v) \
> - do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
> -#define vcpu_schedule_unlock_irqrestore(v, flags) \
> - do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
> -
> struct task_slice {
> struct vcpu *task;
> s_time_t time;
>
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
[-- Attachment #1.2: Type: text/html, Size: 6666 bytes --]
[-- Attachment #2: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2013-10-11 14:29 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-10-11 14:14 [PATCH] scheduler: adjust internal locking interface Jan Beulich
2013-10-11 14:29 ` Andrew Cooper [this message]
2013-10-11 14:37 ` Jan Beulich
2013-10-11 14:41 ` George Dunlap
2013-10-11 15:03 ` Jan Beulich
2013-10-11 15:13 ` [PATCH v2 0/2] scheduler locking adjustments Jan Beulich
2013-10-11 15:16 ` [PATCH v2 1/2] scheduler: adjust internal locking interface Jan Beulich
2013-10-11 15:17 ` [PATCH v2 2/2] sched: fix race between sched_move_domain() and vcpu_wake() Jan Beulich
2013-10-11 16:14 ` [PATCH v2 0/2] scheduler locking adjustments Andrew Cooper
2013-10-11 17:17 ` Keir Fraser
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=52580B45.3080305@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=George.Dunlap@eu.citrix.com \
--cc=JBeulich@suse.com \
--cc=david.vrabel@citrix.com \
--cc=keir@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).