* [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
[not found] <1475164265-246160-1-git-send-email-borntraeger@de.ibm.com>
@ 2016-09-29 15:51 ` Christian Borntraeger
[not found] ` <1475164265-246160-2-git-send-email-borntraeger@de.ibm.com>
1 sibling, 0 replies; 10+ messages in thread
From: Christian Borntraeger @ 2016-09-29 15:51 UTC (permalink / raw)
To: Peter Zijlstra, Martin Schwidefsky, Heiko Carstens
Cc: kernellwp, linux-s390, benh, jgross, kvm, Pan Xinhui, will.deacon,
linux-kernel, virtualization, mingo, paulus, mpe,
xen-devel-request, pbonzini, paulmck
This implements the s390 backend for commit
"kernel/sched: introduce vcpu preempted check interface"
by reworking the existing smp_vcpu_scheduled into
arch_vcpu_is_preempted. We can then also get rid of the
local cpu_is_preempted function by moving the
CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
---
arch/s390/include/asm/spinlock.h | 3 +++
arch/s390/kernel/smp.c | 9 +++++++--
arch/s390/lib/spinlock.c | 25 ++++++++-----------------
3 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 63ebf37..e16e02f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
}
+bool arch_vcpu_is_preempted(int cpu);
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7b89a75..4aadd16 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
return -1;
}
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
{
- return pcpu_running(pcpu_devices + cpu);
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+ if (pcpu_running(pcpu_devices + cpu))
+ return false;
+ return true;
}
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
void smp_yield_cpu(int cpu)
{
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7..e48a48e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
-static inline int cpu_is_preempted(int cpu)
-{
- if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
- return 0;
- if (smp_vcpu_scheduled(cpu))
- return 0;
- return 1;
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
return;
smp_yield_cpu(~cpu);
}
--
2.5.5
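For context, the generic side that this backend plugs into is the fallback
from Pan Xinhui's "kernel/sched: introduce vcpu preempted check interface"
patch referenced above. A rough sketch of that hook (simplified; its exact
form and its home in include/linux/sched.h are assumptions based on that
series):

	/*
	 * Without architecture support, assume a vCPU is never preempted;
	 * s390 overrides this default via the vcpu_is_preempted #define
	 * in its spinlock.h, shown in the diff above.
	 */
	#ifndef vcpu_is_preempted
	#define vcpu_is_preempted(cpu)	false
	#endif

One more piece of context for the ~owner arguments above: the s390 lock word
holds the one's complement of the owning CPU number (arch_spin_lockval(cpu)
returns ~cpu), so ~owner recovers the CPU id that gets passed to
arch_vcpu_is_preempted() and smp_yield_cpu().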
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
[not found] ` <1475164265-246160-2-git-send-email-borntraeger@de.ibm.com>
@ 2016-09-30 4:49 ` Pan Xinhui
2016-09-30 5:52 ` Boqun Feng
2016-09-30 10:29 ` Christian Borntraeger
2016-10-19 6:56 ` Christian Borntraeger
[not found] ` <190cd825-ca7c-d160-a0ac-4e0d27ac4a93@de.ibm.com>
2 siblings, 2 replies; 10+ messages in thread
From: Pan Xinhui @ 2016-09-30 4:49 UTC (permalink / raw)
To: Christian Borntraeger, Peter Zijlstra, Martin Schwidefsky,
Heiko Carstens
Cc: kernellwp, linux-s390, benh, jgross, kvm, Pan Xinhui, will.deacon,
linux-kernel, virtualization, mingo, paulus, mpe,
xen-devel-request, pbonzini, paulmck
On 2016/9/29 23:51, Christian Borntraeger wrote:
> This implements the s390 backend for commit
> "kernel/sched: introduce vcpu preempted check interface"
> by reworking the existing smp_vcpu_scheduled into
> arch_vcpu_is_preempted. We can then also get rid of the
> local cpu_is_preempted function by moving the
> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
>
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---
hi, Christian
thanks for your patch!
> arch/s390/include/asm/spinlock.h | 3 +++
> arch/s390/kernel/smp.c | 9 +++++++--
> arch/s390/lib/spinlock.c | 25 ++++++++-----------------
> 3 files changed, 18 insertions(+), 19 deletions(-)
>
> diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
> index 63ebf37..e16e02f 100644
> --- a/arch/s390/include/asm/spinlock.h
> +++ b/arch/s390/include/asm/spinlock.h
> @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
> return __sync_bool_compare_and_swap(lock, old, new);
> }
>
> +bool arch_vcpu_is_preempted(int cpu);
> +#define vcpu_is_preempted arch_vcpu_is_preempted
> +
> /*
> * Simple spin lock operations. There are two variants, one clears IRQ's
> * on the local processor, one does not.
> diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
> index 7b89a75..4aadd16 100644
> --- a/arch/s390/kernel/smp.c
> +++ b/arch/s390/kernel/smp.c
> @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
> return -1;
> }
>
> -int smp_vcpu_scheduled(int cpu)
root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
arch/s390/lib/spinlock.c:44: if (smp_vcpu_scheduled(cpu))
> +bool arch_vcpu_is_preempted(int cpu)
> {
> - return pcpu_running(pcpu_devices + cpu);
> + if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> + return false;
> + if (pcpu_running(pcpu_devices + cpu))
> + return false;
I see that smp_vcpu_scheduled() always returns true on !SMP systems.
Maybe we can do something similar, like below:
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
#else
...
But I can't help thinking that on a !SMP system, maybe we could simply do:
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
...
thanks
xinhui
> + return true;
> }
> +EXPORT_SYMBOL(arch_vcpu_is_preempted);
> [...]
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
2016-09-30 4:49 ` Pan Xinhui
@ 2016-09-30 5:52 ` Boqun Feng
2016-09-30 6:35 ` Pan Xinhui
2016-09-30 10:29 ` Christian Borntraeger
1 sibling, 1 reply; 10+ messages in thread
From: Boqun Feng @ 2016-09-30 5:52 UTC (permalink / raw)
To: Pan Xinhui
Cc: kernellwp, linux-s390, benh, jgross, kvm, Peter Zijlstra,
Pan Xinhui, Heiko Carstens, linux-kernel, will.deacon,
virtualization, mingo, paulus, mpe, xen-devel-request,
Martin Schwidefsky, pbonzini, paulmck
On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:
>
>
> On 2016/9/29 23:51, Christian Borntraeger wrote:
> > This implements the s390 backend for commit
> > "kernel/sched: introduce vcpu preempted check interface"
> > by reworking the existing smp_vcpu_scheduled into
> > arch_vcpu_is_preempted. We can then also get rid of the
> > local cpu_is_preempted function by moving the
> > CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
> >
> > Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> > ---
>
> hi, Christian
> thanks for your patch!
>
> > arch/s390/include/asm/spinlock.h | 3 +++
> > arch/s390/kernel/smp.c | 9 +++++++--
> > arch/s390/lib/spinlock.c | 25 ++++++++-----------------
> > 3 files changed, 18 insertions(+), 19 deletions(-)
> >
> > diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
> > index 63ebf37..e16e02f 100644
> > --- a/arch/s390/include/asm/spinlock.h
> > +++ b/arch/s390/include/asm/spinlock.h
> > @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
> > return __sync_bool_compare_and_swap(lock, old, new);
> > }
> >
> > +bool arch_vcpu_is_preempted(int cpu);
> > +#define vcpu_is_preempted arch_vcpu_is_preempted
> > +
> > /*
> > * Simple spin lock operations. There are two variants, one clears IRQ's
> > * on the local processor, one does not.
> > diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
> > index 7b89a75..4aadd16 100644
> > --- a/arch/s390/kernel/smp.c
> > +++ b/arch/s390/kernel/smp.c
> > @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
> > return -1;
> > }
> >
> > -int smp_vcpu_scheduled(int cpu)
> root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
> arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
> arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
> arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
> arch/s390/lib/spinlock.c:44: if (smp_vcpu_scheduled(cpu))
>
> > +bool arch_vcpu_is_preempted(int cpu)
> > {
> > - return pcpu_running(pcpu_devices + cpu);
> > + if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> > + return false;
> > + if (pcpu_running(pcpu_devices + cpu))
> > + return false;
> I see that smp_vcpu_scheduled() always returns true on !SMP systems.
>
> Maybe we can do something similar, like below:
>
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
> #else
> ...
>
> But I can't help thinking that on a !SMP system, maybe we could simply do:
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
> #else
Why do we need a vcpu_is_preempted() implementation for UP? Where will
you use it?
Regards,
Boqun
> [...]
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
2016-09-30 5:52 ` Boqun Feng
@ 2016-09-30 6:35 ` Pan Xinhui
2016-10-04 10:17 ` Christian Borntraeger
0 siblings, 1 reply; 10+ messages in thread
From: Pan Xinhui @ 2016-09-30 6:35 UTC (permalink / raw)
To: Boqun Feng
Cc: kernellwp, linux-s390, benh, jgross, kvm, Peter Zijlstra,
Pan Xinhui, Heiko Carstens, linux-kernel, will.deacon,
virtualization, mingo, paulus, mpe, xen-devel-request,
Martin Schwidefsky, pbonzini, paulmck
On 2016/9/30 13:52, Boqun Feng wrote:
> On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:
>>
>>
>> On 2016/9/29 23:51, Christian Borntraeger wrote:
>>> This implements the s390 backend for commit
>>> "kernel/sched: introduce vcpu preempted check interface"
>>> by reworking the existing smp_vcpu_scheduled into
>>> arch_vcpu_is_preempted. We can then also get rid of the
>>> local cpu_is_preempted function by moving the
>>> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
>>>
>>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>>> ---
>>
>> hi, Christian
>> thanks for your patch!
>>
>>> arch/s390/include/asm/spinlock.h | 3 +++
>>> arch/s390/kernel/smp.c | 9 +++++++--
>>> arch/s390/lib/spinlock.c | 25 ++++++++-----------------
>>> 3 files changed, 18 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
>>> index 63ebf37..e16e02f 100644
>>> --- a/arch/s390/include/asm/spinlock.h
>>> +++ b/arch/s390/include/asm/spinlock.h
>>> @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
>>> return __sync_bool_compare_and_swap(lock, old, new);
>>> }
>>>
>>> +bool arch_vcpu_is_preempted(int cpu);
>>> +#define vcpu_is_preempted arch_vcpu_is_preempted
>>> +
>>> /*
>>> * Simple spin lock operations. There are two variants, one clears IRQ's
>>> * on the local processor, one does not.
>>> diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
>>> index 7b89a75..4aadd16 100644
>>> --- a/arch/s390/kernel/smp.c
>>> +++ b/arch/s390/kernel/smp.c
>>> @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
>>> return -1;
>>> }
>>>
>>> -int smp_vcpu_scheduled(int cpu)
>> root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
>> arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
>> arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
>> arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
>> arch/s390/lib/spinlock.c:44: if (smp_vcpu_scheduled(cpu))
>>
>>> +bool arch_vcpu_is_preempted(int cpu)
>>> {
>>> - return pcpu_running(pcpu_devices + cpu);
>>> + if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
>>> + return false;
>>> + if (pcpu_running(pcpu_devices + cpu))
>>> + return false;
>> I see that smp_vcpu_scheduled() always returns true on !SMP systems.
>>
>> Maybe we can do something similar, like below:
>>
>> #ifndef CONFIG_SMP
>> static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
>> #else
>> ...
>>
>> But I can't help thinking that on a !SMP system, maybe we could simply do:
>> #ifndef CONFIG_SMP
>> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
>> #else
>
> Why do we need a vcpu_is_preempted() implementation for UP? Where will
> you use it?
>
Yep, I also wonder about that. :)
But there is a definition of smp_vcpu_scheduled() for the !SMP kernel,
so I am a little worried that some code includes this spinlock.h in UP kernels as well.
Hi, Christian
Could you help confirm that your patch works on UP? :)
thanks
xinhui
> [...]
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
2016-09-30 4:49 ` Pan Xinhui
2016-09-30 5:52 ` Boqun Feng
@ 2016-09-30 10:29 ` Christian Borntraeger
1 sibling, 0 replies; 10+ messages in thread
From: Christian Borntraeger @ 2016-09-30 10:29 UTC (permalink / raw)
To: Pan Xinhui, Peter Zijlstra, Martin Schwidefsky, Heiko Carstens
Cc: kernellwp, linux-s390, benh, jgross, kvm, Pan Xinhui, will.deacon,
linux-kernel, virtualization, mingo, paulus, mpe,
xen-devel-request, pbonzini, paulmck
On 09/30/2016 06:49 AM, Pan Xinhui wrote:
>
> But I can't help thinking that on a !SMP system, maybe we could simply do:
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
> #else
Yes, I will add that to v3. Thanks for spotting.
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
2016-09-30 6:35 ` Pan Xinhui
@ 2016-10-04 10:17 ` Christian Borntraeger
0 siblings, 0 replies; 10+ messages in thread
From: Christian Borntraeger @ 2016-10-04 10:17 UTC (permalink / raw)
To: Pan Xinhui, Boqun Feng
Cc: kernellwp, linux-s390, benh, jgross, kvm, Peter Zijlstra,
Pan Xinhui, Heiko Carstens, linux-kernel, will.deacon,
virtualization, mingo, paulus, mpe, xen-devel-request,
Martin Schwidefsky, pbonzini, paulmck
On 09/30/2016 08:35 AM, Pan Xinhui wrote:
>
>
> On 2016/9/30 13:52, Boqun Feng wrote:
>> On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:
>>>
>>>
>>> On 2016/9/29 23:51, Christian Borntraeger wrote:
>>>> This implements the s390 backend for commit
>>>> "kernel/sched: introduce vcpu preempted check interface"
>>>> by reworking the existing smp_vcpu_scheduled into
>>>> arch_vcpu_is_preempted. We can then also get rid of the
>>>> local cpu_is_preempted function by moving the
>>>> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
>>>>
>>>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>>>> ---
>>>
>>> hi, Christian
>>> thanks for your patch!
>>>
>>>> [...]
>>>> -int smp_vcpu_scheduled(int cpu)
>>> root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
>>> arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
>>> arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
>>> arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
>>> arch/s390/lib/spinlock.c:44: if (smp_vcpu_scheduled(cpu))
>>>
>>>> +bool arch_vcpu_is_preempted(int cpu)
>>>> {
>>>> - return pcpu_running(pcpu_devices + cpu);
>>>> + if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
>>>> + return false;
>>>> + if (pcpu_running(pcpu_devices + cpu))
>>>> + return false;
>>> I see that smp_vcpu_scheduled() always returns true on !SMP systems.
>>>
>>> Maybe we can do something similar, like below:
>>>
>>> #ifndef CONFIG_SMP
>>> static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
>>> #else
>>> ...
>>>
>>> But I can't help thinking that on a !SMP system, maybe we could simply do:
>>> #ifndef CONFIG_SMP
>>> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
>>> #else
>>
>> Why do we need a vcpu_is_preempted() implementation for UP? Where will
>> you use it?
>>
> Yep, I also wonder about that. :)
>
> But there is a definition of smp_vcpu_scheduled() for the !SMP kernel,
> so I am a little worried that some code includes this spinlock.h in UP kernels as well.
>
> Hi, Christian
> Could you help confirm that your patch works on UP? :)
My patch as is seems to work fine for !SMP, so it looks like the extra define
is not necessary and we could simply go with v2.
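A likely explanation for why the !SMP build is unaffected (an assumption
about the build setup, not something stated in the thread):
arch/s390/lib/spinlock.c is the only caller of arch_vcpu_is_preempted(), and
it is compiled for SMP kernels only (a guard along the lines of
"lib-$(CONFIG_SMP) += spinlock.o" in arch/s390/lib/Makefile), so the bare
declaration in spinlock.h never needs to resolve on UP, and any new UP caller
would surface immediately as a link error.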
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
[not found] ` <1475164265-246160-2-git-send-email-borntraeger@de.ibm.com>
2016-09-30 4:49 ` Pan Xinhui
@ 2016-10-19 6:56 ` Christian Borntraeger
[not found] ` <190cd825-ca7c-d160-a0ac-4e0d27ac4a93@de.ibm.com>
2 siblings, 0 replies; 10+ messages in thread
From: Christian Borntraeger @ 2016-10-19 6:56 UTC (permalink / raw)
To: Peter Zijlstra, Martin Schwidefsky, Heiko Carstens
Cc: kernellwp, linux-s390, benh, jgross, kvm, Pan Xinhui, will.deacon,
linux-kernel, virtualization, mingo, paulus, mpe,
xen-devel-request, pbonzini, paulmck
On 09/29/2016 05:51 PM, Christian Borntraeger wrote:
> This implements the s390 backend for commit
> "kernel/sched: introduce vcpu preempted check interface"
> by reworking the existing smp_vcpu_scheduled into
> arch_vcpu_is_preempted. We can then also get rid of the
> local cpu_is_preempted function by moving the
> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
>
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Martin, Peter,
I think we could go with the patch as is. In other words, not providing
arch_vcpu_is_preempted for !CONFIG_SMP.
This will result in compile errors if code does spinning or yielding for
non-SMP kernels - which does not make sense to me, so this might actually
be a nice indicator.
If you prefer the !CONFIG_SMP implementation, let me know and I will respin.
In any case, Martin, if the patch is OK with you, can you ack it so that
Peter can take this patch together with Pan Xinhui's series?
> [...]
* Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted
[not found] ` <190cd825-ca7c-d160-a0ac-4e0d27ac4a93@de.ibm.com>
@ 2016-10-19 7:44 ` Heiko Carstens
2016-10-19 8:42 ` [PATCH v3] " Christian Borntraeger
0 siblings, 1 reply; 10+ messages in thread
From: Heiko Carstens @ 2016-10-19 7:44 UTC (permalink / raw)
To: Christian Borntraeger
Cc: kernellwp, linux-s390, benh, jgross, kvm, Peter Zijlstra,
Pan Xinhui, will.deacon, linux-kernel, virtualization, mingo,
paulus, mpe, xen-devel-request, Martin Schwidefsky, pbonzini,
paulmck
On Wed, Oct 19, 2016 at 08:56:36AM +0200, Christian Borntraeger wrote:
> On 09/29/2016 05:51 PM, Christian Borntraeger wrote:
> > This implements the s390 backend for commit
> > "kernel/sched: introduce vcpu preempted check interface"
> > by reworking the existing smp_vcpu_scheduled into
> > arch_vcpu_is_preempted. We can then also get rid of the
> > local cpu_is_preempted function by moving the
> > CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
> >
> > Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>
>
> Martin, Peter,
>
> I think we could go with the patch as is. In other words, not providing
> arch_vcpu_is_preempted for !CONFIG_SMP.
>
> This will result in compile errors if code does spinning or yielding for
> non-SMP kernels - which does not make sense to me, so this might actually
> be a nice indicator.
> If you prefer the !CONFIG_SMP implementation, let me know and I will respin.
...but I do prefer an implementation for !CONFIG_SMP. I'm tired of fixing
silly compile errors that only happen on s390.
* [PATCH v3] s390/spinlock: Provide vcpu_is_preempted
2016-10-19 7:44 ` Heiko Carstens
@ 2016-10-19 8:42 ` Christian Borntraeger
2016-10-19 8:50 ` Heiko Carstens
0 siblings, 1 reply; 10+ messages in thread
From: Christian Borntraeger @ 2016-10-19 8:42 UTC (permalink / raw)
To: Peter Zijlstra, Martin Schwidefsky, Heiko Carstens
Cc: kernellwp, linux-s390, benh, jgross, kvm, Pan Xinhui, will.deacon,
linux-kernel, virtualization, mingo, paulus, mpe,
xen-devel-request, pbonzini, paulmck
This implements the s390 backend for commit
"kernel/sched: introduce vcpu preempted check interface"
by reworking the existing smp_vcpu_scheduled into
arch_vcpu_is_preempted. We can then also get rid of the
local cpu_is_preempted function by moving the
CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
---
arch/s390/include/asm/spinlock.h | 8 ++++++++
arch/s390/kernel/smp.c | 9 +++++++--
arch/s390/lib/spinlock.c | 25 ++++++++-----------------
3 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7e9e09f..7ecd890 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
}
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe..b988ed1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
return -1;
}
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
{
- return pcpu_running(pcpu_devices + cpu);
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+ if (pcpu_running(pcpu_devices + cpu))
+ return false;
+ return true;
}
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
void smp_yield_cpu(int cpu)
{
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7..e48a48e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
-static inline int cpu_is_preempted(int cpu)
-{
- if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
- return 0;
- if (smp_vcpu_scheduled(cpu))
- return 0;
- return 1;
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
return;
smp_yield_cpu(~cpu);
}
--
2.5.5
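To see where the new hook pays off outside arch code: the generic half of
Pan Xinhui's series makes the spin-on-owner paths stop spinning once the lock
holder's vCPU is scheduled out. A rough sketch of that pattern, adapted and
simplified from the mutex optimistic-spin code of that series (shown for
context only, not part of this patch):

	static noinline
	bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
	{
		bool ret = true;

		rcu_read_lock();
		while (lock->owner == owner) {
			/* make sure lock->owner is re-read on every iteration */
			barrier();
			/*
			 * Stop spinning when the owner is no longer on a CPU,
			 * when we should reschedule ourselves, or when the
			 * owner's vCPU was scheduled out by the hypervisor -
			 * the case arch_vcpu_is_preempted() reports on s390.
			 */
			if (!owner->on_cpu || need_resched() ||
			    vcpu_is_preempted(task_cpu(owner))) {
				ret = false;
				break;
			}
			cpu_relax();
		}
		rcu_read_unlock();
		return ret;
	}

With that in place a waiter falls back to sleeping instead of burning its own
time slice while the lock holder is not running.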
* Re: [PATCH v3] s390/spinlock: Provide vcpu_is_preempted
2016-10-19 8:42 ` [PATCH v3] " Christian Borntraeger
@ 2016-10-19 8:50 ` Heiko Carstens
0 siblings, 0 replies; 10+ messages in thread
From: Heiko Carstens @ 2016-10-19 8:50 UTC (permalink / raw)
To: Christian Borntraeger
Cc: kernellwp, linux-s390, benh, jgross, kvm, Peter Zijlstra,
Pan Xinhui, will.deacon, linux-kernel, virtualization, mingo,
paulus, mpe, xen-devel-request, Martin Schwidefsky, pbonzini,
paulmck
On Wed, Oct 19, 2016 at 10:42:04AM +0200, Christian Borntraeger wrote:
> This implements the s390 backend for commit
> "kernel/sched: introduce vcpu preempted check interface"
> by reworking the existing smp_vcpu_scheduled into
> arch_vcpu_is_preempted. We can then also get rid of the
> local cpu_is_preempted function by moving the
> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
>
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---
> arch/s390/include/asm/spinlock.h | 8 ++++++++
> arch/s390/kernel/smp.c | 9 +++++++--
> arch/s390/lib/spinlock.c | 25 ++++++++-----------------
> 3 files changed, 23 insertions(+), 19 deletions(-)
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>