* Re: [PATCH 1/1 -rt] lib:atomic64 using raw_spin_lock_irq[save|restore] for atomicity
From: Yong Zhang @ 2011-09-01  2:37 UTC
  To: Shan Hai
  Cc: akpm, eric.dumazet, vapier, asharma, linux-kernel, linux-rt-users,
	Thomas Gleixner

On Thu, Sep 01, 2011 at 09:28:00AM +0800, Shan Hai wrote:
> The spin_lock_irq[save|restore] calls could break the atomicity of the
> atomic64_* operations under the PREEMPT-RT configuration, because
> spin_lock_irq[save|restore] themselves are preemptible on PREEMPT-RT.
> Using the raw variant of the spin lock provides the atomicity that the
> atomic64_* operations need.
> 
> Signed-off-by: Shan Hai <haishan.bai@gmail.com>

I think you could also show your panic info in the patch header.

And this should be routed to tglx (Cc'ed), and also to
linux-rt-users.

comments below:

> ---
>  lib/atomic64.c |   40 ++++++++++++++++++++--------------------
>  1 files changed, 20 insertions(+), 20 deletions(-)
> 
> diff --git a/lib/atomic64.c b/lib/atomic64.c
> index e12ae0d..26f524a 100644
> --- a/lib/atomic64.c
> +++ b/lib/atomic64.c
> @@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t *v)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);

I think this is indeed a problem;
but I don't see you touching the declaration and initialisation of the lock.
Why is that?
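
For reference, a rough sketch of the pieces of lib/atomic64.c that would
also need the raw_ conversion (reconstructed from memory of that file,
details approximate):

#define NR_LOCKS	16

/* Hashed lock table protecting the generic 64-bit atomics. */
static union {
	spinlock_t lock;			/* -> raw_spinlock_t */
	char pad[L1_CACHE_BYTES];		/* one lock per cacheline */
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline spinlock_t *lock_addr(const atomic64_t *v)	/* return type too */
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

static int __init init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		spin_lock_init(&atomic64_lock[i].lock);	/* -> raw_spin_lock_init() */
	return 0;
}
pure_initcall(init_atomic64_lock);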

Thanks,
Yong

>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_read);
> @@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long long i)
>  	unsigned long flags;
>  	spinlock_t *lock = lock_addr(v);
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	v->counter = i;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  }
>  EXPORT_SYMBOL(atomic64_set);
>  
> @@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_t *v)
>  	unsigned long flags;
>  	spinlock_t *lock = lock_addr(v);
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	v->counter += a;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  }
>  EXPORT_SYMBOL(atomic64_add);
>  
> @@ -83,9 +83,9 @@ long long atomic64_add_return(long long a, atomic64_t *v)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter += a;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_add_return);
> @@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_t *v)
>  	unsigned long flags;
>  	spinlock_t *lock = lock_addr(v);
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	v->counter -= a;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  }
>  EXPORT_SYMBOL(atomic64_sub);
>  
> @@ -107,9 +107,9 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter -= a;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_sub_return);
> @@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter - 1;
>  	if (val >= 0)
>  		v->counter = val;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_dec_if_positive);
> @@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter;
>  	if (val == o)
>  		v->counter = n;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_cmpxchg);
> @@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, long long new)
>  	spinlock_t *lock = lock_addr(v);
>  	long long val;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	val = v->counter;
>  	v->counter = new;
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return val;
>  }
>  EXPORT_SYMBOL(atomic64_xchg);
> @@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
>  	spinlock_t *lock = lock_addr(v);
>  	int ret = 0;
>  
> -	spin_lock_irqsave(lock, flags);
> +	raw_spin_lock_irqsave(lock, flags);
>  	if (v->counter != u) {
>  		v->counter += a;
>  		ret = 1;
>  	}
> -	spin_unlock_irqrestore(lock, flags);
> +	raw_spin_unlock_irqrestore(lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL(atomic64_add_unless);
> -- 
> 1.7.4.1
> 

-- 
Only stand for myself


* Re: [PATCH 1/1 -rt] lib:atomic64 using raw_spin_lock_irq[save|restore] for atomicity
From: Shan Hai @ 2011-09-01  3:02 UTC
  To: Yong Zhang
  Cc: akpm, eric.dumazet, vapier, asharma, linux-kernel, linux-rt-users,
	Thomas Gleixner

On 09/01/2011 10:37 AM, Yong Zhang wrote:
> On Thu, Sep 01, 2011 at 09:28:00AM +0800, Shan Hai wrote:
>> The spin_lock_irq[save|restore] calls could break the atomicity of the
>> atomic64_* operations under the PREEMPT-RT configuration, because
>> spin_lock_irq[save|restore] themselves are preemptible on PREEMPT-RT.
>> Using the raw variant of the spin lock provides the atomicity that the
>> atomic64_* operations need.
>>
>> Signed-off-by: Shan Hai <haishan.bai@gmail.com>
> I think you could also show your panic info in the patch header.
>
> And this should be routed to tglx (Cc'ed), and also to
> linux-rt-users.
>

Got it.

comments below:
>> ---
>>   lib/atomic64.c |   40 ++++++++++++++++++++--------------------
>>   1 files changed, 20 insertions(+), 20 deletions(-)
>>
>> diff --git a/lib/atomic64.c b/lib/atomic64.c
>> index e12ae0d..26f524a 100644
>> --- a/lib/atomic64.c
>> +++ b/lib/atomic64.c
>> @@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t *v)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
> I think this is indeed a problem;
> but I don't see you touching the declaration and initialisation of the lock.
> Why is that?
>

My fault, I should replace the lock type too; thanks for the suggestion.
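
For completeness, a minimal sketch of what the follow-up conversion would
look like for one helper (illustrative only, not the posted patch; it
assumes the lock table and lock_addr() are switched to the raw variant):

static union {
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* lock_addr() now returns raw_spinlock_t * */
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);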

Best regards
Shan Hai

> Thanks,
> Yong
>
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_read);
>> @@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long long i)
>>   	unsigned long flags;
>>   	spinlock_t *lock = lock_addr(v);
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	v->counter = i;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   }
>>   EXPORT_SYMBOL(atomic64_set);
>>
>> @@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_t *v)
>>   	unsigned long flags;
>>   	spinlock_t *lock = lock_addr(v);
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	v->counter += a;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   }
>>   EXPORT_SYMBOL(atomic64_add);
>>
>> @@ -83,9 +83,9 @@ long long atomic64_add_return(long long a, atomic64_t *v)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter += a;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_add_return);
>> @@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_t *v)
>>   	unsigned long flags;
>>   	spinlock_t *lock = lock_addr(v);
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	v->counter -= a;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   }
>>   EXPORT_SYMBOL(atomic64_sub);
>>
>> @@ -107,9 +107,9 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter -= a;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_sub_return);
>> @@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter - 1;
>>   	if (val >= 0)
>>   		v->counter = val;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_dec_if_positive);
>> @@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter;
>>   	if (val == o)
>>   		v->counter = n;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_cmpxchg);
>> @@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, long long new)
>>   	spinlock_t *lock = lock_addr(v);
>>   	long long val;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	val = v->counter;
>>   	v->counter = new;
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return val;
>>   }
>>   EXPORT_SYMBOL(atomic64_xchg);
>> @@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
>>   	spinlock_t *lock = lock_addr(v);
>>   	int ret = 0;
>>
>> -	spin_lock_irqsave(lock, flags);
>> +	raw_spin_lock_irqsave(lock, flags);
>>   	if (v->counter != u) {
>>   		v->counter += a;
>>   		ret = 1;
>>   	}
>> -	spin_unlock_irqrestore(lock, flags);
>> +	raw_spin_unlock_irqrestore(lock, flags);
>>   	return ret;
>>   }
>>   EXPORT_SYMBOL(atomic64_add_unless);
>> -- 
>> 1.7.4.1
>>


