From: Cyril Hrubis <chrubis@suse.cz>
To: ltp@lists.linux.it
Subject: [LTP] [PATCH v3 1/7] tst_atomic: Add load, store and use __atomic builtins
Date: Tue, 12 Sep 2017 14:40:09 +0200 [thread overview]
Message-ID: <20170912124009.GA29720@rei> (raw)
In-Reply-To: <20170901130121.22821-1-rpalethorpe@suse.com>
Hi!
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> +
> + asm volatile("" : : : "memory");
> + ret = *v;
> + asm volatile("" : : : "memory");
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + asm volatile("" : : : "memory");
> + *v = i;
> + asm volatile("" : : : "memory");
> +}
These two functions are defined several times here; maybe we should just
define something like NEEDS_GENERIC_ASM_LOAD_STORE and do
#ifdef NEEDS_GENERIC_ASM_LOAD_STORE
...
#endif
where these would be defined once at the end of the file.
> #elif defined(__powerpc__) || defined(__powerpc64__)
> static inline int tst_atomic_add_return(int i, int *v)
> {
> @@ -83,7 +154,26 @@ static inline int tst_atomic_add_return(int i, int *v)
> return t;
> }
>
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> +
> + asm volatile("sync\n" : : : "memory");
> + ret = *v;
> + asm volatile("sync\n" : : : "memory");
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + asm volatile("sync\n" : : : "memory");
> + *v = i;
> + asm volatile("sync\n" : : : "memory");
> +}
> +
> #elif defined(__s390__) || defined(__s390x__)
> +
> static inline int tst_atomic_add_return(int i, int *v)
> {
> int old_val, new_val;
> @@ -102,11 +192,29 @@ static inline int tst_atomic_add_return(int i, int *v)
> return old_val + i;
> }
>
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> +
> + asm volatile("" : : : "memory");
> + ret = *v;
> + asm volatile("" : : : "memory");
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + asm volatile("" : : : "memory");
> + *v = i;
> + asm volatile("" : : : "memory");
> +}
> +
> #elif defined(__arc__)
>
> /*ARCv2 defines the smp barriers */
> #ifdef __ARC700__
> -#define smp_mb()
> +#define smp_mb() asm volatile("" : : : "memory")
> #else
> #define smp_mb() asm volatile("dmb 3\n" : : : "memory")
> #endif
> @@ -132,6 +240,24 @@ static inline int tst_atomic_add_return(int i, int *v)
> return val;
> }
>
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> +
> + smp_mb();
> + ret = *v;
> + smp_mb();
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + smp_mb();
> + *v = i;
> + smp_mb();
> +}
> +
> #elif defined (__aarch64__)
> static inline int tst_atomic_add_return(int i, int *v)
> {
> @@ -140,7 +266,7 @@ static inline int tst_atomic_add_return(int i, int *v)
>
> __asm__ __volatile__(
> " prfm pstl1strm, %2 \n"
> -"1: ldxr %w0, %2 \n"
> +"1: ldaxr %w0, %2 \n"
> " add %w0, %w0, %w3 \n"
> " stlxr %w1, %w0, %2 \n"
> " cbnz %w1, 1b \n"
> @@ -152,9 +278,90 @@ static inline int tst_atomic_add_return(int i, int *v)
> return result;
> }
>
> +/* We are using load and store exclusive (ldaxr & stlxr) instructions to try
> + * and help prevent the tst_atomic_load and, more likely, tst_atomic_store
> + * functions from interfering with tst_atomic_add_return which takes advantage
> + * of exclusivity. It is not clear if this is a good idea or not, but does
> + * mean that all three functions are very similar.
> + */
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> + unsigned long tmp;
> +
> + asm volatile("//atomic_load \n"
> + " prfm pstl1strm, %[v] \n"
> + "1: ldaxr %w[ret], %[v] \n"
> + " stlxr %w[tmp], %w[ret], %[v] \n"
> + " cbnz %w[tmp], 1b \n"
> + " dmb ish \n"
> + : [tmp] "=&r" (tmp), [ret] "=&r" (ret), [v] "+Q" (*v)
> + : : "memory");
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + unsigned long tmp;
> +
> + asm volatile("//atomic_store \n"
> + " prfm pstl1strm, %[v] \n"
> + "1: ldaxr %w[tmp], %[v] \n"
> + " stlxr %w[tmp], %w[i], %[v] \n"
> + " cbnz %w[tmp], 1b \n"
> + " dmb ish \n"
> + : [tmp] "=&r" (tmp), [v] "+Q" (*v)
> + : [i] "r" (i)
> + : "memory");
> +}
> +
> +#elif defined(__sparc__) && defined(__arch64__)
> +static inline int tst_atomic_add_return(int i, int *v)
> +{
> + int ret, tmp;
> +
> + /* Based on arch/sparc/lib/atomic_64.S with the exponential backoff
> + * function removed because we are unlikely to have a large (>= 16?)
> + * number of cores continuously trying to update one variable.
> + */
> + asm volatile("/*atomic_add_return*/ \n"
> + "1: ldsw [%[v]], %[ret]; \n"
> + " add %[ret], %[i], %[tmp]; \n"
> + " cas [%[v]], %[ret], %[tmp]; \n"
> + " cmp %[ret], %[tmp]; \n"
> + " bne,pn %%icc, 1b; \n"
> + " nop; \n"
> + " add %[ret], %[i], %[ret]; \n"
> + : [ret] "=r&" (ret), [tmp] "=r&" (tmp)
> + : [i] "r" (i), [v] "r" (v)
> + : "memory", "cc");
> +
> + return ret;
> +}
> +
> +static inline int tst_atomic_load(int *v)
> +{
> + int ret;
> +
> + /* See arch/sparc/include/asm/barrier_64.h */
> + asm volatile("" : : : "memory");
> + ret = *v;
> + asm volatile("" : : : "memory");
> +
> + return ret;
> +}
> +
> +static inline void tst_atomic_store(int i, int *v)
> +{
> + asm volatile("" : : : "memory");
> + *v = i;
> + asm volatile("" : : : "memory");
> +}
> +
> #else /* HAVE_SYNC_ADD_AND_FETCH == 1 */
> -# error Your compiler does not provide __sync_add_and_fetch and LTP\
> - implementation is missing for your architecture.
> +# error Your compiler does not provide __atomic_add_fetch, __sync_add_and_fetch \
> + and an LTP implementation is missing for your architecture.
> #endif
>
> static inline int tst_atomic_inc(int *v)
> diff --git a/include/tst_fuzzy_sync.h b/include/tst_fuzzy_sync.h
> index 229217495..f97137c35 100644
> --- a/include/tst_fuzzy_sync.h
> +++ b/include/tst_fuzzy_sync.h
> @@ -32,6 +32,7 @@
>
> #include <sys/time.h>
> #include <time.h>
> +#include "tst_atomic.h"
Hmm, isn't this added out-of-order here?
> #ifndef CLOCK_MONOTONIC_RAW
> # define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
> diff --git a/m4/ltp-atomic.m4 b/m4/ltp-atomic.m4
> new file mode 100644
> index 000000000..836f0a4fd
> --- /dev/null
> +++ b/m4/ltp-atomic.m4
> @@ -0,0 +1,34 @@
> +dnl
> +dnl Copyright (c) Linux Test Project, 2016
> +dnl
> +dnl This program is free software; you can redistribute it and/or modify
> +dnl it under the terms of the GNU General Public License as published by
> +dnl the Free Software Foundation; either version 2 of the License, or
> +dnl (at your option) any later version.
> +dnl
> +dnl This program is distributed in the hope that it will be useful,
> +dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
> +dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
> +dnl the GNU General Public License for more details.
> +dnl
> +
> +AC_DEFUN([LTP_CHECK_ATOMIC_MEMORY_MODEL],[dnl
> + AC_MSG_CHECKING([for __atomic_* compiler builtins])
> + AC_LINK_IFELSE([AC_LANG_SOURCE([
> +int main(void) {
> + int i = 0, j = 0;
> + __atomic_add_fetch(&i, 1, __ATOMIC_ACQ_REL);
> + __atomic_load_n(&i, __ATOMIC_SEQ_CST);
> + __atomic_compare_exchange_n(&i, &j, 0, 0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
We can drop the exchange function here now.
> + __atomic_store_n(&i, 0, __ATOMIC_RELAXED);
> + return i;
> +}])],[has_atomic_mm="yes"])
> +
> +if test "x$has_atomic_mm" = xyes; then
> + AC_DEFINE(HAVE_ATOMIC_MEMORY_MODEL,1,
> + [Define to 1 if you have the __atomic_* compiler builtins])
> + AC_MSG_RESULT(yes)
> +else
> + AC_MSG_RESULT(no)
> +fi
> +])
> --
> 2.14.1
>
>
> --
> Mailing list info: https://lists.linux.it/listinfo/ltp
--
Cyril Hrubis
chrubis@suse.cz
prev parent reply other threads:[~2017-09-12 12:40 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-09-01 13:01 [LTP] [PATCH v3 1/7] tst_atomic: Add load, store and use __atomic builtins Richard Palethorpe
2017-09-01 13:01 ` [LTP] [PATCH v3 2/7] tst_atomic: Add atomic store and load tests Richard Palethorpe
2017-09-12 13:15 ` Cyril Hrubis
2017-09-01 13:01 ` [LTP] [PATCH v3 3/7] fzsync: Add long running thread support and deviation stats Richard Palethorpe
2017-09-12 14:41 ` Cyril Hrubis
2017-09-15 9:10 ` Richard Palethorpe
2017-09-15 12:48 ` Cyril Hrubis
2017-09-12 14:43 ` Cyril Hrubis
2017-09-15 10:05 ` Richard Palethorpe
2017-09-15 12:51 ` Richard Palethorpe
2017-09-15 12:54 ` Cyril Hrubis
2017-09-01 13:01 ` [LTP] [PATCH v3 4/7] fzsync: Add functionality test for library Richard Palethorpe
2017-09-12 14:08 ` Cyril Hrubis
2017-09-22 11:43 ` Richard Palethorpe
2017-09-01 13:01 ` [LTP] [PATCH v3 5/7] Convert cve-2016-7117 test to use long running threads Richard Palethorpe
2017-09-01 13:01 ` [LTP] [PATCH v3 6/7] Convert cve-2014-0196 " Richard Palethorpe
2017-09-01 13:01 ` [LTP] [PATCH v3 7/7] Convert cve-2017-2671 " Richard Palethorpe
2017-09-12 12:40 ` Cyril Hrubis [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170912124009.GA29720@rei \
--to=chrubis@suse.cz \
--cc=ltp@lists.linux.it \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox