summary | refs | log | tree | commit | diff
path: root/arch/arm/atomic.h
diff options
context:
space:
mode:
author: Rich Felker <dalias@aerifal.cx> 2014-11-19 00:40:32 -0500
committer: Rich Felker <dalias@aerifal.cx> 2014-11-19 01:02:01 -0500
commit: 4a241f14a6bea81b9b50edda09f8184e35a75860 (patch)
tree: cafc783295719edfa98d5e654e337acfe19ee83e /arch/arm/atomic.h
parent: d8bdc97d148088bdaa672f56d4b8e0a15b03e70e (diff)
download: musl-4a241f14a6bea81b9b50edda09f8184e35a75860.tar.gz
overhaul ARM atomics/tls for performance and compatibility
previously, builds for pre-armv6 targets hard-coded use of the "kuser helper" system for atomics and thread-pointer access, resulting in binaries that fail to run (crash) on systems where this functionality has been disabled (as a security/hardening measure) in the kernel. additionally, builds for armv6 hard-coded an outdated/deprecated memory barrier instruction which may require emulation (extremely slow) on future models. this overhaul replaces the behavior for all pre-armv7 builds (both of the above cases) to perform runtime detection of the appropriate mechanisms for barrier, atomic compare-and-swap, and thread pointer access. detection is based on information provided by the kernel in auxv: presence of the HWCAP_TLS bit for AT_HWCAP and the architecture version encoded in AT_PLATFORM. direct use of the instructions is preferred when possible, since probing for the existence of the kuser helper page would be difficult and would incur runtime cost. for builds targeting armv7 or later, the runtime detection code is not compiled at all, and much more efficient versions of the non-cas atomic operations are provided by using ldrex/strex directly rather than wrapping cas.
Diffstat (limited to 'arch/arm/atomic.h')
-rw-r--r--  arch/arm/atomic.h  194
1 file changed, 153 insertions, 41 deletions
diff --git a/arch/arm/atomic.h b/arch/arm/atomic.h
index 8665c874..8ae35bb7 100644
--- a/arch/arm/atomic.h
+++ b/arch/arm/atomic.h
@@ -22,37 +22,150 @@ static inline int a_ctz_64(uint64_t x)
return a_ctz_l(y);
}
-#if ((__ARM_ARCH_6__ || __ARM_ARCH_6K__ || __ARM_ARCH_6ZK__) && !__thumb__) \
- || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
-
#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
-#define MEM_BARRIER "dmb ish"
-#else
-#define MEM_BARRIER "mcr p15,0,r0,c7,c10,5"
-#endif
-static inline int __k_cas(int t, int s, volatile int *p)
+static inline void a_barrier()
{
- int ret;
- __asm__(
- " " MEM_BARRIER "\n"
+ __asm__ __volatile__("dmb ish");
+}
+
+static inline int a_cas(volatile int *p, int t, int s)
+{
+ int old;
+ __asm__ __volatile__(
+ " dmb ish\n"
"1: ldrex %0,%3\n"
- " subs %0,%0,%1\n"
-#ifdef __thumb__
- " itt eq\n"
-#endif
- " strexeq %0,%2,%3\n"
- " teqeq %0,#1\n"
- " beq 1b\n"
- " " MEM_BARRIER "\n"
- : "=&r"(ret)
+ " cmp %0,%1\n"
+ " bne 1f\n"
+ " strex %0,%2,%3\n"
+ " cmp %0, #0\n"
+ " bne 1b\n"
+ " mov %0, %1\n"
+ "1: dmb ish\n"
+ : "=&r"(old)
: "r"(t), "r"(s), "Q"(*p)
: "memory", "cc" );
- return ret;
+ return old;
+}
+
+static inline int a_swap(volatile int *x, int v)
+{
+ int old, tmp;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%3\n"
+ " strex %1,%2,%3\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(old), "=&r"(tmp)
+ : "r"(v), "Q"(*x)
+ : "memory", "cc" );
+ return old;
+}
+
+static inline int a_fetch_add(volatile int *x, int v)
+{
+ int old, tmp;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%3\n"
+ " add %0,%0,%2\n"
+ " strex %1,%0,%3\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(old), "=&r"(tmp)
+ : "r"(v), "Q"(*x)
+ : "memory", "cc" );
+ return old-v;
+}
+
+static inline void a_inc(volatile int *x)
+{
+ int tmp, tmp2;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%2\n"
+ " add %0,%0,#1\n"
+ " strex %1,%0,%2\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(tmp), "=&r"(tmp2)
+ : "Q"(*x)
+ : "memory", "cc" );
+}
+
+static inline void a_dec(volatile int *x)
+{
+ int tmp, tmp2;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%2\n"
+ " sub %0,%0,#1\n"
+ " strex %1,%0,%2\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(tmp), "=&r"(tmp2)
+ : "Q"(*x)
+ : "memory", "cc" );
+}
+
+static inline void a_and(volatile int *x, int v)
+{
+ int tmp, tmp2;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%3\n"
+ " and %0,%0,%2\n"
+ " strex %1,%0,%3\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(tmp), "=&r"(tmp2)
+ : "r"(v), "Q"(*x)
+ : "memory", "cc" );
+}
+
+static inline void a_or(volatile int *x, int v)
+{
+ int tmp, tmp2;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldrex %0,%3\n"
+ " orr %0,%0,%2\n"
+ " strex %1,%0,%3\n"
+ " cmp %1, #0\n"
+ " bne 1b\n"
+ " dmb ish\n"
+ : "=&r"(tmp), "=&r"(tmp2)
+ : "r"(v), "Q"(*x)
+ : "memory", "cc" );
+}
+
+static inline void a_store(volatile int *p, int x)
+{
+ __asm__ __volatile__(
+ " dmb ish\n"
+ " str %1,%0\n"
+ " dmb ish\n"
+ : "=m"(*p)
+ : "r"(x)
+ : "memory", "cc" );
}
+
#else
-#define __k_cas ((int (*)(int, int, volatile int *))0xffff0fc0)
-#endif
+
+int __a_cas(int, int, volatile int *) __attribute__((__visibility__("hidden")));
+#define __k_cas __a_cas
+
+static inline void a_barrier()
+{
+ __asm__ __volatile__("bl __a_barrier"
+ : : : "memory", "cc", "ip", "lr" );
+}
static inline int a_cas(volatile int *p, int t, int s)
{
@@ -65,11 +178,6 @@ static inline int a_cas(volatile int *p, int t, int s)
}
}
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
-{
- return (void *)a_cas(p, (int)t, (int)s);
-}
-
static inline int a_swap(volatile int *x, int v)
{
int old;
@@ -98,19 +206,9 @@ static inline void a_dec(volatile int *x)
static inline void a_store(volatile int *p, int x)
{
- while (__k_cas(*p, x, p));
-}
-
-#define a_spin a_barrier
-
-static inline void a_barrier()
-{
- __k_cas(0, 0, &(int){0});
-}
-
-static inline void a_crash()
-{
- *(volatile char *)0=0;
+ a_barrier();
+ *p = x;
+ a_barrier();
}
static inline void a_and(volatile int *p, int v)
@@ -127,6 +225,20 @@ static inline void a_or(volatile int *p, int v)
while (__k_cas(old, old|v, p));
}
+#endif
+
+static inline void *a_cas_p(volatile void *p, void *t, void *s)
+{
+ return (void *)a_cas(p, (int)t, (int)s);
+}
+
+#define a_spin a_barrier
+
+static inline void a_crash()
+{
+ *(volatile char *)0=0;
+}
+
static inline void a_or_l(volatile void *p, long v)
{
a_or(p, v);