path: root/arch/x32/atomic_arch.h
author    Rich Felker <dalias@aerifal.cx>    2016-01-21 19:08:54 +0000
committer Rich Felker <dalias@aerifal.cx>    2016-01-21 19:08:54 +0000
commit    1315596b510189b5159e742110b504177bdd4932 (patch)
tree      27159b7b95b944671454b11f36ee13308241f4b5 /arch/x32/atomic_arch.h
parent    ce3e24eaae91e7a90f87eb7f1edea8df5942de11 (diff)
download  musl-1315596b510189b5159e742110b504177bdd4932.tar.gz
refactor internal atomic.h
rather than having each arch provide its own atomic.h, there is a new shared atomic.h in src/internal which pulls arch-specific definitions from arch/$(ARCH)/atomic_arch.h. the latter can be extremely minimal, defining only a_cas or new ll/sc type primitives which the shared atomic.h will use to construct everything else.

this commit avoids making heavy changes to the individual archs' atomic implementations. definitions which are identical or near-identical to what the new shared atomic.h would produce have been removed, but otherwise the changes made are just hooking up the arch-specific files to the new infrastructure. major changes to take advantage of the new system will come in subsequent commits.
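To make the "construct everything else" idea concrete, here is a minimal sketch of how a shared src/internal/atomic.h can synthesize a missing primitive from a_cas alone. The loop shape is an illustrative assumption, not a quote of musl's actual fallback; it relies only on the a_cas contract visible in the diff below, namely that a_cas(p, t, s) atomically stores s to *p if *p equals t and returns the old value of *p.

    #ifndef a_swap
    #define a_swap a_swap
    static inline int a_swap(volatile int *p, int v)
    {
        int old;
        /* classic CAS loop: reread *p until our exchange wins the race */
        do old = *p;
        while (a_cas(p, old, v) != old);
        return old;
    }
    #endif

The #ifndef probe works because an arch that supplies its own version also self-defines the macro, which is exactly what every `#define a_cas a_cas` style line in the file below is for.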
Diffstat (limited to 'arch/x32/atomic_arch.h')
-rw-r--r--    arch/x32/atomic_arch.h    106
1 file changed, 106 insertions, 0 deletions
diff --git a/arch/x32/atomic_arch.h b/arch/x32/atomic_arch.h
new file mode 100644
index 00000000..8d1a03e5
--- /dev/null
+++ b/arch/x32/atomic_arch.h
@@ -0,0 +1,106 @@
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
+{
+ __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
+
+#define a_ctz_l a_ctz_l
+static inline int a_ctz_l(unsigned long x)
+{
+ __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
+
+#define a_and_64 a_and_64
+static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+{
+ __asm__( "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+#define a_or_64 a_or_64
+static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+#define a_or_l a_or_l
+static inline void a_or_l(volatile void *p, long v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*(long *)p) : "r"(v) : "memory" );
+}
+
+#define a_cas a_cas
+static inline int a_cas(volatile int *p, int t, int s)
+{
+ __asm__( "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+#define a_or a_or
+static inline void a_or(volatile int *p, int v)
+{
+ __asm__( "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+#define a_and a_and
+static inline void a_and(volatile int *p, int v)
+{
+ __asm__( "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+#define a_swap a_swap
+static inline int a_swap(volatile int *x, int v)
+{
+ __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
+ return v;
+}
+
+#define a_fetch_add a_fetch_add
+static inline int a_fetch_add(volatile int *x, int v)
+{
+ __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
+ return v;
+}
+
+#define a_inc a_inc
+static inline void a_inc(volatile int *x)
+{
+ __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
+}
+
+#define a_dec a_dec
+static inline void a_dec(volatile int *x)
+{
+ __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
+}
+
+#define a_store a_store
+static inline void a_store(volatile int *p, int x)
+{
+ __asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
+}
+
+#define a_spin a_spin
+static inline void a_spin()
+{
+ __asm__ __volatile__( "pause" : : : "memory" );
+}
+
+#define a_barrier a_barrier
+static inline void a_barrier()
+{
+ __asm__ __volatile__( "" : : : "memory" );
+}
+
+#define a_crash a_crash
+static inline void a_crash()
+{
+ __asm__ __volatile__( "hlt" : : : "memory" );
+}
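
As a usage sketch (hypothetical, not part of this commit), the primitives above are already enough to build a simple test-and-set spinlock; the lock word and function names here are invented for illustration:

    static volatile int lock;  /* hypothetical lock word, not from musl */

    static void lock_acquire(void)
    {
        /* a_cas returns the old value: 0 means we swapped in 1 and own the lock */
        while (a_cas(&lock, 0, 1))
            a_spin();  /* pause hint; eases contention while waiting */
    }

    static void lock_release(void)
    {
        /* a_store is a release store with a full barrier on x86
           (the "lock ; orl $0,(%%rsp)" after the mov above) */
        a_store(&lock, 0);
    }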