Diffstat (limited to 'sys/linux')
-rw-r--r--   sys/linux/amd64/arch/atomic.h     123
-rw-r--r--   sys/linux/arm/arch/atomic.h       107
-rw-r--r--   sys/linux/arm64/arch/atomic.h      82
-rw-r--r--   sys/linux/i386/arch/atomic.h      108
-rw-r--r--   sys/linux/riscv64/arch/atomic.h    38
5 files changed, 458 insertions, 0 deletions
diff --git a/sys/linux/amd64/arch/atomic.h b/sys/linux/amd64/arch/atomic.h
new file mode 100644
index 0000000..b3aeed1
--- /dev/null
+++ b/sys/linux/amd64/arch/atomic.h
@@ -0,0 +1,123 @@
+/* compare-and-swap: if *p == t, store s; returns the value *p held before */
+static inline int
+atomic·cas(volatile int *p, int t, int s)
+{
+ __asm__ __volatile__ (
+ "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+/* pointer-width compare-and-swap */
+static inline void
+*atomic·casp(volatile void *p, void *t, void *s)
+{
+ __asm__( "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*(void *volatile *)p)
+ : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+/* atomic exchange: xchg with a memory operand is implicitly locked */
+static inline int
+atomic·swap(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "xchg %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
+}
+
+/* fetch-and-add: atomically *p += v; returns the prior value */
+static inline int
+atomic·fetchadd(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; xadd %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
+}
+
+static inline void
+atomic·and(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·or(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·and64(volatile uint64 *p, uint64 v)
+{
+ __asm__ __volatile__(
+ "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·or64(volatile uint64 *p, uint64 v)
+{
+ __asm__ __volatile__(
+ "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·inc(volatile int *p)
+{
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
+}
+
+static inline void
+atomic·dec(volatile int *p)
+{
+ __asm__ __volatile__(
+ "lock ; decl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
+}
+
+/* store followed by a full barrier: the lock-prefixed orl to the stack
+ * serializes more cheaply than mfence */
+static inline void
+atomic·store(volatile int *p, int x)
+{
+ __asm__ __volatile__(
+ "mov %1, %0 ; lock ; orl $0,(%%rsp)"
+ : "=m"(*p) : "r"(x) : "memory" );
+}
+
+/* compiler barrier only: the lock-prefixed ops above already fence the cpu */
+static inline void
+atomic·barrier()
+{
+ __asm__ __volatile__( "" : : : "memory" );
+}
+
+static inline void
+atomic·spin()
+{
+ __asm__ __volatile__( "pause" : : : "memory" );
+}
+
+/* hlt is privileged: executing it in user mode faults, crashing on purpose */
+static inline void
+atomic·crash()
+{
+ __asm__ __volatile__( "hlt" : : : "memory" );
+}
+
+/* count trailing zeros; undefined for x == 0 (bsf leaves the output alone) */
+static inline int
+atomic·ctz64(uint64 x)
+{
+ __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
+
+/* count leading zeros: bsr finds the highest set bit, xor 63 flips the index */
+static inline int
+atomic·clz64(uint64 x)
+{
+ __asm__( "bsr %1,%0 ; xor $63,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
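
[Usage sketch, not part of this patch: a minimal spinlock built from the
primitives above. The Lock type and the lock/unlock names are hypothetical
illustrations, not identifiers from this tree.]

/* hypothetical spinlock built on atomic·swap / atomic·spin / atomic·store */
typedef struct Lock { volatile int v; } Lock;

static void
lock(Lock *l)
{
	while(atomic·swap(&l->v, 1))	/* old value 1 means someone else holds it */
		atomic·spin();		/* pause while waiting */
}

static void
unlock(Lock *l)
{
	atomic·store(&l->v, 0);	/* store + fence publishes the release */
}
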
diff --git a/sys/linux/arm/arch/atomic.h b/sys/linux/arm/arch/atomic.h
new file mode 100644
index 0000000..abb8a7b
--- /dev/null
+++ b/sys/linux/arm/arch/atomic.h
@@ -0,0 +1,107 @@
+#include "libc.h"
+
+/* armv4 lacks blx: emulate the linked branch with mov lr,pc / bx */
+#if __ARM_ARCH_4__ || __ARM_ARCH_4T__ || __ARM_ARCH == 4
+#define BLX "mov lr,pc\n\tbx"
+#else
+#define BLX "blx"
+#endif
+
+/* runtime-selected cas and barrier helpers for cpus without native
+ * ll/sc (e.g. the kernel's kuser helpers) */
+extern hidden uintptr __atomic·casptr, __atomic·barrierptr;
+
+#if ((__ARM_ARCH_6__ || __ARM_ARCH_6K__ || __ARM_ARCH_6KZ__ || __ARM_ARCH_6ZK__) && !__thumb__) \
+ || __ARM_ARCH_6T2__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
+
+/* load-exclusive: begin an ll/sc sequence on *p */
+static inline int
+atomic·ll(volatile int *p)
+{
+ int v;
+ __asm__ __volatile__ ("ldrex %0, %1" : "=r"(v) : "Q"(*p));
+ return v;
+}
+
+/* store-exclusive: nonzero on success (strex writes 0 when the store hits) */
+static inline int
+atomic·sc(volatile int *p, int v)
+{
+ int r;
+ __asm__ __volatile__ ("strex %0,%2,%1" : "=&r"(r), "=Q"(*p) : "r"(v) : "memory");
+ return !r;
+}
+
+#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
+
+static inline void
+atomic·barrier()
+{
+ __asm__ __volatile__ ("dmb ish" : : : "memory");
+}
+
+#endif
+
+#define atomic·prellsc atomic·barrier
+#define atomic·postllsc atomic·barrier
+
+#else
+
+/* no ll/sc: jump to the runtime cas helper through r3 with r0=old,
+ * r1=new, r2=address; the helper returns 0 in r0 on success */
+static inline int
+atomic·cas(volatile int *p, int t, int s)
+{
+ for(;;){
+ register int r0 __asm__("r0") = t;
+ register int r1 __asm__("r1") = s;
+ register volatile int *r2 __asm__("r2") = p;
+ register uintptr r3 __asm__("r3") = __atomic·casptr;
+ int old;
+ __asm__ __volatile__ (
+ BLX " r3"
+ : "+r"(r0), "+r"(r3) : "r"(r1), "r"(r2)
+ : "memory", "lr", "ip", "cc" );
+ if(!r0) return t;
+ if((old=*p)!=t) return old;
+ }
+}
+
+#endif
+
+#ifndef atomic·barrier
+/* call the runtime-selected barrier helper through ip */
+static inline void
+atomic·barrier()
+{
+ register uintptr ip __asm__("ip") = __atomic·barrierptr;
+ __asm__ __volatile__( BLX " ip" : "+r"(ip) : : "memory", "cc", "lr" );
+}
+#endif
+
+/* execute a permanently-undefined encoding to fault on purpose */
+static inline void
+atomic·crash()
+{
+ __asm__ __volatile__(
+#ifndef __thumb__
+ ".word 0xe7f000f0"
+#else
+ ".short 0xdeff"
+#endif
+ : : : "memory");
+}
+
+#if __ARM_ARCH >= 5 && (!__thumb__ || __thumb2__)
+
+static inline int
+atomic·clz32(uint32 x)
+{
+ __asm__ ("clz %0, %1" : "=r"(x) : "r"(x));
+ return x;
+}
+
+#if __ARM_ARCH_6T2__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
+
+/* count trailing zeros: rbit reverses the bits, clz counts from the top */
+static inline int
+atomic·ctz32(uint32 x)
+{
+ uint32 xr;
+ __asm__ ("rbit %0, %1" : "=r"(xr) : "r"(x));
+ return atomic·clz32(xr);
+}
+
+#endif
+
+#endif
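
[Usage sketch, not part of this patch: on the pre-v6 path only atomic·cas is
provided here, so other read-modify-write operations are built as cas retry
loops. The fetchadd name below is a hypothetical illustration.]

/* hypothetical fetch-and-add built on atomic·cas */
static int
fetchadd(volatile int *p, int v)
{
	int old;
	do
		old = *p;				/* snapshot the current value */
	while(atomic·cas(p, old, old+v) != old);	/* retry if it moved under us */
	return old;
}
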
diff --git a/sys/linux/arm64/arch/atomic.h b/sys/linux/arm64/arch/atomic.h
new file mode 100644
index 0000000..2fa4b04
--- /dev/null
+++ b/sys/linux/arm64/arch/atomic.h
@@ -0,0 +1,82 @@
+/* load-acquire exclusive: begin an ll/sc sequence on *p */
+static inline int
+atomic·ll(volatile int *p)
+{
+ int v;
+ __asm__ __volatile__ ("ldaxr %w0,%1" : "=r"(v) : "Q"(*p));
+ return v;
+}
+
+/* store-release exclusive: nonzero on success (stlxr writes 0 when it hits) */
+static inline int
+atomic·sc(volatile int *p, int v)
+{
+ int r;
+ __asm__ __volatile__ ("stlxr %w0,%w2,%1" : "=&r"(r), "=Q"(*p) : "r"(v) : "memory");
+ return !r;
+}
+
+static inline void
+atomic·barrier()
+{
+ __asm__ __volatile__ ("dmb ish" : : : "memory");
+}
+
+/* ll/sc compare-and-swap; the barrier on the mismatch path preserves the
+ * ordering callers expect from cas even when the store-release is skipped */
+static inline int
+atomic·cas(volatile int *p, int t, int s)
+{
+ int old;
+ do{
+ old = atomic·ll(p);
+ if(old != t){
+ atomic·barrier();
+ break;
+ }
+ }while(!atomic·sc(p, s));
+ return old;
+}
+
+/* pointer-width load-acquire exclusive */
+static inline void
+*atomic·llp(volatile void *p)
+{
+ void *v;
+ __asm__ __volatile__ ("ldaxr %0, %1" : "=r"(v) : "Q"(*(void *volatile *)p));
+ return v;
+}
+
+/* pointer-width store-release exclusive: nonzero on success */
+static inline int
+atomic·scp(volatile int *p, void *v)
+{
+ int r;
+ __asm__ __volatile__ ("stlxr %w0,%2,%1" : "=&r"(r), "=Q"(*(void *volatile *)p) : "r"(v) : "memory");
+ return !r;
+}
+
+static inline void
+*atomic·casp(volatile void *p, void *t, void *s)
+{
+ void *old;
+ do{
+ old = atomic·llp(p);
+ if(old != t){
+ atomic·barrier();
+ break;
+ }
+ }while(!atomic·scp(p, s));
+ return old;
+}
+
+/* count trailing zeros: rbit reverses the bits, clz counts from the top */
+static inline int
+atomic·ctz64(uint64 x)
+{
+ __asm__(
+ " rbit %0, %1\n"
+ " clz %0, %0\n"
+ : "=r"(x) : "r"(x));
+ return x;
+}
+
+static inline int
+atomic·clz64(uint64 x)
+{
+ __asm__("clz %0, %1" : "=r"(x) : "r"(x));
+ return x;
+}
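
[Illustration, not part of this patch: atomic·ctz64 yields the index of the
lowest set bit, the usual primitive for scanning allocation bitmaps. The
firstfree helper below is hypothetical.]

/* hypothetical bitmap scan using atomic·ctz64 */
static int
firstfree(uint64 freemask)
{
	if(freemask == 0)
		return -1;		/* clz(rbit(0)) would yield 64 */
	return atomic·ctz64(freemask);	/* index of the lowest 1 bit */
}
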
diff --git a/sys/linux/i386/arch/atomic.h b/sys/linux/i386/arch/atomic.h
new file mode 100644
index 0000000..eab161d
--- /dev/null
+++ b/sys/linux/i386/arch/atomic.h
@@ -0,0 +1,108 @@
+/* compare-and-swap: if *p == t, store s; returns the value *p held before */
+static inline int
+atomic·cas(volatile int *p, int t, int s)
+{
+ __asm__ __volatile__ (
+ "lock ; cmpxchg %3, %1"
+ : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
+ return t;
+}
+
+static inline int
+atomic·swap(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "xchg %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
+}
+
+static inline int
+atomic·fetchadd(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; xadd %0, %1"
+ : "=r"(v), "=m"(*p) : "0"(v) : "memory" );
+ return v;
+}
+
+static inline void
+atomic·and(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; and %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·or(volatile int *p, int v)
+{
+ __asm__ __volatile__(
+ "lock ; or %1, %0"
+ : "=m"(*p) : "r"(v) : "memory" );
+}
+
+static inline void
+atomic·inc(volatile int *p)
+{
+ __asm__ __volatile__(
+ "lock ; incl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
+}
+
+static inline void
+atomic·dec(volatile int *p)
+{
+ __asm__ __volatile__(
+ "lock ; decl %0"
+ : "=m"(*p) : "m"(*p) : "memory" );
+}
+
+/* store followed by a full barrier: the lock-prefixed orl to the stack
+ * serializes without needing sse2's mfence */
+static inline void
+atomic·store(volatile int *p, int x)
+{
+ __asm__ __volatile__(
+ "mov %1, %0 ; lock ; orl $0,(%%esp)"
+ : "=m"(*p) : "r"(x) : "memory" );
+}
+
+static inline void
+atomic·barrier()
+{
+ __asm__ __volatile__( "" : : : "memory" );
+}
+
+static inline void
+atomic·spin()
+{
+ __asm__ __volatile__( "pause" : : : "memory" );
+}
+
+static inline void
+atomic·crash()
+{
+ __asm__ __volatile__( "hlt" : : : "memory" );
+}
+
+/* bsf is 32-bit: scan the low word; if it is zero, scan the high word
+ * and add 32 */
+static inline int
+atomic·ctz64(uint64 x)
+{
+ int r;
+ __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; add $32,%0\n1:"
+ : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
+ return r;
+}
+
+static inline int
+atomic·ctz32(uint32 x)
+{
+ int r;
+ __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
+ return r;
+}
+
+static inline int
+atomic·clz32(uint32 x)
+{
+ __asm__( "bsr %1,%0 ; xor $31,%0" : "=r"(x) : "r"(x) );
+ return x;
+}
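
[Usage sketch, not part of this patch: atomic·fetchadd returns the old value,
which is exactly what a reference count needs to detect the final release.
The ref/unref names are hypothetical.]

/* hypothetical refcount built on atomic·fetchadd */
static void
ref(volatile int *cnt)
{
	atomic·fetchadd(cnt, 1);
}

static int
unref(volatile int *cnt)	/* nonzero when the last reference dropped */
{
	return atomic·fetchadd(cnt, -1) == 1;
}
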
diff --git a/sys/linux/riscv64/arch/atomic.h b/sys/linux/riscv64/arch/atomic.h
new file mode 100644
index 0000000..95db16d
--- /dev/null
+++ b/sys/linux/riscv64/arch/atomic.h
@@ -0,0 +1,38 @@
+static inline void
+atomic·barrier()
+{
+ __asm__ __volatile__ ("fence rw,rw" : : : "memory");
+}
+
+/* lr/sc compare-and-swap: a mismatch exits through the forward label; a
+ * lost reservation (sc writes nonzero) retries from the load */
+static inline int
+atomic·cas(volatile int *p, int t, int s)
+{
+ int old, tmp;
+ __asm__ __volatile__ (
+ "\n1: lr.w.aqrl %0, (%2)\n"
+ " bne %0, %3, 1f\n"
+ " sc.w.aqrl %1, %4, (%2)\n"
+ " bnez %1, 1b\n"
+ "1:"
+ : "=&r"(old), "=&r"(tmp)
+ : "r"(p), "r"((long)t), "r"((long)s)
+ : "memory");
+ return old;
+}
+
+/* pointer-width cas via the 64-bit lr.d/sc.d pair */
+static inline void
+*atomic·casp(volatile void *p, void *t, void *s)
+{
+ void *old;
+ int tmp;
+ __asm__ __volatile__ (
+ "\n1: lr.d.aqrl %0, (%2)\n"
+ " bne %0, %3, 1f\n"
+ " sc.d.aqrl %1, %4, (%2)\n"
+ " bnez %1, 1b\n"
+ "1:"
+ : "=&r"(old), "=&r"(tmp)
+ : "r"(p), "r"(t), "r"(s)
+ : "memory");
+ return old;
+}
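
[Sketch, not part of this patch: riscv64 also offers single-instruction AMOs,
so read-modify-write operations other than cas need no lr/sc loop. A
hypothetical fetchadd might look like this.]

/* hypothetical fetch-and-add using the amoadd.w AMO */
static inline int
atomic·fetchadd(volatile int *p, int v)
{
	int old;
	__asm__ __volatile__(
		"amoadd.w.aqrl %0, %2, (%1)"	/* atomically old = *p, *p += v */
		: "=&r"(old) : "r"(p), "r"(v) : "memory");
	return old;
}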