/* Atomic operations for 32-bit x86 (i386), implemented with gcc-style
 * extended inline assembly. All operands are 32-bit ints; every
 * read-modify-write uses the "lock" prefix for cross-CPU atomicity,
 * and every asm carries a "memory" clobber so the compiler does not
 * cache memory values across the operation. */

/* Compare-and-swap: atomically set *p = s if *p == t.
 * Returns the value *p held before the operation (== t on success). */
static inline int
atomic·cas(volatile int *p, int t, int s)
{
	/* cmpxchg compares eax (t) with *p; on match it stores s into *p,
	 * otherwise it loads *p into eax. Either way eax ends up holding
	 * the old value of *p, which becomes the return value. */
	__asm__ __volatile__ ( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}

/* Atomically exchange *p with v; returns the previous value of *p. */
static inline int
atomic·swap(volatile int *p, int v)
{
	/* xchg with a memory operand is implicitly locked on x86, so no
	 * explicit "lock" prefix is needed. */
	__asm__ __volatile__( "xchg %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}

/* Atomic fetch-and-add: *p += v; returns the previous value of *p. */
static inline int
atomic·fetchadd(volatile int *p, int v)
{
	__asm__ __volatile__( "lock ; xadd %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}

/* Atomically *p &= v. */
static inline void
atomic·and(volatile int *p, int v)
{
	__asm__ __volatile__( "lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

/* Atomically *p |= v. */
static inline void
atomic·or(volatile int *p, int v)
{
	__asm__ __volatile__( "lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

/* Atomically increment *p. *p appears as both output and input so the
 * compiler knows the old value is read as well as written. */
static inline void
atomic·inc(volatile int *p)
{
	__asm__ __volatile__( "lock ; incl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}

/* Atomically decrement *p. */
static inline void
atomic·dec(volatile int *p)
{
	__asm__ __volatile__( "lock ; decl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}

/* Store x to *p, then execute a locked no-op ("lock ; orl $0" on a
 * stack slot), which serves as a full memory fence on x86 without
 * requiring sse2 (mfence) — the store is globally visible before any
 * later access.
 * NOTE(review): "(%%esp)" assumes a 32-bit stack pointer; this file is
 * i386-only. */
static inline void
atomic·store(volatile int *p, int x)
{
	__asm__ __volatile__( "mov %1, %0 ; lock ; orl $0,(%%esp)"
		: "=m"(*p) : "r"(x) : "memory" );
}

/* Compiler-only barrier: the empty asm with a "memory" clobber forbids
 * compile-time reordering of memory accesses across this point; it
 * emits no instructions. */
static inline void
atomic·barrier(void)
{
	__asm__ __volatile__( "" : : : "memory" );
}

/* Busy-wait hint: "pause" tells the cpu we are in a spin loop,
 * reducing power use and memory-order-violation stalls on spin exit. */
static inline void
atomic·spin(void)
{
	__asm__ __volatile__( "pause" : : : "memory" );
}

/* Deliberately crash the process: hlt is a privileged instruction, so
 * executing it in user mode raises a fault that kills the process. */
static inline void
atomic·crash(void)
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}

/* Count trailing zero bits of a 64-bit value using two 32-bit bsf
 * scans: scan the low half; if it was zero (zf set, jnz not taken),
 * scan the high half and add 32. The "=&r" early-clobber is required
 * because the output is written before the last input is consumed.
 * Result is undefined when x == 0 (bsf leaves its destination
 * unspecified on zero input). */
static inline int
atomic·ctz64(uint64 x)
{
	int r;
	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; add $32,%0\n1:"
		: "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
	return r;
}

/* Count trailing zero bits of a 32-bit value.
 * Result is undefined when x == 0. */
static inline int
atomic·ctz32(uint32 x)
{
	int r;
	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
	return r;
}

/* Count leading zero bits of a 32-bit value: bsr yields the index of
 * the highest set bit; xor with 31 converts that index into a
 * leading-zero count. Result is undefined when x == 0. */
static inline int
atomic·clz32(uint32 x)
{
	__asm__( "bsr %1,%0 ; xor $31,%0" : "=r"(x) : "r"(x) );
	return x;
}