/* /usr/include/CAS.h is in libowfat-dev 0.29-4.
 * This file is owned by root:root, with mode 0o644.
 * The actual contents of the file can be viewed below. */
#ifndef _CAS_H
#define _CAS_H
#include <stddef.h>
/* Atomic operations for lock-free data structures.
* We operate on machine words and use size_t as a type.
* CAS stands for Compare And Swap, the most common operation. */
/* The API was inspired by the Linux kernel */
#if defined(__INTEL_COMPILER) || (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))
#define USE_BUILTINS
#endif
/* Atomic compare-and-swap:
 * if (*x == oldval) { *x=newval; return 1; } else return 0; */
static inline int compare_and_swap(volatile size_t* x,size_t oldval,size_t newval) {
#ifdef USE_BUILTINS
/* gcc >= 4.1 or icc: portable compiler intrinsic, full barrier semantics */
return __sync_bool_compare_and_swap(x,oldval,newval);
#elif defined(__i386__)
/* cmpxchg compares %eax (oldval) with *x; on match it stores newval and
 * sets ZF, which setz copies into result.
 * __asm__ __volatile__ instead of the GNU "asm volatile" shorthand so
 * this public header also compiles under strict ISO modes (-std=c11),
 * the same spelling barrier() already uses. */
char result;
__asm__ __volatile__ ("lock; cmpxchgl %3, %0; setz %1" : "=m"(*x), "=q" (result) : "m" (*x), "r" (newval), "a" (oldval) : "memory");
return result;
#elif defined(__x86_64__)
char result;
__asm__ __volatile__ ("lock; cmpxchgq %3, %0; setz %1" : "=m"(*x), "=q" (result) : "m" (*x), "r" (newval), "a" (oldval) : "memory");
return result;
#else
#error architecture not supported and gcc too old, edit CAS.h
#endif
}
/* Atomically perform *x += val and return the NEW (post-add) value. */
static inline size_t atomic_add_return(size_t* x,size_t val) {
#ifdef USE_BUILTINS
return __sync_add_and_fetch(x,val);
#elif defined(__i386__)
/* xadd exchanges and adds: afterwards val holds the OLD value of *x,
 * so the new value is the saved original operand plus that old value.
 * __asm__ __volatile__ spelling keeps the header usable in strict ISO
 * modes (-std=c11), matching barrier(). */
size_t i = val;
__asm__ __volatile__ ("lock; xaddl %1, %0" : "+m" (*x), "+r" (val) :: "memory" );
return i + val;
#elif defined(__x86_64__)
size_t i = val;
__asm__ __volatile__ ("lock; xaddq %1, %0" : "+m" (*x), "+r" (val) :: "memory" );
return i + val;
#else
/* Generic fallback: CAS loop.
 * Bug fix: the original passed &x (a size_t**) to compare_and_swap,
 * which expects a pointer to the counter itself; pass x. */
size_t y;
for (y=*x; compare_and_swap(x,y,y+val)==0; y=*x) ;
return y+val;
#endif
}
/* Atomically perform *x += val (result discarded). */
static inline void atomic_add(size_t* x,size_t val) {
#ifdef USE_BUILTINS
__sync_add_and_fetch(x,val);
#elif defined(__i386__)
/* __asm__ __volatile__ instead of "asm volatile" so this public header
 * also compiles under strict ISO modes (-std=c11), like barrier(). */
__asm__ __volatile__ ("lock; addl %1, %0" : "+m" (*x) : "ir" (val) );
#elif defined(__x86_64__)
__asm__ __volatile__ ("lock; addq %1, %0" : "+m" (*x) : "ir" (val) );
#else
/* Bug fix: the original passed &x (a size_t**); atomic_add_return
 * expects the pointer to the counter itself, so pass x. */
atomic_add_return(x,val);
#endif
}
/* Atomically perform *x += 1 (result discarded). */
static inline void atomic_inc(size_t* x) {
#ifdef __i386__
/* __asm__ __volatile__ instead of "asm volatile" so this public header
 * also compiles under strict ISO modes (-std=c11), like barrier().
 * NOTE(review): unlike the add/CAS routines there is no "memory"
 * clobber here — presumably intentional (no compiler-barrier
 * semantics; see the smp_mb__* macros below) — confirm. */
__asm__ __volatile__ ("lock; incl %0" : "+m" (*x) );
#elif defined(__x86_64__)
__asm__ __volatile__ ("lock; incq %0" : "+m" (*x) );
#else
atomic_add(x,1);
#endif
}
/* Atomically add 1 to *x and return the incremented (new) value. */
static inline size_t atomic_inc_return(size_t* x) {
size_t newval = atomic_add_return(x,1);
return newval;
}
/* Atomically perform *x -= 1 (result discarded). */
static inline void atomic_dec(size_t* x) {
#ifdef __i386__
/* __asm__ __volatile__ instead of "asm volatile" so this public header
 * also compiles under strict ISO modes (-std=c11), like barrier(). */
__asm__ __volatile__ ("lock; decl %0" : "+m" (*x) );
#elif defined(__x86_64__)
__asm__ __volatile__ ("lock; decq %0" : "+m" (*x) );
#else
/* -1 converts to SIZE_MAX; unsigned wraparound makes this a decrement */
atomic_add(x,-1);
#endif
}
/* Atomically subtract 1 from *x and return the decremented (new) value.
 * (-1 converts to SIZE_MAX; unsigned wraparound yields the decrement.) */
static inline size_t atomic_dec_return(size_t* x) {
size_t newval = atomic_add_return(x,-1);
return newval;
}
/* Atomically perform *x |= val. */
static inline void atomic_or(volatile size_t* x,size_t val) {
#ifdef USE_BUILTINS
__sync_or_and_fetch(x,val);
#elif defined(__i386__)
/* __asm__ __volatile__ instead of "asm volatile" so this public header
 * also compiles under strict ISO modes (-std=c11), like barrier(). */
__asm__ __volatile__ ("lock; orl %1, %0" : "+m" (*x) : "r" (val) );
#elif defined(__x86_64__)
__asm__ __volatile__ ("lock; orq %1, %0" : "+m" (*x) : "r" (val) );
#else
#error architecture not supported and gcc too old, edit CAS.h
#endif
}
/* Atomically perform *x &= val. */
static inline void atomic_and(volatile size_t* x,size_t val) {
#ifdef USE_BUILTINS
__sync_and_and_fetch(x,val);
#elif defined(__i386__)
/* __asm__ __volatile__ instead of "asm volatile" so this public header
 * also compiles under strict ISO modes (-std=c11), like barrier(). */
__asm__ __volatile__ ("lock; andl %1, %0" : "+m" (*x) : "r" (val) );
#elif defined(__x86_64__)
__asm__ __volatile__ ("lock; andq %1, %0" : "+m" (*x) : "r" (val) );
#else
#error architecture not supported and gcc too old, edit CAS.h
#endif
}
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#if defined(__i386__) || defined(__x86_64__)
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#elif defined(__powerpc__)
#define mb() asm volatile("sync":::"memory")
#define rmb() asm volatile("sync":::"memory")
#define wmb() asm volatile("sync":::"memory")
#define smp_mb__before_atomic_dec() mb()
#define smp_mb__after_atomic_dec() mb()
#define smp_mb__before_atomic_inc() mb()
#define smp_mb__after_atomic_inc() mb()
#elif defined(USE_BUILTINS)
#define mb() __sync_synchronize()
#define rmb() __sync_synchronize()
#define wmb() __sync_synchronize()
#endif
#undef USE_BUILTINS
#endif