// Include system thread functionality.
#if defined(OVR_OS_WIN32)
#include <windows.h>
#else
#include <pthread.h>
#endif
// *** AtomicOpsRaw

// Base class for the raw atomic operations; defines the memory-fence helper
// types used by the per-architecture implementations below.
struct AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS) || defined(OVR_CPU_X86) || defined(OVR_OS_WIN32) || defined(OVR_OS_IPHONE)

    // The atomic ops already provide full ordering on these targets, so the
    // sync types are empty; an explicit empty constructor avoids 'unused
    // variable' warnings.
    struct FullSync    { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };
#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)

    // PowerPC: 'sync' is a full barrier; 'isync' keeps later instructions from
    // executing until earlier ones complete.
    struct FullSync    { inline FullSync()    { asm volatile("sync\n"); } ~FullSync()    { asm volatile("isync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("isync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };
#elif defined(OVR_CPU_MIPS)

    // MIPS: 'sync' is the only ordering instruction, used for acquire and
    // release alike.
    struct FullSync    { inline FullSync()    { asm volatile("sync\n"); } ~FullSync()    { asm volatile("sync\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("sync\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("sync\n"); } };
#elif defined(OVR_CPU_ARM)

    // ARM: 'dmb' (data memory barrier) serves as both the acquire and the
    // release fence.
    struct FullSync    { inline FullSync()    { asm volatile("dmb\n"); } ~FullSync()    { asm volatile("dmb\n"); } };
    struct AcquireSync { inline AcquireSync() { } ~AcquireSync() { asm volatile("dmb\n"); } };
    struct ReleaseSync { inline ReleaseSync() { asm volatile("dmb\n"); } };
#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4)

    // The GCC __sync builtins used below are already full barriers.
    struct FullSync    { inline FullSync() { } };
    struct AcquireSync { inline AcquireSync() { } };
    struct ReleaseSync { inline ReleaseSync() { } };

#endif
};
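
// Usage sketch (illustrative, not part of the header): the types above are
// RAII fences. Constructing one emits the entry barrier and the destructor
// emits the exit barrier, so a stack instance orders the operations in its
// scope. For example, under the PPC branch:
//
//   UInt32 LoadWithAcquire(volatile UInt32* p)    // hypothetical helper
//   {
//       AtomicOpsRawBase::AcquireSync fence;      // no-op ctor; dtor emits 'isync'
//       return *p;                                // read completes before the fence closes
//   }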


// 4-byte raw data atomic op implementation class.
struct AtomicOpsRaw_4ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_ENABLE_THREADS)

    // Provide a type for the no-thread-support case; trivial ops are supplied
    // by AtomicOpsRaw_DefImpl below.
    typedef UInt32 T;
#elif defined(OVR_OS_WIN32)
    typedef UInt32 T;

    // VC6 declares the Interlocked* functions without volatile and with PVOID
    // arguments for InterlockedCompareExchange, so select the pointer and
    // element types accordingly.
#if defined(OVR_CC_MSVC) && (OVR_CC_MSVC < 1300)
    typedef T*            InterlockTPtr;
    typedef LPVOID        ET;
    typedef ET*           InterlockETPtr;
#else
    typedef volatile T*   InterlockTPtr;
    typedef T             ET;
    typedef InterlockTPtr InterlockETPtr;
#endif
    inline static T    Exchange_NoSync(volatile T* p, T val)           { return InterlockedExchange((InterlockTPtr)p, val); }
    inline static T    ExchangeAdd_NoSync(volatile T* p, T val)        { return InterlockedExchangeAdd((InterlockTPtr)p, val); }
    inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange((InterlockETPtr)p, (ET)val, (ET)c) == (ET)c; }
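
    // Usage sketch (hypothetical helper, not part of the header): any
    // read-modify-write can be derived from CompareAndSet_NoSync with a retry
    // loop; the non-x86 branches below implement the same shape in assembly.
    //
    //   inline static UInt32 FetchOr_NoSync(volatile UInt32* p, UInt32 mask)
    //   {
    //       UInt32 old;
    //       do { old = *p; } while (!CompareAndSet_NoSync(p, old, old | mask));
    //       return old;
    //   }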
#elif defined(OVR_CPU_PPC64) || defined(OVR_CPU_PPC)
    typedef UInt32 T;

    // PowerPC load-reserved/store-conditional: 'lwarx' reserves the word and
    // 'stwcx.' stores only if the reservation still holds, retrying otherwise.
    static inline UInt32 Exchange_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 ret;
        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "stwcx. %[j],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [j] "b" (j) : "cc", "memory");
        return ret;
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 dummy, ret;
        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "stwcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc", "memory");
        return ret;
    }
    static inline bool CompareAndSet_NoSync(volatile UInt32* i, UInt32 c, UInt32 value)
    {
        UInt32 ret;
        asm volatile("1:\n\t"
                     "lwarx  %[r],0,%[i]\n\t"
                     "cmpw   0,%[r],%[cmp]\n\t"
                     "mfcr   %[r]\n\t"
                     "bne-   2f\n\t"
                     "stwcx. %[val],0,%[i]\n\t"
                     "bne-   1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc", "memory");
        // 'mfcr' copied the condition register into ret; bit 0x20000000 is
        // CR0[EQ], set only when the compare matched and the store went through.
        return (ret & 0x20000000) ? 1 : 0;
    }
#elif defined(OVR_CPU_MIPS)
    typedef UInt32 T;

    // MIPS load-linked/store-conditional: 'll' links the word and 'sc' writes
    // back only if the link is intact ('sc' leaves 0 in its source register on
    // failure, which drives the retry branch).
    static inline UInt32 Exchange_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 ret;
        asm volatile("1:\n\t"
                     "ll     %[r],0(%[i])\n\t"
                     "sc     %[j],0(%[i])\n\t"
                     "beq    %[j],$0,1b\n\t"
                     "nop    \n"
                     : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");
        return ret;
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 ret;
        asm volatile("1:\n\t"
                     "ll     %[r],0(%[i])\n\t"
                     "addu   %[j],%[r],%[j]\n\t"
                     "sc     %[j],0(%[i])\n\t"
                     "beq    %[j],$0,1b\n\t"
                     "nop    \n"
                     : "+m" (*i), [r] "=&d" (ret) : [i] "d" (i), [j] "d" (j) : "cc", "memory");
        return ret;
    }
    static inline bool CompareAndSet_NoSync(volatile UInt32* i, UInt32 c, UInt32 value)
    {
        UInt32 ret, dummy;
        asm volatile("1:\n\t"
                     "move   %[r],$0\n\t"
                     "ll     %[o],0(%[i])\n\t"
                     "bne    %[o],%[c],2f\n\t"
                     "move   %[r],%[v]\n\t"
                     "sc     %[r],0(%[i])\n\t"
                     "beq    %[r],$0,1b\n\t"
                     "nop    \n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&d" (ret), [o] "=&d" (dummy) : [i] "d" (i), [c] "d" (c), [v] "d" (value)
                     : "cc", "memory");
        return ret;
    }
#elif defined(OVR_CPU_ARM) && defined(OVR_CC_ARM)
    typedef UInt32 T;

    // ARM RealView compiler: use the __ldrex/__strex intrinsics in a retry
    // loop (__strex returns 0 when the exclusive store succeeds).
    static inline UInt32 Exchange_NoSync(volatile UInt32* i, UInt32 j)
    {
        for (;;) { T r = __ldrex(i); if (__strex(j, i) == 0) return r; }
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32* i, UInt32 j)
    {
        for (;;) { T r = __ldrex(i); if (__strex(r + j, i) == 0) return r; }
    }
    static inline bool CompareAndSet_NoSync(volatile UInt32* i, UInt32 c, UInt32 value)
    {
        for (;;) { T r = __ldrex(i); if (r != c) return 0; if (__strex(value, i) == 0) return 1; }
    }
#elif defined(OVR_CPU_ARM)
    typedef UInt32 T;

    // ARM with GCC: ldrex/strex exclusive pair; the [t] operand receives the
    // strex status flag (0 on success), which drives the retry branch.
    static inline UInt32 Exchange_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 ret, dummy;
        asm volatile("1:\n\t"
                     "ldrex  %[r],[%[i]]\n\t"
                     "strex  %[t],%[j],[%[i]]\n\t"
                     "cmp    %[t],#0\n\t"
                     "bne    1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [t] "=&r" (dummy) : [i] "r" (i), [j] "r" (j) : "cc", "memory");
        return ret;
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32* i, UInt32 j)
    {
        UInt32 ret, dummy, test;
        asm volatile("1:\n\t"
                     "ldrex  %[r],[%[i]]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "strex  %[t],%[o],[%[i]]\n\t"
                     "cmp    %[t],#0\n\t"
                     "bne    1b\n\t"
                     : "+m" (*i), [r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [j] "r" (j) : "cc", "memory");
        return ret;
    }
    static inline bool CompareAndSet_NoSync(volatile UInt32* i, UInt32 c, UInt32 value)
    {
        UInt32 ret = 1, dummy, test;
        asm volatile("1:\n\t"
                     "ldrex  %[o],[%[i]]\n\t"
                     "cmp    %[o],%[c]\n\t"
                     "bne    2f\n\t"
                     "strex  %[r],%[v],[%[i]]\n\t"
                     "cmp    %[r],#0\n\t"
                     "bne    1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&r" (ret), [o] "=&r" (dummy), [t] "=&r" (test) : [i] "r" (i), [c] "r" (c), [v] "r" (value)
                     : "cc", "memory");
        // 'ret' is 0 only when strex succeeded, so invert it for the result.
        return !ret;
    }
#elif defined(OVR_CPU_X86)
    typedef UInt32 T;

    // x86: 'xchgl' is implicitly locked; the other ops need an explicit
    // 'lock' prefix.
    static inline UInt32 Exchange_NoSync(volatile UInt32* i, UInt32 j)
    {
        asm volatile("xchgl %1,%[i]\n"
                     : "+m" (*i), "=q" (j) : [i] "m" (*i), "1" (j) : "cc", "memory");
        return j;
    }
    static inline UInt32 ExchangeAdd_NoSync(volatile UInt32* i, UInt32 j)
    {
        asm volatile("lock; xaddl %1,%[i]\n"
                     : "+m" (*i), "+q" (j) : [i] "m" (*i) : "cc", "memory");
        return j;
    }
    static inline bool CompareAndSet_NoSync(volatile UInt32* i, UInt32 c, UInt32 value)
    {
        UInt32 ret;
        asm volatile("lock; cmpxchgl %[v],%[i]\n"
                     : "+m" (*i), "=a" (ret) : [i] "m" (*i), "1" (c), [v] "q" (value) : "cc", "memory");
        // cmpxchgl leaves the previous value in EAX; equality with 'c' means
        // the swap took place.
        return (ret == c);
    }
#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)
    typedef UInt32 T;

    // Generic GCC fallback using the __sync builtins (full barriers).
    static inline T Exchange_NoSync(volatile T* i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T* i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T* i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};
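
// Usage sketch (illustrative; 'SpinFlag' is not part of this header): a minimal
// test-and-set spin flag built on the 4-byte raw ops. Note that the _NoSync ops
// carry no fences, so a real lock would pair them with the Acquire/Release
// sync types defined above.
struct SpinFlag
{
    volatile UInt32 State;  // 0 = free, 1 = held

    void Acquire() { while (!AtomicOpsRaw_4ByteImpl::CompareAndSet_NoSync(&State, 0, 1)) { } }
    void Release() { AtomicOpsRaw_4ByteImpl::Exchange_NoSync(&State, 0); }
};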


// 8-byte raw data atomic op implementation class; currently provided only on
// systems with 64-bit pointers.
struct AtomicOpsRaw_8ByteImpl : public AtomicOpsRawBase
{
#if !defined(OVR_64BIT_POINTERS) || !defined(OVR_ENABLE_THREADS)

    // Provide a type for the no-thread or no-64-bit case.
    typedef UInt64 T;
#elif defined(OVR_OS_WIN32)
    typedef UInt64 T;

    typedef volatile T* InterlockTPtr;
    inline static T    Exchange_NoSync(volatile T* p, T val)           { return InterlockedExchange64((InterlockTPtr)p, val); }
    inline static T    ExchangeAdd_NoSync(volatile T* p, T val)        { return InterlockedExchangeAdd64((InterlockTPtr)p, val); }
    inline static bool CompareAndSet_NoSync(volatile T* p, T c, T val) { return InterlockedCompareExchange64((InterlockTPtr)p, val, c) == c; }
#elif defined(OVR_CPU_PPC64)
    typedef UInt64 T;

    // 64-bit PowerPC uses the doubleword reserved/conditional pair
    // 'ldarx'/'stdcx.'.
    static inline UInt64 Exchange_NoSync(volatile UInt64* i, UInt64 j)
    {
        UInt64 dummy, ret;
        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "mr     %[o],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");
        return ret;
    }
    static inline UInt64 ExchangeAdd_NoSync(volatile UInt64* i, UInt64 j)
    {
        UInt64 dummy, ret;
        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "add    %[o],%[r],%[j]\n\t"
                     "stdcx. %[o],0,%[i]\n\t"
                     "bne-   1b\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [j] "b" (j) : "cc");
        return ret;
    }
    static inline bool CompareAndSet_NoSync(volatile UInt64* i, UInt64 c, UInt64 value)
    {
        UInt64 ret, dummy;
        asm volatile("1:\n\t"
                     "ldarx  %[r],0,%[i]\n\t"
                     "cmpw   0,%[r],%[cmp]\n\t"
                     "mfcr   %[r]\n\t"
                     "bne-   2f\n\t"
                     "stdcx. %[val],0,%[i]\n\t"
                     "bne-   1b\n\t"
                     "2:\n"
                     : "+m" (*i), [r] "=&b" (ret), [o] "=&r" (dummy) : [i] "b" (i), [cmp] "b" (c), [val] "b" (value) : "cc");
        // As in the 32-bit version, test CR0[EQ] from the copied condition register.
        return (ret & 0x20000000) ? 1 : 0;
    }
#elif defined(OVR_CC_GNU) && (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1)
    typedef UInt64 T;

    static inline T Exchange_NoSync(volatile T* i, T j)
    {
        T v;
        do {
            v = *i;
        } while (!__sync_bool_compare_and_swap(i, v, j));
        return v;
    }

    static inline T ExchangeAdd_NoSync(volatile T* i, T j)
    {
        return __sync_fetch_and_add(i, j);
    }

    static inline bool CompareAndSet_NoSync(volatile T* i, T c, T value)
    {
        return __sync_bool_compare_and_swap(i, c, value);
    }

#endif // OS
};


// *** AtomicOpsRaw_DefImpl

// Wraps a raw implementation O and derives the fenced operation variants from
// its _NoSync primitives and sync types.
template<class O>
struct AtomicOpsRaw_DefImpl : public O
{
    typedef typename O::T           O_T;
    typedef typename O::FullSync    O_FullSync;
    typedef typename O::AcquireSync O_AcquireSync;
    typedef typename O::ReleaseSync O_ReleaseSync;

    // If there is no thread support, provide trivial, non-fenced implementations.
#ifndef OVR_ENABLE_THREADS
    inline static O_T  Exchange_NoSync(volatile O_T* p, O_T val)    { O_T old = *p; *p = val; return old; }
    inline static O_T  ExchangeAdd_NoSync(volatile O_T* p, O_T val) { O_T old = *p; *p += val; return old; }
    inline static bool CompareAndSet_NoSync(volatile O_T* p, O_T c, O_T val)
    {
        if (*p == c) { *p = val; return 1; }
        return 0;
    }
#endif
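
    // The fenced variants pair a scoped sync object with the corresponding
    // _NoSync primitive; a representative sketch of the pattern (the full set
    // also covers the Acquire/Release ExchangeAdd and CompareAndSet forms):
    inline static O_T Exchange_Sync(volatile O_T* p, O_T val)    { O_FullSync    sync; (void)sync; return O::Exchange_NoSync(p, val); }
    inline static O_T Exchange_Release(volatile O_T* p, O_T val) { O_ReleaseSync sync; (void)sync; return O::Exchange_NoSync(p, val); }
    inline static O_T Exchange_Acquire(volatile O_T* p, O_T val) { O_AcquireSync sync; (void)sync; return O::Exchange_NoSync(p, val); }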
};


// Select the raw implementation for a given operand size.
template<int size>
struct AtomicOpsRaw : public AtomicOpsRawBase { };

template<>
struct AtomicOpsRaw<4> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_4ByteImpl> { };
template<>
struct AtomicOpsRaw<8> : public AtomicOpsRaw_DefImpl<AtomicOpsRaw_8ByteImpl> { };


// *** AtomicOps

// Implements atomic operations for a class C of suitable size by forwarding
// to the matching AtomicOpsRaw implementation.
template<class C>
class AtomicOps
{
    typedef AtomicOpsRaw<sizeof(C)>   Ops;
    typedef typename Ops::T           T;
    typedef volatile typename Ops::T* PT;
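
    // Values of C are converted through a union to the raw integral type T,
    // avoiding pointer-size warnings and strict-aliasing violations. A sketch
    // of the forwarding pattern used by the members of this class:
    union C2T_union { C c; T t; };

public:
    inline static C Exchange_Sync(volatile C* p, C val)
    { C2T_union u; u.c = val; u.t = Ops::Exchange_Sync((PT)p, u.t); return u.c; }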


// *** AtomicValueBase

// Base class for atomic value objects; wraps a volatile Value of type T and
// exposes the AtomicOps operations on it as members.
template<class T>
class AtomicValueBase
{
protected:
    typedef AtomicOps<T> Ops;

public:
    volatile T Value;

    // A plain read suffices here; most algorithms do not need an atomic load.
    inline operator T() const { return Value; }
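
    // The remaining members forward to AtomicOps on &Value; a representative
    // sketch (CompareAndSet_Sync on AtomicOps follows the same union-cast
    // forwarding pattern shown above):
    inline T    Exchange_Sync(T val)           { return Ops::Exchange_Sync(&Value, val); }
    inline bool CompareAndSet_Sync(T c, T val) { return Ops::CompareAndSet_Sync(&Value, c, val); }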
};


// *** AtomicPtr

// Atomic pointer of type T*; supports fenced exchange, pointer-arithmetic
// ExchangeAdd overloads, and compare/set.
template<class T>
class AtomicPtr : public AtomicValueBase<T*>
{
public:
    // Initialize the pointer value to 0.
    inline AtomicPtr() : AtomicValueBase<T*>() { this->Value = 0; }
    explicit inline AtomicPtr(T* pval) : AtomicValueBase<T*>(pval) { }
};


// *** AtomicInt

// Atomic integer of type T; layers arithmetic and bitwise operators over the
// base exchange/compare-set operations.
template<class T>
class AtomicInt : public AtomicValueBase<T>
{
public:
    inline AtomicInt() : AtomicValueBase<T>() { }
    explicit inline AtomicInt(T val) : AtomicValueBase<T>(val) { }

    // The shift-assign operators are CAS retry loops: re-read Value, compute
    // the shifted result, and publish it only if Value has not changed.
    T operator>>=(unsigned bits)
    {
        T comp, newVal;
        do { comp = AtomicValueBase<T>::Value; newVal = comp >> bits; }
        while (!AtomicValueBase<T>::CompareAndSet_Sync(comp, newVal));
        return newVal;
    }

    T operator<<=(unsigned bits)
    {
        T comp, newVal;
        do { comp = AtomicValueBase<T>::Value; newVal = comp << bits; }
        while (!AtomicValueBase<T>::CompareAndSet_Sync(comp, newVal));
        return newVal;
    }
};
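
// Usage sketch (illustrative, not part of the header): AtomicInt as a shared
// counter. operator++ and operator+= are assumed to map onto ExchangeAdd_Sync,
// matching the CAS-loop operator pattern above.
inline UInt32 BumpCounter(AtomicInt<UInt32>& counter)
{
    ++counter;                          // atomic increment
    counter += 5;                       // atomic add
    return counter.Exchange_Sync(0);    // reset, returning the previous value
}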


// *** Lock

// Lightweight mutex wrapper; uses CRITICAL_SECTION on Win32 and a recursive
// pthread mutex elsewhere.
class Lock
{
    // Locks must not be allocated through standard heap means (the allocator
    // itself relies on this class), so 'operator delete' is hidden.
    void operator delete(void*) {}

#if !defined(OVR_ENABLE_THREADS)

public:
    // With no thread support, locking does nothing.
    inline Lock() { }
    inline Lock(unsigned) { }
    inline ~Lock() { }
    inline void DoLock() { }
    inline void Unlock() { }

#elif defined(OVR_OS_WIN32)

    CRITICAL_SECTION cs;

public:
    Lock(unsigned spinCount = 0);
    ~Lock();
    // Locking functions.
    inline void DoLock() { ::EnterCriticalSection(&cs); }
    inline void Unlock() { ::LeaveCriticalSection(&cs); }

#else

    pthread_mutex_t mutex;

public:
    static pthread_mutexattr_t RecursiveAttr;
    static bool                RecursiveAttrInit;

    Lock(unsigned dummy = 0)
    {
        // Initialize the shared recursive-mutex attribute on first use.
        if (!RecursiveAttrInit)
        {
            pthread_mutexattr_init(&RecursiveAttr);
            pthread_mutexattr_settype(&RecursiveAttr, PTHREAD_MUTEX_RECURSIVE);
            RecursiveAttrInit = 1;
        }
        pthread_mutex_init(&mutex, &RecursiveAttr);
    }
    ~Lock()              { pthread_mutex_destroy(&mutex); }
    inline void DoLock() { pthread_mutex_lock(&mutex); }
    inline void Unlock() { pthread_mutex_unlock(&mutex); }

#endif // OVR_ENABLE_THREADS
};
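

// Usage sketch (illustrative; the guard type is hypothetical, not part of this
// header): DoLock/Unlock are normally paired through a small RAII guard so the
// lock is released on every exit path.
class LockGuard
{
    Lock* pLock;
public:
    explicit LockGuard(Lock* plock) : pLock(plock) { pLock->DoLock(); }
    ~LockGuard()                                   { pLock->Unlock(); }
};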