#ifndef ATOMICOPS_HPP_INCLUDED
#define ATOMICOPS_HPP_INCLUDED

#include <cassert>
// Compiler detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

// Architecture detection
#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif
#define AE_UNUSED(x) ((void)x)
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC-compliant attribute syntax
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif
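// Usage sketch (illustrative, not from the original header):
//   AE_ALIGN(64) int spinCounter;   // align the variable to a cache-line boundary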
// Portable memory-ordering enumeration used by the fences below
enum memory_order {
	memory_order_relaxed,
	memory_order_acquire,
	memory_order_release,
	memory_order_acq_rel,
	memory_order_seq_cst,

	// memory_order_sync: forces a full sync (#LoadLoad, #LoadStore, #StoreStore, and #StoreLoad)
	memory_order_sync = memory_order_seq_cst
};
#if defined(AE_VCPP) || defined(AE_ICC)
// Older MSVC/ICC don't provide usable std::atomic fences, so use compiler intrinsics

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif
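// AeFullSync issues a full memory barrier; AeLiteSync is a lighter barrier that is
// sufficient for acquire/release ordering (e.g. lwsync instead of sync on PowerPC).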
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4316) // 'object allocated on the heap may not be aligned'
#pragma warning(disable: 4365) // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' error when using `assert`
#endif
AE_FORCEINLINE void compiler_fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst: _ReadWriteBarrier(); break;
		default: assert(false);
	}
}
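// x86/x64 have a strong hardware memory model: plain loads already behave as acquires
// and plain stores as releases, so acquire/release fences only need to stop compiler
// reordering; only seq_cst needs a real fence to order stores before later loads.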
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst:
			_ReadWriteBarrier();
			AeFullSync();
			_ReadWriteBarrier();
			break;
		default: assert(false);
	}
}
#else
AE_FORCEINLINE void fence(memory_order order)
{
	// Unspecialized architecture: use heavier memory barriers everywhere just in case
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); AeLiteSync(); _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); AeLiteSync(); _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); AeLiteSync(); _ReadWriteBarrier(); break;
		case memory_order_seq_cst: _ReadWriteBarrier(); AeFullSync(); _ReadWriteBarrier(); break;
		default: assert(false);
	}
}
#endif
#else
// The compiler supports std::atomic fences; use the standard library
#include <atomic>

AE_FORCEINLINE void compiler_fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}
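// compiler_fence above only restricts compiler reordering (std::atomic_signal_fence);
// fence below additionally emits a CPU-level barrier via std::atomic_thread_fence.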
AE_FORCEINLINE void fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}
#endif
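// Illustrative use of the fences (a sketch, not part of the original header),
// assuming `data` and `ready` are variables shared between two threads:
//
//   // producer thread:
//   data = 42;
//   fence(memory_order_release);   // publish `data` before `ready`
//   ready = 1;
//
//   // consumer thread:
//   if (ready == 1) {
//       fence(memory_order_acquire);   // observe all writes made before the release
//       assert(data == 42);
//   }
//
// In real code `data` and `ready` should themselves be atomic (e.g. weak_atomic below).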
#if !defined(AE_VCPP) || _MSC_VER >= 1700
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>
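// The weak_atomic<T> class below is meant to be combined with the explicit fence()
// calls defined above whenever ordering between threads is required.
// Illustrative declaration (not from the original header): weak_atomic<int> front;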
// WARNING: *not* a replacement for std::atomic. weak_atomic provides no memory
// ordering guarantees of its own; atomicity is only guaranteed for types that already
// have atomic loads and stores at the hardware level (aligned pointers and integers
// on most platforms).
template<typename T>
class weak_atomic
{
public:
	weak_atomic() { }

#pragma warning(disable: 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
	template<typename U> weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#pragma warning(default: 4100)

	AE_FORCEINLINE operator T() const { return load(); }
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) { value = std::forward<U>(x); return *this; }
	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) { value = other.value; return *this; }

	AE_FORCEINLINE T load() const { return value; }
#else
	template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) { value.store(std::forward<U>(x), std::memory_order_relaxed); return *this; }
	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) { value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed); return *this; }

	AE_FORCEINLINE T load() const { return value.load(std::memory_order_relaxed); }
#endif
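	// Note: all accesses above deliberately use std::memory_order_relaxed; callers
	// are expected to issue explicit fence() calls when ordering matters.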
private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	// No std::atomic support; `volatile` at least prevents the compiler from caching
	// the value in a register, at the cost of slower accesses.
	volatile T value;
#else
	std::atomic<T> value;
#endif
};

#ifdef AE_VCPP
#pragma warning(pop)
#endif

#endif // ATOMICOPS_HPP_INCLUDED