// Copyright (c) Electronic Arts Inc. All rights reserved.


#ifndef EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H
#define EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
	#pragma once
#endif


EA_DISABLE_ALL_VC_WARNINGS();
#include <Windows.h>
#include <intrin.h>
EA_RESTORE_ALL_VC_WARNINGS();


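// These macros advertise the operand widths for which this MSVC backend can
// provide native atomics. 128-bit support requires a 64-bit target, where the
// cmpxchg16b-style _InterlockedCompareExchange128 intrinsic is available.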
#define EASTL_COMPILER_ATOMIC_HAS_8BIT
#define EASTL_COMPILER_ATOMIC_HAS_16BIT
#define EASTL_COMPILER_ATOMIC_HAS_32BIT
#define EASTL_COMPILER_ATOMIC_HAS_64BIT

#if EA_PLATFORM_PTR_SIZE == 8
	#define EASTL_COMPILER_ATOMIC_HAS_128BIT
#endif


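// The fixed-width types mirror the operand types of the MSVC _Interlocked*
// intrinsic families; note that the 32-bit type is long (not int), matching
// signatures such as _InterlockedExchangeAdd(long volatile*, long).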
#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_8 char
#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_16 short
#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_32 long
#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_64 __int64

namespace eastl
{

namespace internal
{

struct FixedWidth128
{
	__int64 value[2];
};

} // namespace internal

} // namespace eastl

#define EASTL_COMPILER_ATOMIC_FIXED_WIDTH_TYPE_128 eastl::internal::FixedWidth128


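// The intrinsic spelling depends on the target architecture: on x86/x64 the
// _Interlocked* intrinsics are implicitly full-fence (sequentially consistent),
// so the requested MemoryOrder can be ignored; on ARM/ARM64, MSVC additionally
// provides _nf/_acq/_rel suffixed variants for weaker orderings.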
#if defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)


	#define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
		ret = Intrinsic(ptr, val)

	#define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
		ret = Intrinsic(ptr, val)

	#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
		ret = Intrinsic(ptr, exchange, comparand)

	#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
		ret = _InterlockedCompareExchange128(ptr, exchangeHigh, exchangeLow, comparandResult)


#elif defined(EA_PROCESSOR_ARM32) || defined(EA_PROCESSOR_ARM64)


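	// Map each EASTL memory order onto the matching ARM intrinsic suffix:
	// _nf ("no fence") for relaxed, _acq for acquire, _rel for release. There is
	// no combined suffix, so acq_rel and seq_cst fall back to the unsuffixed,
	// fully-fenced intrinsic.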
	#define EASTL_MSVC_INTRINSIC_RELAXED(Intrinsic) \
		EA_PREPROCESSOR_JOIN(Intrinsic, _nf)

	#define EASTL_MSVC_INTRINSIC_ACQUIRE(Intrinsic) \
		EA_PREPROCESSOR_JOIN(Intrinsic, _acq)

	#define EASTL_MSVC_INTRINSIC_RELEASE(Intrinsic) \
		EA_PREPROCESSOR_JOIN(Intrinsic, _rel)

	#define EASTL_MSVC_INTRINSIC_ACQ_REL(Intrinsic) \
		Intrinsic

	#define EASTL_MSVC_INTRINSIC_SEQ_CST(Intrinsic) \
		Intrinsic


	#define EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
		ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)

	#define EASTL_MSVC_ATOMIC_EXCHANGE_OP(ret, ptr, val, MemoryOrder, Intrinsic) \
		ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, val)

	#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(ret, ptr, comparand, exchange, MemoryOrder, Intrinsic) \
		ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(Intrinsic)(ptr, exchange, comparand)

	#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(ret, ptr, comparandResult, exchangeHigh, exchangeLow, MemoryOrder) \
		ret = EA_PREPROCESSOR_JOIN(EASTL_MSVC_INTRINSIC_, MemoryOrder)(_InterlockedCompareExchange128)(ptr, exchangeHigh, exchangeLow, comparandResult)


#endif

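// As an illustration (an expansion sketch, not code from the original header):
// on ARM64,
//
//     EASTL_MSVC_ATOMIC_FETCH_OP(ret, ptr, val, ACQUIRE, _InterlockedExchangeAdd);
//
// expands to
//
//     ret = _InterlockedExchangeAdd_acq(ptr, val);
//
// while on x86/x64 the same invocation selects the plain, fully-fenced
// _InterlockedExchangeAdd.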
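// No-op pre/post hooks for operations that need no extra computation around the
// intrinsic. Operation headers can substitute real hooks: a PRE hook can, for
// example, negate the operand so fetch_sub can reuse an add intrinsic, and a
// POST hook can recompute the new value for the op_fetch variants.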
#define EASTL_MSVC_NOP_POST_INTRIN_COMPUTE(ret, lhs, rhs)

#define EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE(ret, val) \
	ret = (val)


#define EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
	{ \
		integralType retIntegral; \
		type valCompute; \
		\
		PRE_INTRIN_COMPUTE(valCompute, (val)); \
		const integralType valIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, valCompute); \
		\
		EASTL_MSVC_ATOMIC_FETCH_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
								   valIntegral, MemoryOrder, fetchIntrinsic); \
		\
		ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
		POST_INTRIN_COMPUTE(ret, ret, (val)); \
	}

#define EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
	{ \
		integralType retIntegral; \
		EASTL_MSVC_ATOMIC_EXCHANGE_OP(retIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
									  EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (val)), MemoryOrder, \
									  exchangeIntrinsic); \
		\
		ret = EASTL_ATOMIC_TYPE_PUN_CAST(type, retIntegral); \
	}

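// Strong compare-exchange: the Interlocked intrinsic returns the value observed
// at *ptr, so success is detected by comparing that against the original
// comparand; on failure the observed value is written back through (expected),
// matching C++ compare_exchange semantics.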
#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
	{ \
		integralType comparandIntegral = EASTL_ATOMIC_TYPE_PUN_CAST(integralType, *(expected)); \
		integralType oldIntegral; \
		EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP(oldIntegral, EASTL_ATOMIC_VOLATILE_INTEGRAL_CAST(integralType, (ptr)), \
											comparandIntegral, EASTL_ATOMIC_TYPE_PUN_CAST(integralType, (desired)), \
											MemoryOrder, cmpxchgStrongIntrinsic); \
		\
		if (oldIntegral == comparandIntegral) \
		{ \
			ret = true; \
		} \
		else \
		{ \
			*(expected) = EASTL_ATOMIC_TYPE_PUN_CAST(type, oldIntegral); \
			ret = false; \
		} \
	}

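// _InterlockedCompareExchange128 takes the high and low 8 bytes of the desired
// value as separate __int64 arguments (ExchangeHigh is ptr + 8, ExchangeLow is
// ptr), reads the comparand through the last parameter and overwrites it with
// the value observed on failure, and returns 1 on success and 0 otherwise.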
#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder) \
	{ \
		union TypePun \
		{ \
			type templateType; \
			\
			struct exchange128 \
			{ \
				__int64 value[2]; \
			}; \
			\
			struct exchange128 exchangePun; \
		}; \
		\
		union TypePun typePun = { (desired) }; \
		\
		unsigned char cmpxchgRetChar; \
		cmpxchgRetChar = EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_128_OP(cmpxchgRetChar, EASTL_ATOMIC_VOLATILE_TYPE_CAST(__int64, (ptr)), \
																 EASTL_ATOMIC_TYPE_CAST(__int64, (expected)), \
																 typePun.exchangePun.value[1], typePun.exchangePun.value[0], \
																 MemoryOrder); \
		\
		ret = static_cast<bool>(cmpxchgRetChar); \
	}


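// Thin wrappers consumed by the per-operation headers below: FETCH_OP_N returns
// the prior value (no post-compute), while OP_FETCH_N threads a POST hook
// through so the new value can be derived from the returned old one.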
#define EASTL_MSVC_ATOMIC_FETCH_OP_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE) \
	EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, EASTL_MSVC_NOP_POST_INTRIN_COMPUTE)

#define EASTL_MSVC_ATOMIC_OP_FETCH_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE) \
	EASTL_MSVC_ATOMIC_FETCH_INTRIN_N(integralType, fetchIntrinsic, type, ret, ptr, val, MemoryOrder, PRE_INTRIN_COMPUTE, POST_INTRIN_COMPUTE)

#define EASTL_MSVC_ATOMIC_EXCHANGE_OP_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder) \
	EASTL_MSVC_ATOMIC_EXCHANGE_INTRIN_N(integralType, exchangeIntrinsic, type, ret, ptr, val, MemoryOrder)

#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder) \
	EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_N(integralType, cmpxchgStrongIntrinsic, type, ret, ptr, expected, desired, MemoryOrder)

#define EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_OP_128(type, ret, ptr, expected, desired, MemoryOrder) \
	EASTL_MSVC_ATOMIC_CMPXCHG_STRONG_INTRIN_128(type, ret, ptr, expected, desired, MemoryOrder)


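// A minimal sketch of how one of the per-operation headers below might plug
// into these wrappers (illustrative only; the macro name and intrinsic choice
// here are assumptions, and the real definitions live in the included files):
//
//     #define EASTL_COMPILER_ATOMIC_FETCH_ADD_RELAXED_32(type, ret, ptr, val) \
//         EASTL_MSVC_ATOMIC_FETCH_OP_N(long, _InterlockedExchangeAdd, type, ret, ptr, val, \
//                                      RELAXED, EASTL_MSVC_NOP_PRE_INTRIN_COMPUTE)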
#include "compiler_msvc_fetch_add.h"
#include "compiler_msvc_fetch_sub.h"

#include "compiler_msvc_fetch_and.h"
#include "compiler_msvc_fetch_xor.h"
#include "compiler_msvc_fetch_or.h"

#include "compiler_msvc_add_fetch.h"
#include "compiler_msvc_sub_fetch.h"

#include "compiler_msvc_and_fetch.h"
#include "compiler_msvc_xor_fetch.h"
#include "compiler_msvc_or_fetch.h"

#include "compiler_msvc_exchange.h"

#include "compiler_msvc_cmpxchg_weak.h"
#include "compiler_msvc_cmpxchg_strong.h"

#include "compiler_msvc_barrier.h"

#include "compiler_msvc_cpu_pause.h"

#include "compiler_msvc_signal_fence.h"


#endif /* EASTL_ATOMIC_INTERNAL_COMPILER_MSVC_H */