atomic_integral.h
// Copyright (c) Electronic Arts Inc. All rights reserved.


#ifndef EASTL_ATOMIC_INTERNAL_INTEGRAL_H
#define EASTL_ATOMIC_INTERNAL_INTEGRAL_H

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
    #pragma once
#endif


#include "atomic_push_compiler_options.h"


namespace eastl
{


namespace internal
{

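// The EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_*_IMPL macros below stamp out member functions whose
// bodies consist only of static_asserts. They give the unspecialized base class the complete
// integral API, but any call to one of them is a compile-time error: either the memory-order
// argument is not one of the eastl memory-order tag types, or the object is volatile-qualified,
// which these atomics reject via EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN.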
#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(funcName) \
    template <typename Order> \
    T funcName(T /*arg*/, Order /*order*/) EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_INVALID_MEMORY_ORDER(T); \
    } \
    \
    template <typename Order> \
    T funcName(T /*arg*/, Order /*order*/) volatile EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
    } \
    \
    T funcName(T /*arg*/) volatile EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
    }

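// Pre- and post-increment/decrement on a volatile atomic are likewise rejected at compile time.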
#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(operatorOp) \
    T operator operatorOp() volatile EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
    } \
    \
    T operator operatorOp(int) volatile EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
    }

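// Compound-assignment operators (+=, -=, &=, |=, ^=) on a volatile atomic are rejected the same way.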
#define EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(operatorOp) \
    T operator operatorOp(T /*arg*/) volatile EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_STATIC_ASSERT_VOLATILE_MEM_FN(T); \
    }

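// atomic_integral_base layers the integral operations (fetch_add, fetch_and, operator++, +=, ...)
// on top of atomic_base_width. Every operation here is one of the static-assert stubs above; the
// per-width specializations of atomic_integral_width further down hide them with real implementations.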
    template <typename T, unsigned width = sizeof(T)>
    struct atomic_integral_base : public atomic_base_width<T, width>
    {
    private:

        using Base = atomic_base_width<T, width>;

    public: /* ctors */

        EA_CONSTEXPR atomic_integral_base(T desired) EA_NOEXCEPT
            : Base{ desired }
        {
        }

        EA_CONSTEXPR atomic_integral_base() EA_NOEXCEPT = default;

        atomic_integral_base(const atomic_integral_base&) EA_NOEXCEPT = delete;

    public: /* assignment operator */

        using Base::operator=;

        atomic_integral_base& operator=(const atomic_integral_base&) EA_NOEXCEPT = delete;
        atomic_integral_base& operator=(const atomic_integral_base&) volatile EA_NOEXCEPT = delete;

    public: /* fetch_add */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_add)

    public: /* add_fetch */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(add_fetch)

    public: /* fetch_sub */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_sub)

    public: /* sub_fetch */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(sub_fetch)

    public: /* fetch_and */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_and)

    public: /* and_fetch */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(and_fetch)

    public: /* fetch_or */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_or)

    public: /* or_fetch */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(or_fetch)

    public: /* fetch_xor */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(fetch_xor)

    public: /* xor_fetch */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_FUNCS_IMPL(xor_fetch)

    public: /* operator++ && operator-- */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(++)

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_INC_DEC_OPERATOR_IMPL(--)

    public: /* operator+= && operator-= */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(+=)

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(-=)

    public: /* operator&= */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(&=)

    public: /* operator|= */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(|=)

    public: /* operator^= */

        EASTL_ATOMIC_INTEGRAL_STATIC_ASSERT_ASSIGNMENT_OPERATOR_IMPL(^=)

    };

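// Primary template: declared but never defined. Only the widths specialized below are usable,
// so instantiating an atomic integral of an unsupported width fails at compile time.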
    template <typename T, unsigned width = sizeof(T)>
    struct atomic_integral_width;

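// The macros below forward each operation to a width- and order-specific EASTL_ATOMIC_* macro.
// EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN pastes the pieces together, so, for example, fetch_add on a
// 4-byte type with sequential consistency expands to
// EASTL_ATOMIC_FETCH_ADD_SEQ_CST_32(T, retVal, this->GetAtomicAddress(), arg),
// an operation macro provided elsewhere by the compiler/architecture atomic backends.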
#define EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits) \
    T retVal; \
    EA_PREPROCESSOR_JOIN(op, bits)(T, retVal, this->GetAtomicAddress(), arg); \
    return retVal;

#define EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, op, bits) \
    T funcName(T arg) EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
    }

#define EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, orderType, op, bits) \
    T funcName(T arg, orderType) EA_NOEXCEPT \
    { \
        EASTL_ATOMIC_INTEGRAL_FUNC_IMPL(op, bits); \
    }

#define EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, Order) \
    EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_, fetchOp), Order)

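// Generates the full overload set for one fetch operation: a using-declaration that keeps the
// base class static-assert overloads visible, the default (sequentially consistent) overload,
// and one overload per memory-order tag type.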
#define EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(funcName, fetchOp, bits) \
    using Base::funcName; \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_IMPL(funcName, EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits) \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_relaxed_s, \
                                            EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELAXED_), bits) \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acquire_s, \
                                            EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQUIRE_), bits) \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_release_s, \
                                            EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _RELEASE_), bits) \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_acq_rel_s, \
                                            EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _ACQ_REL_), bits) \
    \
    EASTL_ATOMIC_INTEGRAL_FETCH_ORDER_IMPL(funcName, eastl::internal::memory_order_seq_cst_s, \
                                            EASTL_ATOMIC_INTEGRAL_FETCH_OP_JOIN(fetchOp, _SEQ_CST_), bits)

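// Pre-increment/decrement return the updated value (add_fetch/sub_fetch); post-increment/decrement
// return the original value (fetch_add/fetch_sub). Both are sequentially consistent, matching the
// std::atomic operator semantics.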
#define EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(operatorOp, preFuncName, postFuncName) \
    using Base::operator operatorOp; \
    \
    T operator operatorOp() EA_NOEXCEPT \
    { \
        return preFuncName(1, eastl::memory_order_seq_cst); \
    } \
    \
    T operator operatorOp(int) EA_NOEXCEPT \
    { \
        return postFuncName(1, eastl::memory_order_seq_cst); \
    }

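// Compound assignment returns the updated value, so += maps to add_fetch, &= to and_fetch, and so on.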
#define EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(operatorOp, funcName) \
    using Base::operator operatorOp; \
    \
    T operator operatorOp(T arg) EA_NOEXCEPT \
    { \
        return funcName(arg, eastl::memory_order_seq_cst); \
    }

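// Defines the atomic_integral_width specialization for one storage width (bytes/bits). Each
// operation hides the corresponding static-assert stub inherited from atomic_integral_base and
// forwards to the width-specific EASTL_ATOMIC_* operation macros.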
#define EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(bytes, bits) \
    template <typename T> \
    struct atomic_integral_width<T, bytes> : public atomic_integral_base<T, bytes> \
    { \
    private: \
        \
        using Base = atomic_integral_base<T, bytes>; \
        \
    public: /* ctors */ \
        \
        EA_CONSTEXPR atomic_integral_width(T desired) EA_NOEXCEPT \
            : Base{ desired } \
        { \
        } \
        \
        EA_CONSTEXPR atomic_integral_width() EA_NOEXCEPT = default; \
        \
        atomic_integral_width(const atomic_integral_width&) EA_NOEXCEPT = delete; \
        \
    public: /* assignment operator */ \
        \
        using Base::operator=; \
        \
        atomic_integral_width& operator=(const atomic_integral_width&) EA_NOEXCEPT = delete; \
        atomic_integral_width& operator=(const atomic_integral_width&) volatile EA_NOEXCEPT = delete; \
        \
    public: /* fetch_add */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_add, FETCH_ADD, bits) \
        \
    public: /* add_fetch */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(add_fetch, ADD_FETCH, bits) \
        \
    public: /* fetch_sub */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_sub, FETCH_SUB, bits) \
        \
    public: /* sub_fetch */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(sub_fetch, SUB_FETCH, bits) \
        \
    public: /* fetch_and */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_and, FETCH_AND, bits) \
        \
    public: /* and_fetch */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(and_fetch, AND_FETCH, bits) \
        \
    public: /* fetch_or */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_or, FETCH_OR, bits) \
        \
    public: /* or_fetch */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(or_fetch, OR_FETCH, bits) \
        \
    public: /* fetch_xor */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(fetch_xor, FETCH_XOR, bits) \
        \
    public: /* xor_fetch */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_FUNCS_IMPL(xor_fetch, XOR_FETCH, bits) \
        \
    public: /* operator++ && operator-- */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(++, add_fetch, fetch_add) \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_INC_DEC_OPERATOR_IMPL(--, sub_fetch, fetch_sub) \
        \
    public: /* operator+= && operator-= */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(+=, add_fetch) \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(-=, sub_fetch) \
        \
    public: /* operator&= */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(&=, and_fetch) \
        \
    public: /* operator|= */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(|=, or_fetch) \
        \
    public: /* operator^= */ \
        \
        EASTL_ATOMIC_INTEGRAL_FETCH_ASSIGNMENT_OPERATOR_IMPL(^=, xor_fetch) \
        \
    };

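// Specializations are generated only for the widths the target platform supports; the
// EASTL_ATOMIC_HAS_*BIT macros are expected to be defined by the architecture/compiler
// atomic detection headers.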
#if defined(EASTL_ATOMIC_HAS_8BIT)
    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(1, 8)
#endif

#if defined(EASTL_ATOMIC_HAS_16BIT)
    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(2, 16)
#endif

#if defined(EASTL_ATOMIC_HAS_32BIT)
    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(4, 32)
#endif

#if defined(EASTL_ATOMIC_HAS_64BIT)
    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(8, 64)
#endif

#if defined(EASTL_ATOMIC_HAS_128BIT)
    EASTL_ATOMIC_INTEGRAL_WIDTH_SPECIALIZE(16, 128)
#endif


} // namespace internal


} // namespace eastl


#include "atomic_pop_compiler_options.h"


#endif /* EASTL_ATOMIC_INTERNAL_INTEGRAL_H */