// Copyright (c) Electronic Arts Inc. All rights reserved.


#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_H
#define EASTL_ATOMIC_INTERNAL_ARCH_X86_H

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
    #pragma once
#endif


#if (defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64)
    #define EASTL_ARCH_ATOMIC_HAS_128BIT
#elif defined(EA_COMPILER_MSVC)
    #if EA_PLATFORM_PTR_SIZE == 8
        #define EASTL_ARCH_ATOMIC_HAS_128BIT
    #endif
#endif
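
// Illustrative sketch of how the availability flag is intended to be consumed (the guard
// pattern below is an assumption, not something this header defines). On both toolchains,
// 128-bit atomics on x86-64 are ultimately backed by the cmpxchg16b instruction: MSVC exposes
// it via _InterlockedCompareExchange128, while clang and GCC can generate it for 128-bit
// compare-exchange (e.g. on __int128).
//
//     #if defined(EASTL_ARCH_ATOMIC_HAS_128BIT)
//         // define the 128-bit flavors of fetch_add, exchange, cmpxchg, load, store, ...
//     #endif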


#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)


    #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
        static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");

    #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)


    #define EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
        { \
            bool cmpxchgRet; \
            EASTL_ATOMIC_LOAD_RELAXED_64(type, ret, ptr); \
            do \
            { \
                type computedDesired; \
                PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
                EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _64)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
            } while (!cmpxchgRet); \
            POST_COMPUTE_RET(ret, ret, (val)); \
        }


#endif
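
// Illustrative sketch of how the 64-bit CAS-loop macro above is meant to be parameterized
// (the hook names below are hypothetical, for illustration only). A fetch_add-style operation
// supplies a PRE_COMPUTE_DESIRED hook and keeps the empty post hook, so ret ends up holding
// the previously observed value:
//
//     #define FETCH_ADD_PRE_COMPUTE_DESIRED(ret, observed, val) \
//         ret = ((observed) + (val))
//
//     // Expands to: relaxed 64-bit load of *(ptr) into ret, then retry a strong cmpxchg
//     // with (ret + val) as the desired value until it succeeds.
//     EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(int64_t, ret, ptr, val, RELAXED, \
//                                      FETCH_ADD_PRE_COMPUTE_DESIRED, \
//                                      EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)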


#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


    #define EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED(ret, observed, val) \
        static_assert(false, "EASTL_ARCH_ATOMIC_X86_NOP_PRE_COMPUTE_DESIRED() must be implemented!");

    #define EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET(ret, prevObserved, val)


    #define EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(type, ret, ptr, val, MemoryOrder, PRE_COMPUTE_DESIRED, POST_COMPUTE_RET) \
        { \
            bool cmpxchgRet; \
            /* This is intentionally a non-atomic 128-bit load, which may observe shearing. */ \
            /* Either we do not observe a consistent value of *(ptr), in which case the cmpxchg */ \
            /* fails and stores the atomically observed value into ret, or the non-atomic load */ \
            /* got lucky and the cmpxchg succeeds because the observed value equals the value */ \
            /* in *(ptr). Thus we can optimistically start with a non-atomic load. */ \
            ret = *(ptr); \
            do \
            { \
                type computedDesired; \
                PRE_COMPUTE_DESIRED(computedDesired, ret, (val)); \
                EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), computedDesired); \
            } while (!cmpxchgRet); \
            POST_COMPUTE_RET(ret, ret, (val)); \
        }


#endif
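
// Illustrative sketch of the 128-bit variant's parameterization (hypothetical hook names, for
// illustration only). An add_fetch-style operation also supplies a POST_COMPUTE_RET hook, which
// turns the previously observed value left in ret by the CAS loop into the post-operation result:
//
//     #define ADD_FETCH_PRE_COMPUTE_DESIRED(ret, observed, val) \
//         ret = ((observed) + (val))
//
//     #define ADD_FETCH_POST_COMPUTE_RET(ret, prevObserved, val) \
//         ret = ((prevObserved) + (val))
//
//     EASTL_ARCH_ATOMIC_X86_OP_128_IMPL(__int128, ret, ptr, val, SEQ_CST, \
//                                       ADD_FETCH_PRE_COMPUTE_DESIRED, \
//                                       ADD_FETCH_POST_COMPUTE_RET)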


#include "arch_x86_fetch_add.h"
#include "arch_x86_fetch_sub.h"

#include "arch_x86_fetch_and.h"
#include "arch_x86_fetch_xor.h"
#include "arch_x86_fetch_or.h"

#include "arch_x86_add_fetch.h"
#include "arch_x86_sub_fetch.h"

#include "arch_x86_and_fetch.h"
#include "arch_x86_xor_fetch.h"
#include "arch_x86_or_fetch.h"

#include "arch_x86_exchange.h"

#include "arch_x86_cmpxchg_weak.h"
#include "arch_x86_cmpxchg_strong.h"

#include "arch_x86_memory_barrier.h"

#include "arch_x86_thread_fence.h"

#include "arch_x86_load.h"
#include "arch_x86_store.h"


#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_H */