arch_x86_exchange.h
// Copyright (c) Electronic Arts Inc. All rights reserved.
/////////////////////////////////////////////////////////////////////////////////


#ifndef EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H
#define EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H

#if defined(EA_PRAGMA_ONCE_SUPPORTED)
    #pragma once
#endif


/////////////////////////////////////////////////////////////////////////////////
//
// void EASTL_ARCH_ATOMIC_EXCHANGE_*_N(type, type ret, type * ptr, type val)
//
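// A rough usage sketch (illustrative only; the real call sites are the higher-level
// EASTL_ATOMIC_* dispatch macros elsewhere in the atomic backend, and the variable
// names below are placeholders). For example, a 64-bit sequentially consistent
// exchange would be driven like this:
//
//     uint64_t previous;
//     EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(uint64_t, previous, pObject, desired);
//     /* previous now holds the value *pObject contained immediately before the exchange */
//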
#if defined(EA_COMPILER_MSVC) && defined(EA_PROCESSOR_X86)


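    // The 64-bit variants below are built on EASTL_ARCH_ATOMIC_X86_OP_64_IMPL (defined
    // elsewhere in this backend), which is expected to implement a strong compare-exchange
    // loop, since 32-bit x86 offers no single-instruction 64-bit exchange. The
    // PRE_COMPUTE_DESIRED hook simply proposes val as the desired value on every
    // iteration, which turns that loop into a plain exchange; the NOP post-compute hook
    // then leaves the observed value as the result.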
    #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED(ret, observed, val) \
        ret = (val)


    #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_64(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELAXED, \
                                         EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
                                         EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_64(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQUIRE, \
                                         EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
                                         EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_64(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, RELEASE, \
                                         EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
                                         EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_64(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, ACQ_REL, \
                                         EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
                                         EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_64(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_OP_64_IMPL(type, ret, ptr, val, SEQ_CST, \
                                         EASTL_ARCH_ATOMIC_X86_EXCHANGE_PRE_COMPUTE_DESIRED, \
                                         EASTL_ARCH_ATOMIC_X86_NOP_POST_COMPUTE_RET)

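    // Conceptually, each 64-bit exchange above reduces to a cmpxchg8b-backed strong
    // compare-exchange loop in the spirit of the 128-bit implementation further down in
    // this file (a sketch only; the exact expansion is whatever
    // EASTL_ARCH_ATOMIC_X86_OP_64_IMPL is defined to be):
    //
    //     ret = initial observation of *(ptr);
    //     do
    //     {
    //         /* propose val; on failure ret is refreshed with the value actually observed */
    //     } while (the strong cmpxchg did not succeed);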

#endif


#if ((defined(__clang__) || defined(EA_COMPILER_GNUC)) && defined(EA_PROCESSOR_X86_64))


    #define EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, MemoryOrder) \
        { \
            bool cmpxchgRet; \
            /* This is intentionally a non-atomic 128-bit load, which may observe shearing. */ \
            /* Either the load does not observe the current value of *(ptr), in which case the cmpxchg */ \
            /* fails and ret is refreshed with the atomically observed value before we retry; or the */ \
            /* non-atomic load got lucky, the observed value equals the value in *(ptr), and the cmpxchg */ \
            /* succeeds. Thus we can optimistically start with a non-atomic load. */ \
            ret = *(ptr); \
            do \
            { \
                EA_PREPROCESSOR_JOIN(EA_PREPROCESSOR_JOIN(EASTL_ATOMIC_CMPXCHG_STRONG_, MemoryOrder), _128)(type, cmpxchgRet, ptr, &(ret), val); \
            } while (!cmpxchgRet); \
        }
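
    // Design note: x86-64 has no 128-bit exchange instruction, only cmpxchg16b, so every
    // memory order below forwards to the strong compare-exchange loop above with the
    // corresponding ordering applied to the cmpxchg.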


    #define EASTL_ARCH_ATOMIC_EXCHANGE_RELAXED_128(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELAXED)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQUIRE_128(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQUIRE)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_RELEASE_128(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, RELEASE)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_ACQ_REL_128(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, ACQ_REL)

    #define EASTL_ARCH_ATOMIC_EXCHANGE_SEQ_CST_128(type, ret, ptr, val) \
        EASTL_ARCH_ATOMIC_X86_EXCHANGE_128(type, ret, ptr, val, SEQ_CST)


#endif


#endif /* EASTL_ATOMIC_INTERNAL_ARCH_X86_EXCHANGE_H */