// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

internal::SequenceNumber Arena::lifecycle_id_generator_;
#ifdef PROTOBUF_USE_DLLS
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#elif defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

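// Sets up a freshly constructed arena: assigns a new lifecycle id, clears
// the block and cleanup lists, adopts a user-supplied initial block if one
// was provided, and fires the on_arena_init hook.
//
// Minimal usage sketch (assuming the Arena(const ArenaOptions&) constructor
// declared in arena.h): supplying an initial block lets early allocations
// come from caller-owned memory without touching the heap.
//
//   char buffer[4096];
//   ArenaOptions options;
//   options.initial_block = buffer;
//   options.initial_block_size = sizeof(buffer);
//   Arena arena(options);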
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook.
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

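// Runs all registered cleanup functions, frees the owned blocks, and fires
// the on_arena_destruction hook with the total number of bytes allocated.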
Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook.
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

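// Destroys the arena's contents and makes its memory available for reuse.
// Returns the total number of bytes that had been allocated.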
uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

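// Shared implementation of Reset() and the destructor: runs the cleanup
// list, frees blocks, and fires the on_arena_reset hook.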
uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook.
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

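// Allocates a block with room for at least n bytes of payload. Block sizes
// grow geometrically: each new block doubles the previous one, capped at
// max_block_size, so large arenas amortize allocation overhead.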
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

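// Prepends b to the block list. AddBlock() takes blocks_lock_ to serialize
// writers; AddBlockInternal() may be called directly only when no other
// thread can be mutating the list (as in Init() and FreeBlocks()).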
void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(internal::NoBarrier_Load(&blocks_));
  internal::Release_Store(&blocks_, reinterpret_cast<internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    internal::Release_Store(&hint_, reinterpret_cast<internal::AtomicWord>(b));
  }
}

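// Pushes an {element, cleanup function} pair onto the lock-free cleanup
// stack. The node itself lives in arena memory, so it needs no separate
// deallocation.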
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(internal::NoBarrier_AtomicExchange(
      &cleanup_list_, reinterpret_cast<internal::AtomicWord>(node)));
}

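// Main allocation entry point: rounds n up to an 8-byte boundary, reports
// the allocation to the monitoring hook if one is installed, then tries two
// lock-free fast paths (this thread's cached block, then the arena-wide
// hint) before falling back to SlowAlloc().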
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

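// Bump-pointer allocation from b. Callers must own b and must already have
// checked that b->avail() >= n.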
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

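// Slow path: re-finds this thread's most recent block in the arena and uses
// it if the request fits; otherwise allocates a new block, publishes it, and
// caches it for subsequent allocations by this thread.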
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    internal::NoBarrier_Store(&hint_,
                              reinterpret_cast<internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

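// Returns the total size of all blocks, including unused capacity. The list
// is read without locking, so the result may be stale while other threads
// are allocating concurrently.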
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

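// Returns the number of bytes actually handed out, excluding block headers
// and unused tail space. Same lock-free caveat as SpaceAllocated().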
uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

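// Deallocates every owned block and returns the total bytes allocated.
// Because blocks are prepended, a user-supplied initial block is always the
// tail of the list; it is recycled for reuse rather than freed.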
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, so skip freeing its memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

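// Invokes every registered cleanup function. The nodes live in arena memory,
// so the list itself is reclaimed when the blocks are freed.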
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

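// Returns the most recently added block owned by me, or NULL if none exists.
// This is a linear scan over all blocks; see the TODO below.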
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google