Nugget
fixed_pool.h
1 // Copyright (c) Electronic Arts Inc. All rights reserved.
4 
6 // This file implements the following
7 // aligned_buffer
8 // fixed_pool_base
9 // fixed_pool
10 // fixed_pool_with_overflow
11 // fixed_hashtable_allocator
12 // fixed_vector_allocator
13 // fixed_swap
14 //
16 
17 
18 #ifndef EASTL_INTERNAL_FIXED_POOL_H
19 #define EASTL_INTERNAL_FIXED_POOL_H
20 
21 
22 #include <EABase/eabase.h>
23 #if defined(EA_PRAGMA_ONCE_SUPPORTED)
24  #pragma once
25 #endif
26 
27 #include <EASTL/internal/config.h>
28 #include <EASTL/functional.h>
29 #include <EASTL/memory.h>
30 #include <EASTL/allocator.h>
31 #include <EASTL/type_traits.h>
32 
33 
34 EA_DISABLE_ALL_VC_WARNINGS();
35 #include <new>
36 EA_RESTORE_ALL_VC_WARNINGS();
37 
38 // 4275 - non dll-interface class used as base for DLL-interface classkey 'identifier'
39 EA_DISABLE_VC_WARNING(4275);
40 
41 
42 namespace eastl
43 {
44 
49  #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
50  #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
51  #endif
52 
53 
54 
56  // aligned_buffer
58 
/// aligned_buffer
///
/// A raw character buffer of 'size' bytes whose alignment is selected at
/// compile time by template specialization. The primary template imposes no
/// particular alignment; each power-of-two specialization (2..4096) applies
/// that alignment via the EABase EA_PREFIX_ALIGN/EA_POSTFIX_ALIGN macros,
/// which expand to the compiler-specific alignment attributes.
/// EASTL_MAY_ALIAS marks the element type so the compiler does not apply
/// strict-aliasing assumptions when objects are constructed in the buffer.
typedef char EASTL_MAY_ALIAS aligned_buffer_char;

// Primary template: no explicit alignment requested.
template <size_t size, size_t alignment>
struct aligned_buffer { aligned_buffer_char buffer[size]; };

template<size_t size>
struct aligned_buffer<size, 2>    { EA_PREFIX_ALIGN(2)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2);    };

template<size_t size>
struct aligned_buffer<size, 4>    { EA_PREFIX_ALIGN(4)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4);    };

template<size_t size>
struct aligned_buffer<size, 8>    { EA_PREFIX_ALIGN(8)    aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8);    };

template<size_t size>
struct aligned_buffer<size, 16>   { EA_PREFIX_ALIGN(16)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16);   };

template<size_t size>
struct aligned_buffer<size, 32>   { EA_PREFIX_ALIGN(32)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32);   };

template<size_t size>
struct aligned_buffer<size, 64>   { EA_PREFIX_ALIGN(64)   aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64);   };

template<size_t size>
struct aligned_buffer<size, 128>  { EA_PREFIX_ALIGN(128)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128);  };

template<size_t size>
struct aligned_buffer<size, 256>  { EA_PREFIX_ALIGN(256)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256);  };

template<size_t size>
struct aligned_buffer<size, 512>  { EA_PREFIX_ALIGN(512)  aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512);  };

template<size_t size>
struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };

template<size_t size>
struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };

template<size_t size>
struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
120 
121 
122 
124  // fixed_pool_base
126 
134  {
135  public:
/// fixed_pool_base
///
/// Constructs the pool over an (optional) memory buffer. With pMemory NULL
/// (or before init() is called), mpHead == mpNext == mpCapacity, so
/// can_allocate() is false and the pool is unusable until init() attaches
/// a real buffer and sets the node size.
fixed_pool_base(void* pMemory = NULL)
	: mpHead((Link*)pMemory)
	, mpNext((Link*)pMemory)
	, mpCapacity((Link*)pMemory)
	, mnNodeSize(0) // This is normally set in the init function.
{
	#if EASTL_FIXED_SIZE_TRACKING_ENABLED
		mnCurrentSize = 0;
		mnPeakSize = 0;
	#endif
}
149 
150 
153  // Disabled because the default is sufficient. While it normally makes no sense to deep copy
154  // this data, our usage of this class is such that this is OK and wanted.
155  //
156  // fixed_pool_base(const fixed_pool_base& x)
157  // {
158  // }
159 
160 
164  {
165  // By design we do nothing. We don't attempt to deep-copy member data.
166  return *this;
167  }
168 
169 
/// init
///
/// Attaches a memory buffer to the pool and carves it into nodes of
/// nodeSize bytes, honoring the requested alignment and alignmentOffset.
/// Defined out-of-line (exported via EASTL_API).
EASTL_API void init(void* pMemory, size_t memorySize, size_t nodeSize,
					size_t alignment, size_t alignmentOffset = 0);


/// peak_size
///
/// Returns the highest number of simultaneously outstanding allocations
/// observed so far. Only tracked when EASTL_FIXED_SIZE_TRACKING_ENABLED
/// is defined; otherwise this always returns 0.
size_t peak_size() const
{
	#if EASTL_FIXED_SIZE_TRACKING_ENABLED
		return mnPeakSize;
	#else
		return 0;
	#endif
}


/// can_allocate
///
/// Returns true if another node can be handed out: either the free list
/// is non-empty (mpHead) or unused space remains in the reserved area
/// (mpNext != mpCapacity).
bool can_allocate() const
{
	return (mpHead != NULL) || (mpNext != mpCapacity);
}
204 
205  public:
/// Link
/// Singly-linked free-list node overlaid on freed node storage. Each pool
/// node must therefore be at least sizeof(Link) bytes.
struct Link
{
	Link* mpNext;
};

Link*  mpHead;     // Head of the free (deallocated) node list.
Link*  mpNext;     // Next never-yet-allocated node within the reserved area.
Link*  mpCapacity; // One past the end of the reserved area.
size_t mnNodeSize; // Size in bytes of each node; set by init().

#if EASTL_FIXED_SIZE_TRACKING_ENABLED
	uint32_t mnCurrentSize; // Current number of allocated (outstanding) nodes.
	uint32_t mnPeakSize;    // Highest value of mnCurrentSize seen so far.
#endif
222 
223  }; // fixed_pool_base
224 
225 
226 
227 
228 
230  // fixed_pool
232 
240  class EASTL_API fixed_pool : public fixed_pool_base
241  {
242  public:
/// fixed_pool
///
/// Default constructor. The pool cannot allocate until init() is called
/// with a real buffer; pMemory here is typically NULL.
fixed_pool(void* pMemory = NULL)
	: fixed_pool_base(pMemory)
{
}


/// fixed_pool
///
/// Constructs and immediately initializes the pool over the given buffer.
/// See fixed_pool_base::init for parameter semantics.
fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
		   size_t alignment, size_t alignmentOffset = 0)
{
	init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
}
266 
267 
270  // Disabled because the default is sufficient. While it normally makes no sense to deep copy
271  // this data, our usage of this class is such that this is OK and wanted.
272  //
273  // fixed_pool(const fixed_pool& x)
274  // {
275  // }
276 
277 
281  {
282  // By design we do nothing. We don't attempt to deep-copy member data.
283  return *this;
284  }
285 
286 
292  void* allocate()
293  {
294  Link* pLink = mpHead;
295 
296  if(pLink) // If we have space...
297  {
298  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
299  if(++mnCurrentSize > mnPeakSize)
300  mnPeakSize = mnCurrentSize;
301  #endif
302 
303  mpHead = pLink->mpNext;
304  return pLink;
305  }
306  else
307  {
308  // If there's no free node in the free list, just
309  // allocate another from the reserved memory area
310 
311  if(mpNext != mpCapacity)
312  {
313  pLink = mpNext;
314 
315  mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
316 
317  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
318  if(++mnCurrentSize > mnPeakSize)
319  mnPeakSize = mnCurrentSize;
320  #endif
321 
322  return pLink;
323  }
324 
325  return NULL;
326  }
327  }
328 
/// allocate
///
/// Alignment-taking overload. Alignment was fixed when the pool was
/// initialized, so both arguments are ignored and the plain path is used.
void* allocate(size_t /*alignment*/, size_t /*offset*/)
{
	return allocate();
}
333 
340  void deallocate(void* p)
341  {
342  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
343  --mnCurrentSize;
344  #endif
345 
346  ((Link*)p)->mpNext = mpHead;
347  mpHead = ((Link*)p);
348  }
349 
350 
352 
353 
/// get_name
/// Returns the fixed debug name for this pool. fixed_pool stores no
/// per-instance name, so the compile-time default is always returned.
const char* get_name() const
{
	return EASTL_FIXED_POOL_DEFAULT_NAME;
}


/// set_name
/// Intentionally a no-op (see get_name); present only so fixed_pool
/// satisfies the generic allocator interface.
void set_name(const char*)
{
	// Nothing to do. We don't allocate memory.
}
364 
365  }; // fixed_pool
366 
367 
368 
369 
370 
372  // fixed_pool_with_overflow
374 
377  template <typename OverflowAllocator = EASTLAllocatorType>
379  {
380  public:
381  typedef OverflowAllocator overflow_allocator_type;
382 
383 
/// fixed_pool_with_overflow
///
/// Default constructor; the pool is unusable until init() is called.
fixed_pool_with_overflow(void* pMemory = NULL)
	: fixed_pool_base(pMemory),
	  mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
{
	// Leave mpPoolBegin, mpPoolEnd uninitialized.
}


/// Constructs with an uninitialized pool and a caller-supplied overflow allocator.
fixed_pool_with_overflow(void* pMemory, const overflow_allocator_type& allocator)
	: fixed_pool_base(pMemory),
	  mOverflowAllocator(allocator)
{
	// Leave mpPoolBegin, mpPoolEnd uninitialized.
}


/// Constructs and initializes the pool over the given buffer, using a
/// default-named overflow allocator.
fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
						 size_t alignment, size_t alignmentOffset = 0)
	: mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
{
	fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

	mpPoolBegin = pMemory; // Remembered so deallocate() can tell pool nodes from overflow nodes.
}


/// Constructs and initializes the pool over the given buffer with a
/// caller-supplied overflow allocator.
fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
						 size_t alignment, size_t alignmentOffset,
						 const overflow_allocator_type& allocator)
	: mOverflowAllocator(allocator)
{
	fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

	mpPoolBegin = pMemory; // Remembered so deallocate() can tell pool nodes from overflow nodes.
}
419 
420 
421  // Disabled because the default is sufficient. While it normally makes no sense to deep copy
422  // this data, our usage of this class is such that this is OK and wanted.
423  //
424  //fixed_pool_with_overflow(const fixed_pool_with_overflow& x)
425  //{
426  // ...
427  //}
428 
429 
431  {
432  #if EASTL_ALLOCATOR_COPY_ENABLED
433  mOverflowAllocator = x.mOverflowAllocator;
434  #else
435  (void)x;
436  #endif
437 
438  return *this;
439  }
440 
441 
/// init
///
/// (Re)attaches a memory buffer to the pool (see fixed_pool_base::init)
/// and records the buffer start so deallocate() can distinguish local
/// pool nodes from overflow-allocated nodes.
void init(void* pMemory, size_t memorySize, size_t nodeSize,
		  size_t alignment, size_t alignmentOffset = 0)
{
	fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

	mpPoolBegin = pMemory;
}
449 
450 
451  void* allocate()
452  {
453  void* p = NULL;
454  Link* pLink = mpHead;
455 
456  if(pLink)
457  {
458  // Unlink from chain
459  p = pLink;
460  mpHead = pLink->mpNext;
461  }
462  else
463  {
464  // If there's no free node in the free list, just
465  // allocate another from the reserved memory area
466 
467  if(mpNext != mpCapacity)
468  {
469  p = pLink = mpNext;
470  mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext) + mnNodeSize);
471  }
472  else
473  p = mOverflowAllocator.allocate(mnNodeSize);
474  }
475 
476  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
477  if(p && (++mnCurrentSize > mnPeakSize))
478  mnPeakSize = mnCurrentSize;
479  #endif
480 
481  return p;
482  }
483 
484 
485  void* allocate(size_t alignment, size_t alignmentOffset)
486  {
487  void* p = NULL;
488  Link* pLink = mpHead;
489 
490  if (pLink)
491  {
492  // Unlink from chain
493  p = pLink;
494  mpHead = pLink->mpNext;
495  }
496  else
497  {
498  // If there's no free node in the free list, just
499  // allocate another from the reserved memory area
500 
501  if (mpNext != mpCapacity)
502  {
503  p = pLink = mpNext;
504  mpNext = reinterpret_cast<Link*>(reinterpret_cast<char*>(mpNext)+mnNodeSize);
505  }
506  else
507  {
508  p = allocate_memory(mOverflowAllocator, mnNodeSize, alignment, alignmentOffset);
509  EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined.");
510  }
511 
512  }
513 
514  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
515  if (p && (++mnCurrentSize > mnPeakSize))
516  mnPeakSize = mnCurrentSize;
517  #endif
518 
519  return p;
520  }
521 
522  void deallocate(void* p)
523  {
524  #if EASTL_FIXED_SIZE_TRACKING_ENABLED
525  --mnCurrentSize;
526  #endif
527 
528  if((p >= mpPoolBegin) && (p < mpCapacity))
529  {
530  ((Link*)p)->mpNext = mpHead;
531  mpHead = ((Link*)p);
532  }
533  else
534  mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
535  }
536 
537 
539 
540 
/// get_name
/// Returns the debug name of the overflow allocator (the local pool
/// itself stores no name).
const char* get_name() const
{
	return mOverflowAllocator.get_name();
}


/// set_name
/// Sets the debug name of the overflow allocator.
void set_name(const char* pName)
{
	mOverflowAllocator.set_name(pName);
}


/// Read access to the overflow allocator used once the fixed buffer is full.
const overflow_allocator_type& get_overflow_allocator() const
{
	return mOverflowAllocator;
}


/// Mutable access to the overflow allocator used once the fixed buffer is full.
overflow_allocator_type& get_overflow_allocator()
{
	return mOverflowAllocator;
}


/// Replaces the overflow allocator by copy-assignment.
void set_overflow_allocator(const overflow_allocator_type& overflowAllocator)
{
	mOverflowAllocator = overflowAllocator;
}
public:
	OverflowAllocator mOverflowAllocator; // Services requests once the fixed buffer is exhausted.
	void*             mpPoolBegin;        // Ideally we wouldn't need this member variable. The problem is that the information about the pool buffer and object size is stored in the owning container and we can't have access to it without increasing the amount of code we need and by templating more code. It may turn out that simply storing data here is smaller in the end.
572 
573  }; // fixed_pool_with_overflow
574 
575 
576 
577 
578 
580  // fixed_node_allocator
582 
605  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
607  {
608  public:
611  typedef OverflowAllocator overflow_allocator_type;
612 
613  enum
614  {
615  kNodeSize = nodeSize,
616  kNodeCount = nodeCount,
617  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
618  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
619  kNodeAlignment = nodeAlignment,
620  kNodeAlignmentOffset = nodeAlignmentOffset
621  };
622 
623  public:
624  pool_type mPool;
625 
626  public:
627  //fixed_node_allocator(const char* pName)
628  //{
629  // mPool.set_name(pName);
630  //}
631 
632 
/// fixed_node_allocator
///
/// Constructs over the caller-provided node buffer with a default-named
/// overflow allocator.
fixed_node_allocator(void* pNodeBuffer)
	: mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
{
}


/// fixed_node_allocator
///
/// Constructs over the caller-provided node buffer with an explicit
/// overflow allocator.
fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
	: mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator)
{
}
643 
644 
660  : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator)
661  {
662  }
663 
664 
this_type& operator=(const this_type& x)
{
	// Delegates to the pool's assignment (which, per its own definition,
	// copies only what that pool type chooses to copy).
	mPool = x.mPool;
	return *this;
}


/// allocate
///
/// Allocates one node. n must equal kNodeSize; the parameter exists only
/// to satisfy the generic allocator interface.
void* allocate(size_t n, int /*flags*/ = 0)
{
	(void)n;
	EASTL_ASSERT(n == kNodeSize);
	return mPool.allocate();
}


/// allocate
///
/// Aligned variant; forwards alignment/offset to the underlying pool.
void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
{
	(void)n;
	EASTL_ASSERT(n == kNodeSize);
	return mPool.allocate(alignment, offset);
}


/// deallocate
/// Returns a node to the pool; the size argument is unused.
void deallocate(void* p, size_t)
{
	mPool.deallocate(p);
}


/// can_allocate
/// Returns true if the underlying pool can satisfy another node allocation.
bool can_allocate() const
{
	return mPool.can_allocate();
}


/// reset
///
/// Re-initializes the pool over a new node buffer; any outstanding
/// allocations are implicitly abandoned.
/// NOTE(review): the constructors size the pool with kNodesSize while
/// reset uses kBufferSize — confirm this asymmetry is intentional.
void reset(void* pNodeBuffer)
{
	mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
}


/// get_name
/// Forwards to the pool (which reports its overflow allocator's name).
const char* get_name() const
{
	return mPool.get_name();
}


/// set_name
/// Forwards to the pool (which names its overflow allocator).
void set_name(const char* pName)
{
	mPool.set_name(pName);
}


const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
{
	return mPool.mOverflowAllocator;
}


overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
{
	return mPool.mOverflowAllocator;
}


void set_overflow_allocator(const overflow_allocator_type& allocator)
{
	mPool.mOverflowAllocator = allocator;
}


void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
{
	mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
}
749 
750  }; // fixed_node_allocator
751 
752 
753  // This is a near copy of the code above, with the only difference being
754  // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
755  // and the get_overflow_allocator / set_overflow_allocator functions.
756  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
757  class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
758  {
759  public:
760  typedef fixed_pool pool_type;
762  typedef OverflowAllocator overflow_allocator_type;
763 
764  enum
765  {
766  kNodeSize = nodeSize,
767  kNodeCount = nodeCount,
768  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
769  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
770  kNodeAlignment = nodeAlignment,
771  kNodeAlignmentOffset = nodeAlignmentOffset
772  };
773 
774  public:
775  pool_type mPool;
776 
777  public:
/// fixed_node_allocator
/// Constructs over the caller-provided node buffer.
fixed_node_allocator(void* pNodeBuffer)
	: mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
{
}


fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
	: mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
{
}


// NOTE(review): the copy is seeded at x.mPool.mpNext (the source's next
// unused node), so the copy does not share or preserve x's allocations.
fixed_node_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
	: mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
{
}


this_type& operator=(const this_type& x)
{
	mPool = x.mPool;
	return *this;
}


/// allocate
/// Allocates one node; n must equal kNodeSize.
void* allocate(size_t n, int /*flags*/ = 0)
{
	(void)n;
	EASTL_ASSERT(n == kNodeSize);
	return mPool.allocate();
}


/// allocate
/// Aligned variant; forwards alignment/offset to the pool.
void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0)
{
	(void)n;
	EASTL_ASSERT(n == kNodeSize);
	return mPool.allocate(alignment, offset);
}


/// deallocate
/// Returns a node to the pool; the size argument is unused.
void deallocate(void* p, size_t)
{
	mPool.deallocate(p);
}


/// can_allocate
/// Returns true if the pool can satisfy another node allocation.
bool can_allocate() const
{
	return mPool.can_allocate();
}


/// reset
/// Re-initializes the pool over a new node buffer; outstanding
/// allocations are implicitly abandoned.
void reset(void* pNodeBuffer)
{
	mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
}


const char* get_name() const
{
	return mPool.get_name();
}


void set_name(const char* pName)
{
	mPool.set_name(pName);
}


// NOTE(review): dereferencing a null pointer here is undefined behavior;
// this is asserted-unreachable and kept only so generic code compiles.
const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
{
	EASTL_ASSERT(false);
	overflow_allocator_type* pNULL = NULL;
	return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
}


overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
{
	EASTL_ASSERT(false);
	overflow_allocator_type* pNULL = NULL;
	return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
}


void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
{
	// We don't have an overflow allocator.
	EASTL_ASSERT(false);
}


void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
{
	// We don't have an overflow allocator.
}
890 
891  }; // fixed_node_allocator
892 
893 
894 
895 
897  // global operators
899 
/// operator==
/// fixed_node_allocator instances compare equal only when they are the
/// same object, since each owns a distinct fixed buffer.
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
					   const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
{
	return (&a == &b); // They are only equal if they are the same object.
}


/// operator!=
/// Negation of the identity comparison above.
template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
					   const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
{
	return (&a != &b); // They are only equal if they are the same object.
}
914 
915 
916 
917 
918 
919 
921  // fixed_hashtable_allocator
923 
937  template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
939  {
940  public:
943  typedef OverflowAllocator overflow_allocator_type;
944 
945  enum
946  {
947  kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
948  kBucketsSize = bucketCount * sizeof(void*),
949  kNodeSize = nodeSize,
950  kNodeCount = nodeCount,
951  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
952  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
953  kNodeAlignment = nodeAlignment,
954  kNodeAlignmentOffset = nodeAlignmentOffset,
955  kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
956  };
957 
958  protected:
959  pool_type mPool;
960  void* mpBucketBuffer;
961 
962  public:
963  // Disabled because it causes compile conflicts.
964  //fixed_hashtable_allocator(const char* pName)
965  //{
966  // mPool.set_name(pName);
967  //}
968 
/// fixed_hashtable_allocator
/// Node-buffer-only constructor; no bucket buffer is attached.
fixed_hashtable_allocator(void* pNodeBuffer)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(NULL)
{
	// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
}


/// Node-buffer-only constructor with an explicit overflow allocator.
fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
	  mpBucketBuffer(NULL)
{
	// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
}


/// Constructs with both a node buffer and a fixed bucket-array buffer.
fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(pBucketBuffer)
{
}


/// Constructs with node and bucket buffers plus an explicit overflow allocator.
fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& allocator)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator),
	  mpBucketBuffer(pBucketBuffer)
{
}
997 
998 
1005  : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator),
1006  mpBucketBuffer(x.mpBucketBuffer)
1007  {
1008  }
1009 
1010 
1012  {
1013  mPool = x.mPool;
1014  return *this;
1015  }
1016 
1017 
1018  void* allocate(size_t n, int flags = 0)
1019  {
1020  // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
1021  EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
1022 
1023  if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
1024  {
1025  EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
1026  return mPool.allocate();
1027  }
1028 
1029  // If bucket size no longer fits within local buffer...
1030  if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
1031  return get_overflow_allocator().allocate(n);
1032 
1033  EASTL_ASSERT(n <= kBucketsSize);
1034  return mpBucketBuffer;
1035  }
1036 
1037 
1038  void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
1039  {
1040  // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
1041  if ((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
1042  {
1043  EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n);
1044  return mPool.allocate(alignment, offset);
1045  }
1046 
1047  // If bucket size no longer fits within local buffer...
1048  if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize))
1049  return get_overflow_allocator().allocate(n, alignment, offset);
1050 
1051  EASTL_ASSERT(n <= kBucketsSize);
1052  return mpBucketBuffer;
1053  }
1054 
1055 
/// deallocate
///
/// Frees a node back to the pool; the fixed bucket buffer itself is never
/// freed. NOTE(review): bucket arrays that overflowed to the heap are also
/// routed through mPool.deallocate (which passes its node size as the size
/// hint) — confirm the overflow allocator tolerates the size mismatch.
void deallocate(void* p, size_t)
{
	if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
		mPool.deallocate(p);
}


/// can_allocate
/// Returns true if the node pool can satisfy another allocation.
bool can_allocate() const
{
	return mPool.can_allocate();
}


/// reset
/// Re-initializes the node pool over a new buffer; outstanding node
/// allocations are implicitly abandoned.
void reset(void* pNodeBuffer)
{
	// No need to modify mpBucketBuffer, as that is constant.
	mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
}


const char* get_name() const
{
	return mPool.get_name();
}


void set_name(const char* pName)
{
	mPool.set_name(pName);
}


const overflow_allocator_type& get_overflow_allocator() const
{
	return mPool.mOverflowAllocator;
}


overflow_allocator_type& get_overflow_allocator()
{
	return mPool.mOverflowAllocator;
}


void set_overflow_allocator(const overflow_allocator_type& allocator)
{
	mPool.mOverflowAllocator = allocator;
}


void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
{
	mPool.mOverflowAllocator = x.mPool.mOverflowAllocator;
}
1110 
1111  }; // fixed_hashtable_allocator
1112 
1113 
1114  // This is a near copy of the code above, with the only difference being
1115  // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
1116  // and the get_overflow_allocator / set_overflow_allocator functions.
1117  template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
1118  class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
1119  {
1120  public:
1121  typedef fixed_pool pool_type;
1123  typedef OverflowAllocator overflow_allocator_type;
1124 
1125  enum
1126  {
1127  kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
1128  kBucketsSize = bucketCount * sizeof(void*),
1129  kNodeSize = nodeSize,
1130  kNodeCount = nodeCount,
1131  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
1132  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
1133  kNodeAlignment = nodeAlignment,
1134  kNodeAlignmentOffset = nodeAlignmentOffset,
1135  kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
1136  };
1137 
1138  protected:
1139  pool_type mPool;
1140  void* mpBucketBuffer;
1141 
1142  public:
1143  // Disabled because it causes compile conflicts.
1144  //fixed_hashtable_allocator(const char* pName)
1145  //{
1146  // mPool.set_name(pName);
1147  //}
1148 
/// fixed_hashtable_allocator
/// Node-buffer-only constructor; no bucket buffer is attached.
fixed_hashtable_allocator(void* pNodeBuffer)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(NULL)
{
	// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
}

fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(NULL)
{
	// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
}


/// Constructs with both a node buffer and a fixed bucket-array buffer.
fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(pBucketBuffer)
{
}


fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
	: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(pBucketBuffer)
{
}


// NOTE(review): the copy is seeded at x.mPool.mpHead, so the copy does
// not share or preserve x's allocations.
fixed_hashtable_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization.
	: mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
	  mpBucketBuffer(x.mpBucketBuffer)
{
}
1188 
1189 
1191  {
1192  mPool = x.mPool;
1193  return *this;
1194  }
1195 
1196 
/// allocate
///
/// Allocates either one node or the bucket array, selected by
/// kAllocFlagBuckets in flags. Bucket overflow is not supported in this
/// (bEnableOverflow == false) specialization, so an oversized bucket
/// request asserts.
void* allocate(size_t n, int flags = 0)
{
	// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
	EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
	if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
	{
		EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
		return mPool.allocate();
	}

	// Don't allow hashtable buckets to overflow in this case.
	EASTL_ASSERT(n <= kBucketsSize);
	return mpBucketBuffer;
}


/// allocate
/// Aligned variant of the above; alignment applies only to node allocations.
void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
{
	// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
	if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
	{
		EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
		return mPool.allocate(alignment, offset);
	}

	// Don't allow hashtable buckets to overflow in this case.
	EASTL_ASSERT(n <= kBucketsSize);
	return mpBucketBuffer;
}
1226 
1227 
/// deallocate
/// Frees a node back to the pool; the fixed bucket buffer is never freed.
void deallocate(void* p, size_t)
{
	if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
		mPool.deallocate(p);
}


/// can_allocate
/// Returns true if the node pool can satisfy another allocation.
bool can_allocate() const
{
	return mPool.can_allocate();
}


/// reset
/// Re-initializes the node pool over a new buffer; outstanding node
/// allocations are implicitly abandoned.
void reset(void* pNodeBuffer)
{
	// No need to modify mpBucketBuffer, as that is constant.
	mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
}


const char* get_name() const
{
	return mPool.get_name();
}


void set_name(const char* pName)
{
	mPool.set_name(pName);
}


// NOTE(review): dereferencing a null pointer here is undefined behavior;
// this is asserted-unreachable and kept only so generic code compiles.
const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
{
	EASTL_ASSERT(false);
	overflow_allocator_type* pNULL = NULL;
	return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
}


overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
{
	EASTL_ASSERT(false);
	overflow_allocator_type* pNULL = NULL;
	return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
}

void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
{
	// We don't have an overflow allocator.
	EASTL_ASSERT(false);
}

void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
{
	// We don't have an overflow allocator.
}
1285 
1286  }; // fixed_hashtable_allocator
1287 
1288 
1290  // global operators
1292 
/// operator==
/// fixed_hashtable_allocator instances compare equal only when they are
/// the same object, since each owns distinct fixed buffers.
template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
					   const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
{
	return (&a == &b); // They are only equal if they are the same object.
}


/// operator!=
/// Negation of the identity comparison above.
template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
					   const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
{
	return (&a != &b); // They are only equal if they are the same object.
}
1307 
1308 
1309 
1310 
1311 
1312 
1314  // fixed_vector_allocator
1316 
1327  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator = EASTLAllocatorType>
1329  {
1330  public:
1332  typedef OverflowAllocator overflow_allocator_type;
1333 
1334  enum
1335  {
1336  kNodeSize = nodeSize,
1337  kNodeCount = nodeCount,
1338  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
1339  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
1340  kNodeAlignment = nodeAlignment,
1341  kNodeAlignmentOffset = nodeAlignmentOffset
1342  };
1343 
1344  public:
1345  overflow_allocator_type mOverflowAllocator;
1346  void* mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation.
1347 
1348  public:
1349  // Disabled because it causes compile conflicts.
1350  //fixed_vector_allocator(const char* pName = NULL)
1351  //{
1352  // mOverflowAllocator.set_name(pName);
1353  //}
1354 
1355  fixed_vector_allocator(void* pNodeBuffer = nullptr)
1356  : mpPoolBegin(pNodeBuffer)
1357  {
1358  }
1359 
1360  fixed_vector_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator)
1361  : mOverflowAllocator(allocator), mpPoolBegin(pNodeBuffer)
1362  {
1363  }
1364 
1366  {
1367  mpPoolBegin = x.mpPoolBegin;
1368  mOverflowAllocator = x.mOverflowAllocator;
1369  }
1370 
1371  fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
1372  {
1373  // We leave our mpPoolBegin variable alone.
1374 
1375  #if EASTL_ALLOCATOR_COPY_ENABLED
1376  mOverflowAllocator = x.mOverflowAllocator;
1377  #else
1378  (void)x;
1379  #endif
1380 
1381  return *this;
1382  }
1383 
1384  void* allocate(size_t n, int flags = 0)
1385  {
1386  return mOverflowAllocator.allocate(n, flags);
1387  }
1388 
1389  void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
1390  {
1391  return mOverflowAllocator.allocate(n, alignment, offset, flags);
1392  }
1393 
1394  void deallocate(void* p, size_t n)
1395  {
1396  if(p != mpPoolBegin)
1397  mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
1398  }
1399 
1400  const char* get_name() const
1401  {
1402  return mOverflowAllocator.get_name();
1403  }
1404 
1405  void set_name(const char* pName)
1406  {
1407  mOverflowAllocator.set_name(pName);
1408  }
1409 
1410  const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
1411  {
1412  return mOverflowAllocator;
1413  }
1414 
1415  overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
1416  {
1417  return mOverflowAllocator;
1418  }
1419 
1420  void set_overflow_allocator(const overflow_allocator_type& allocator)
1421  {
1422  mOverflowAllocator = allocator;
1423  }
1424 
1425  void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
1426  {
1427  mOverflowAllocator = x.mOverflowAllocator;
1428  }
1429 
1430  }; // fixed_vector_allocator
1431 
1432 
1433  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename OverflowAllocator>
1434  class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, OverflowAllocator>
1435  {
1436  public:
1438  typedef OverflowAllocator overflow_allocator_type;
1439 
1440  enum
1441  {
1442  kNodeSize = nodeSize,
1443  kNodeCount = nodeCount,
1444  kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
1445  kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
1446  kNodeAlignment = nodeAlignment,
1447  kNodeAlignmentOffset = nodeAlignmentOffset
1448  };
1449 
1450  // Disabled because it causes compile conflicts.
1451  //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
1452  //{
1453  //}
1454 
1456  {
1457  }
1458 
1459  fixed_vector_allocator(void* /*pNodeBuffer*/)
1460  {
1461  }
1462 
1463  fixed_vector_allocator(void* /*pNodeBuffer*/, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization.
1464  {
1465  }
1466 
1469  // Disabled because there is nothing to do. No member data. And the default for this is sufficient.
1470  // fixed_vector_allocator(const fixed_vector_allocator&)
1471  // {
1472  // }
1473 
1474  // Disabled because there is nothing to do. No member data.
1475  //fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
1476  //{
1477  // return *this;
1478  //}
1479 
1480  void* allocate(size_t /*n*/, int /*flags*/ = 0)
1481  {
1482  EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
1483  return NULL;
1484  }
1485 
1486  void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
1487  {
1488  EASTL_ASSERT(false);
1489  return NULL;
1490  }
1491 
1492  void deallocate(void* /*p*/, size_t /*n*/)
1493  {
1494  }
1495 
1496  const char* get_name() const
1497  {
1498  return EASTL_FIXED_POOL_DEFAULT_NAME;
1499  }
1500 
1501  void set_name(const char* /*pName*/)
1502  {
1503  }
1504 
1505  const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT
1506  {
1507  EASTL_ASSERT(false);
1508  overflow_allocator_type* pNULL = NULL;
1509  return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
1510  }
1511 
1512  overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT
1513  {
1514  EASTL_ASSERT(false);
1515  overflow_allocator_type* pNULL = NULL;
1516  return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile.
1517  }
1518 
1519  void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
1520  {
1521  // We don't have an overflow allocator.
1522  EASTL_ASSERT(false);
1523  }
1524 
1525  void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators.
1526  {
1527  // We don't have an overflow allocator.
1528  }
1529 
1530  }; // fixed_vector_allocator
1531 
1532 
1534  // global operators
1536 
1537  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
1538  inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
1539  const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
1540  {
1541  return (&a == &b); // They are only equal if they are the same object.
1542  }
1543 
1544 
1545  template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename OverflowAllocator>
1546  inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& a,
1547  const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, OverflowAllocator>& b)
1548  {
1549  return (&a != &b); // They are only equal if they are the same object.
1550  }
1551 
1552 
1553 
1554 
1555 
1557  // fixed_swap
1559 
1573  template <typename Container, bool UseHeapTemporary>
1575  {
1576  public:
1577  static void swap(Container& a, Container& b);
1578  };
1579 
1580 
1581  template <typename Container>
1582  class fixed_swap_impl<Container, false>
1583  {
1584  public:
1585  static void swap(Container& a, Container& b)
1586  {
1587  Container temp(EASTL_MOVE(a)); // Can't use global swap because that could
1588  a = EASTL_MOVE(b); // itself call this swap function in return.
1589  b = EASTL_MOVE(temp);
1590  }
1591  };
1592 
1593 
1594  template <typename Container>
1595  class fixed_swap_impl<Container, true>
1596  {
1597  public:
1598  static void swap(Container& a, Container& b)
1599  {
1600  EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
1601  void* const pMemory = allocator.allocate(sizeof(a));
1602 
1603  if(pMemory)
1604  {
1605  Container* pTemp = ::new(pMemory) Container(EASTL_MOVE(a));
1606  a = EASTL_MOVE(b);
1607  b = EASTL_MOVE(*pTemp);
1608 
1609  pTemp->~Container();
1610  allocator.deallocate(pMemory, sizeof(a));
1611  }
1612  }
1613  };
1614 
1615 
1616  template<typename Container>
1617  void fixed_swap(Container& a, Container& b)
1618  {
1619  return fixed_swap_impl<Container, sizeof(Container) >= EASTL_MAX_STACK_USAGE>::swap(a, b);
1620  }
1621 
1622 
1623 
1624 } // namespace eastl
1625 
1626 
1627 EA_RESTORE_VC_WARNING();
1628 
1629 
1630 #endif // Header include guard
// NOTE(review): Doxygen cross-reference index lines (extraction residue such as
// "Definition: fixed_pool.h:939") followed the include-guard #endif here; they
// were not part of the source file and have been removed.