UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
MallocBinnedCommon.h
Go to the documentation of this file.
1// Copyright Epic Games, Inc. All Rights Reserved.
2
3#pragma once
4
5#include "CoreTypes.h"
6#include <atomic>
7#include "HAL/MemoryBase.h"
8#include "HAL/PlatformMutex.h"
9#include "HAL/PlatformTLS.h"
10#include "Async/UniqueLock.h"
11#include "Async/WordMutex.h"
12#include "Misc/ScopeLock.h"
14#include "Templates/MemoryOps.h"
16
17
18// A project can define its own UE_MBC_MAX_LISTED_SMALL_POOL_SIZE and UE_MBC_NUM_LISTED_SMALL_POOLS to reduce runtime memory usage
19// MallocBinnedCommon.cpp has a list of predefined bins that go up to 28672
20// By default allocators (i.e. MB3) that use these bins will rely on this number as a baseline for a small bins count
21// These allocators can increase the amount of small bins they want to manage by going over the default MBC bins list
22// In MB3 case that means defining BINNED3_MAX_SMALL_POOL_SIZE to something like 65536
23// Every bin over the UE_MBC_MAX_LISTED_SMALL_POOL_SIZE would come with a 4kb increment
24// These small bins would be kept in user mode, increasing application's memory footprint and reducing the time it takes to allocate memory from the said bins
25// If an application needs to aggressively reduce its memory footprint, potentially trading some perf due to an increased amount of kernel calls to allocate memory,
26// it can redefine UE_MBC_MAX_LISTED_SMALL_POOL_SIZE and BINNED3_MAX_SMALL_POOL_SIZE to smaller numbers; a good value is 16384 for both
27// This, however, would require the app to redefine UE_MBC_NUM_LISTED_SMALL_POOLS too to match the number of bins that fall under the new define's threshold
28// In case of 16384, we'll skip 3 larger bins and so UE_MBC_NUM_LISTED_SMALL_POOLS should be set to 48 at the time of writing
29#if !defined(UE_MBC_MAX_LISTED_SMALL_POOL_SIZE)
30# define UE_MBC_MAX_LISTED_SMALL_POOL_SIZE 28672
31#endif
32
33#if !defined(UE_MBC_NUM_LISTED_SMALL_POOLS)
34# define UE_MBC_NUM_LISTED_SMALL_POOLS 52
35#endif
36
37#if !defined(BINNEDCOMMON_USE_SEPARATE_VM_PER_POOL)
38# if PLATFORM_WINDOWS
39# define BINNEDCOMMON_USE_SEPARATE_VM_PER_POOL 1
40# else
41# define BINNEDCOMMON_USE_SEPARATE_VM_PER_POOL 0
42# endif
43#endif
44
45#define UE_MBC_MIN_SMALL_POOL_ALIGNMENT 8 // Minimum alignment of bins. Added to support 8 bytes bin. If you need to mask lower bits - use this!
46#define UE_MBC_MAX_SMALL_POOL_ALIGNMENT 256
47#define UE_MBC_STANDARD_ALIGNMENT 16 // Standard alignment for all allocations, except 8 bytes that is aligned by 8 bytes
48#define UE_MBC_MIN_BIN_SIZE 8 // Minimum supported bin size
49#define UE_MBC_BIN_SIZE_SHIFT 3 // Shift for a bin size to save memory
50
51#if !defined(AGGRESSIVE_MEMORY_SAVING)
52# error "AGGRESSIVE_MEMORY_SAVING must be defined"
53#endif
54
55#if AGGRESSIVE_MEMORY_SAVING
56# define UE_DEFAULT_GMallocBinnedBundleSize 8192
57#else
58# define UE_DEFAULT_GMallocBinnedBundleSize 65536
59#endif
60
61#if !defined(UE_DEFAULT_GMallocBinnedPerThreadCaches)
62# define UE_DEFAULT_GMallocBinnedPerThreadCaches 1
63#endif
64#define UE_DEFAULT_GMallocBinnedBundleCount 64
65#define UE_DEFAULT_GMallocBinnedAllocExtra 32
66#define UE_DEFAULT_GMallocBinnedMaxBundlesBeforeRecycle 8
67
68#ifndef UE_MBC_ALLOW_RUNTIME_TWEAKING
69# define UE_MBC_ALLOW_RUNTIME_TWEAKING 0
70#endif
71
72#if UE_MBC_ALLOW_RUNTIME_TWEAKING
78#else
79# define GMallocBinnedPerThreadCaches UE_DEFAULT_GMallocBinnedPerThreadCaches
80# define GMallocBinnedBundleSize UE_DEFAULT_GMallocBinnedBundleSize
81# define GMallocBinnedBundleCount UE_DEFAULT_GMallocBinnedBundleCount
82# define GMallocBinnedAllocExtra UE_DEFAULT_GMallocBinnedAllocExtra
83# define GMallocBinnedMaxBundlesBeforeRecycle UE_DEFAULT_GMallocBinnedMaxBundlesBeforeRecycle
84#endif //~UE_MBC_ALLOW_RUNTIME_TWEAKING
85
86#ifndef UE_MBC_ALLOCATOR_STATS
87# define UE_MBC_ALLOCATOR_STATS (!UE_BUILD_SHIPPING || WITH_EDITOR)
88#endif
89
90#if UE_MBC_ALLOCATOR_STATS
91# define UE_MBC_UPDATE_STATS(x) x
93#else
94# define UE_MBC_UPDATE_STATS(x)
95#endif
96
97#ifndef UE_MBC_LOG_LARGE_ALLOCATION
98# define UE_MBC_LOG_LARGE_ALLOCATION 0
99#endif
100
101#ifndef UE_MBC_LIGHTWEIGHT_BIN_CALLSTACK_TRACKER
102# define UE_MBC_LIGHTWEIGHT_BIN_CALLSTACK_TRACKER 0
103#endif
104
105#if UE_MBC_LIGHTWEIGHT_BIN_CALLSTACK_TRACKER && !UE_MBC_ALLOCATOR_STATS
106# error "MB lightweight bin callstack tracker needs UE_MBC_ALLOCATOR_STATS to be enabled."
107#endif
108
// MB3 and its bins were designed around 4\16KB native page size. Having a larger page size will increase memory waste and will be inefficient for the current allocator design.
// Value is parenthesized so the macro expands safely inside larger expressions (e.g. division or modulo by it).
#define UE_MBC_MAX_SUPPORTED_PLATFORM_PAGE_SIZE (16 * 1024)
110
112
115
117
118
120{
121 uint64* Bits; // one bits in middle layers mean "all allocated"
122 uint32 Capacity; // rounded up to a power of two
123 uint32 DesiredCapacity;
124 uint32 Rows;
125 uint32 OffsetOfLastRow;
126 uint32 AllocationSize;
127
128public:
130 : Bits(nullptr)
131 {
132 }
133
134 static constexpr uint32 GetMemoryRequirements(uint32 NumPages)
135 {
136 uint32 AllocationSize = 8;
138 uint32 Capacity = 64;
139 uint32 OffsetOfLastRow = 0;
140
141 while (Capacity < NumPages)
142 {
143 Capacity *= 64;
144 RowsUint64s *= 64;
145 OffsetOfLastRow = AllocationSize / 8;
146 AllocationSize += 8 * RowsUint64s;
147 }
148
149 uint32 LastRowTotal = (AllocationSize - OffsetOfLastRow * 8) * 8;
150 uint32 ExtraBits = LastRowTotal - NumPages;
151 AllocationSize -= (ExtraBits / 64) * 8;
152 return AllocationSize;
153 }
154
155 void FBitTreeInit(uint32 InDesiredCapacity, void * Memory, uint32 MemorySize, bool InitialValue);
157 bool IsAllocated(uint32 Index) const;
158 void AllocBit(uint32 Index);
159 uint32 NextAllocBit() const;
160 uint32 NextAllocBit(uint32 StartIndex) const;
161 void FreeBit(uint32 Index);
163
164 uint32 Slow_NextAllocBits(uint32 NumBits, uint64 StartIndex); // Warning, slow! NumBits must be a power of two or a multiple of 64.
165};
166
181
182
184{
185public:
186 // This needs to be small enough to fit inside the smallest allocation handled by MallocBinned2\3
188 {
190 uint64 Count : 8; // 8 bits is enough to store count as UE_DEFAULT_GMallocBinnedBundleCount is 64
191 uint64 Reserved : 8; // Reserved for ARM HW for TBI, MTE, etc
192
194 {
195 NextNodeInCurrentBundle = reinterpret_cast<uint64>(Next);
196 }
197
202 };
203
204 CORE_API virtual void OnMallocInitialized() override;
205
206protected:
208 {
210 : PtrToPoolPageBitShift(0)
211 , HashKeyShift(0)
212 , PoolMask(0)
213 , MaxHashBuckets(0)
214 , AddressSpaceBase(0)
215 {
216 }
217
222
224 {
225 const uint64 PoolPageToPoolBitShift = FPlatformMath::CeilLogTwo64(InNumPoolsPerPage);
226
227 PtrToPoolPageBitShift = FPlatformMath::CeilLogTwo(InPageSize);
228 HashKeyShift = PtrToPoolPageBitShift + PoolPageToPoolBitShift;
229 PoolMask = (1ull << PoolPageToPoolBitShift) - 1;
230 MaxHashBuckets = FMath::RoundUpToPowerOfTwo64(AddressLimit - AddressBase) >> HashKeyShift;
231 AddressSpaceBase = AddressBase;
232 }
233
235 {
236 check((UPTRINT)InPtr >= AddressSpaceBase);
237 const UPTRINT Ptr = (UPTRINT)InPtr - AddressSpaceBase;
238 OutBucketCollision = Ptr >> HashKeyShift;
239 OutBucketIndex = uint32(OutBucketCollision & (MaxHashBuckets - 1));
240 OutPoolIndex = uint32((Ptr >> PtrToPoolPageBitShift) & PoolMask);
241 }
242
244 {
245 return MaxHashBuckets;
246 }
247
248 private:
250 uint64 PtrToPoolPageBitShift;
251
253 uint64 HashKeyShift;
254
256 uint64 PoolMask;
257
258 // PageSize dependent constants
259 uint64 MaxHashBuckets;
260
261 // Base address for any virtual allocations. Can be non 0 on some platforms
262 uint64 AddressSpaceBase;
263 };
264
265private:
266 struct FBundle
267 {
268 UE_FORCEINLINE_HINT FBundle()
269 {
270 Reset();
271 }
272
273 inline void Reset()
274 {
275 Head = nullptr;
276 Count = 0;
277 }
278
279 inline void PushHead(FBundleNode* Node)
280 {
281 Node->SetNextNodeInCurrentBundle(Head);
282 Head = Node;
283 Count++;
284 }
285
286 inline FBundleNode* PopHead()
287 {
288 FBundleNode* Result = Head;
289
290 Count--;
291 Head = Head->GetNextNodeInCurrentBundle();
292 return Result;
293 }
294
295 FBundleNode* Head;
297 };
298
299protected:
301 {
302 // return true if we actually pushed it
304 {
306
307 if ((PartialBundle.Count >= (uint32)GMallocBinnedBundleCount) | (PartialBundle.Count * InBinSize >= (uint32)GMallocBinnedBundleSize))
308 {
309 if (FullBundle.Head)
310 {
311 return false;
312 }
313 FullBundle = PartialBundle;
314 PartialBundle.Reset();
315 }
316 PartialBundle.PushHead((FBundleNode*)InPtr);
317 return true;
318 }
319
321 {
322 return !((!!FullBundle.Head) & ((PartialBundle.Count >= (uint32)GMallocBinnedBundleCount) | (PartialBundle.Count * InBinSize >= (uint32)GMallocBinnedBundleSize)));
323 }
324
326 {
327 if ((!PartialBundle.Head) & (!!FullBundle.Head))
328 {
329 PartialBundle = FullBundle;
330 FullBundle.Reset();
331 }
332 return PartialBundle.Head ? PartialBundle.PopHead() : nullptr;
333 }
334
335 // tries to recycle the full bundle, if that fails, it is returned for freeing
336 template <class T>
338 {
339 FBundleNode* Result = nullptr;
340 if (FullBundle.Head)
341 {
342 FullBundle.Head->Count = FullBundle.Count;
343 if (!InGlobalRecycler.PushBundle(InPoolIndex, FullBundle.Head))
344 {
345 Result = FullBundle.Head;
346 }
347 FullBundle.Reset();
348 }
349 return Result;
350 }
351
352 template <class T>
354 {
355 if (!PartialBundle.Head)
356 {
357 PartialBundle.Count = 0;
358 PartialBundle.Head = InGlobalRecycler.PopBundle(InPoolIndex);
359 if (PartialBundle.Head)
360 {
361 PartialBundle.Count = PartialBundle.Head->Count;
362 return true;
363 }
364 return false;
365 }
366 return true;
367 }
368
370 {
371 FBundleNode* Partial = PartialBundle.Head;
372 if (Partial)
373 {
374 PartialBundle.Reset();
375 }
376
377 FBundleNode* Full = FullBundle.Head;
378 if (Full)
379 {
380 FullBundle.Reset();
381 }
382
383 FBundleNode* Result = Partial;
384 if (Result)
385 {
386 FBundleNode* Prev = Result;
387 FBundleNode* Next = Result->GetNextNodeInCurrentBundle();
388 while (Next)
389 {
390 Prev = Next;
391 Next = Next->GetNextNodeInCurrentBundle();
392 }
393 Prev->SetNextNodeInCurrentBundle(Full);
394 }
395 else
396 {
397 Result = Full;
398 }
399
400 return Result;
401 }
402
403 private:
404 FBundle PartialBundle;
405 FBundle FullBundle;
406 };
407
409 uint64 NumPoolsPerPage; // Number of AllocType::FPoolInfo
412
414
415 std::atomic<uint64> MemoryTrimEpoch{ 0 };
416
417#if UE_MBC_ALLOCATOR_STATS
418 static std::atomic<int64> TLSMemory;
419 static std::atomic<int64> ConsolidatedMemory;
420 static std::atomic<int64> AllocatedSmallPoolMemory; // requested small pool memory allocations
421 static std::atomic<int64> AllocatedOSSmallPoolMemory; // total small pool memory allocated by the os, always larger than AllocatedSmallPoolMemory
422 static std::atomic<int64> AllocatedLargePoolMemory; // memory requests to the OS which don't fit in the small pool
423 static std::atomic<int64> AllocatedLargePoolMemoryWAlignment; // when we allocate at OS level we need to align to a size
424
427
429#endif
430
431 [[noreturn]] static void OutOfMemory(uint64 Size, uint32 Alignment = 0)
432 {
433 // this is expected not to return
435 }
436
437 [[noreturn]] void UnrecognizedPointerFatalError(void* Ptr);
438
439#if UE_MBC_LOG_LARGE_ALLOCATION
440 void LogLargeAllocation(SIZE_T Size) const;
441#else
443#endif
444
445#if UE_MBC_LIGHTWEIGHT_BIN_CALLSTACK_TRACKER
447#endif
448};
449
450template <class AllocType, int NumSmallPools, int MaxSmallPoolSize>
452{
454
455 static constexpr int NUM_SMALL_POOLS = NumSmallPools;
456 static constexpr int MAX_SMALL_POOL_SIZE = MaxSmallPoolSize;
457
459 struct FPoolHashBucket
460 {
461 UPTRINT BucketIndex;
462 typename AllocType::FPoolInfo* FirstPool;
463 FPoolHashBucket* Prev;
464 FPoolHashBucket* Next;
465
466 FPoolHashBucket()
467 {
468 BucketIndex = 0;
469 FirstPool = nullptr;
470 Prev = this;
471 Next = this;
472 }
473
474 void Link(FPoolHashBucket* After)
475 {
476 After->Prev = Prev;
477 After->Next = this;
478 Prev->Next = After;
479 this->Prev = After;
480 }
481
482 void Unlink()
483 {
484 Next->Prev = Prev;
485 Prev->Next = Next;
486 Prev = this;
487 Next = this;
488 }
489 };
490
491public:
493 {
494#if UE_MBC_ALLOCATOR_STATS
496#endif
497 }
498
499protected:
500 static constexpr int SIZE_TO_POOL_INDEX_NUM = 1 + (MAX_SMALL_POOL_SIZE >> UE_MBC_BIN_SIZE_SHIFT);
501
503 {
505 {
507 // If the current thread doesn't have the Lock, we can't return the TLS cache for being used on the current thread as we risk racing with another thread doing trimming.
508 // This can only happen in such a scenario.
509 //
510 // FMemory::MarkTLSCachesAsUnusedOnCurrentThread();
511 // Node->Event->Wait(); <----- UNSAFE to use the TLS cache by its owner thread but can happen when the wait implementation allocates or frees something.
512 // FMemory::MarkTLSCachesAsUsedOnCurrentThread();
513 if (ThreadSingleton && ThreadSingleton->bLockedByOwnerThread)
514 {
515 return ThreadSingleton;
516 }
517 return nullptr;
518 }
519
520 static void SetTLS()
521 {
524 if (!ThreadSingleton)
525 {
526 const int64 TLSSize = Align(sizeof(FPerThreadFreeBlockLists), AllocType::OsAllocationGranularity);
527 ThreadSingleton = new (AllocType::AllocateMetaDataMemory(TLSSize)) FPerThreadFreeBlockLists();
528 UE_MBC_UPDATE_STATS(TLSMemory.fetch_add(TLSSize, std::memory_order_relaxed));
529
531 ThreadSingleton->bLockedByOwnerThread = true;
532 ThreadSingleton->Lock();
534 RegisterThreadFreeBlockLists(ThreadSingleton);
535 }
536 }
537
538 static void UnlockTLS()
539 {
541 if (ThreadSingleton)
542 {
543 ThreadSingleton->bLockedByOwnerThread = false;
544 ThreadSingleton->Unlock();
545 }
546 }
547
548 static void LockTLS()
549 {
551 if (ThreadSingleton)
552 {
554 ThreadSingleton->bLockedByOwnerThread = true;
555 }
556 }
557
558 static void ClearTLS()
559 {
562 if (ThreadSingleton)
563 {
564 const int64 TLSSize = Align(sizeof(FPerThreadFreeBlockLists), AllocType::OsAllocationGranularity);
565 UE_MBC_UPDATE_STATS(TLSMemory.fetch_sub(TLSSize, std::memory_order_relaxed));
566
567 UnregisterThreadFreeBlockLists(ThreadSingleton);
568 ThreadSingleton->bLockedByOwnerThread = false;
569 ThreadSingleton->Unlock();
570 ThreadSingleton->~FPerThreadFreeBlockLists();
571
572 AllocType::FreeMetaDataMemory(ThreadSingleton, TLSSize);
573 }
575 }
576
581
582 // return true if the pointer was pushed
587
588 // return true if a pointer can be pushed
593
594 // returns a bundle that needs to be freed if it can't be recycled
595 template <class T>
600
601 // returns true if we have anything to pop
602 template <class T>
607
612
613 void Lock()
614 {
615 Mutex.Lock();
616 }
617
618 bool TryLock()
619 {
620 return Mutex.TryLock();
621 }
622
623 void Unlock()
624 {
625 Mutex.Unlock();
626 }
627
628 // should only be called from inside the Lock.
630 {
631 if (MemoryTrimEpoch >= NewEpoch)
632 {
633 return false;
634 }
635
636 MemoryTrimEpoch = NewEpoch;
637 return true;
638 }
639
640#if UE_MBC_ALLOCATOR_STATS
641 public:
643#endif
644 private:
645 UE::FWordMutex Mutex;
646 uint64 MemoryTrimEpoch = 0;
647 FFreeBlockList FreeLists[NUM_SMALL_POOLS];
648 bool bLockedByOwnerThread = false;
649 };
650
651 struct Internal
652 {
653 using PoolInfo = typename AllocType::FPoolInfo;
657 static PoolInfo* GetOrCreatePoolInfo(AllocType& Allocator, void* InPtr, typename PoolInfo::ECanary Kind)
658 {
663 {
664 const uint64 PoolArraySize = NumPools * sizeof(PoolInfo);
665
666 void* Result = Allocator.AllocateMetaDataMemory(PoolArraySize);
668
669 if (!Result)
670 {
671 Allocator.ExternalAllocMutex.Unlock();
672 OutOfMemory(PoolArraySize); // OutOfMemory is fatal and does not return
673 }
674
676 return (PoolInfo*)Result;
677 };
678
679 uint32 BucketIndex;
681 uint32 PoolIndex;
682 Allocator.PtrToPoolMapping.GetHashBucketAndPoolIndices(InPtr, BucketIndex, BucketIndexCollision, PoolIndex);
683
684 FPoolHashBucket* FirstBucket = &Allocator.HashBuckets[BucketIndex];
685 FPoolHashBucket* Collision = FirstBucket;
686 do
687 {
688 if (!Collision->FirstPool)
689 {
690 Collision->BucketIndex = BucketIndexCollision;
691 Collision->FirstPool = CreatePoolArray(Allocator.NumPoolsPerPage);
692 Collision->FirstPool[PoolIndex].SetCanary(Kind, false, true);
693 return &Collision->FirstPool[PoolIndex];
694 }
695
696 if (Collision->BucketIndex == BucketIndexCollision)
697 {
698 Collision->FirstPool[PoolIndex].SetCanary(Kind, false, false);
699 return &Collision->FirstPool[PoolIndex];
700 }
701
702 Collision = Collision->Next;
703 } while (Collision != FirstBucket);
704
705 // Create a new hash bucket entry
706 if (!Allocator.HashBucketFreeList)
707 {
708 Allocator.HashBucketFreeList = (FPoolHashBucket*)Allocator.AllocateMetaDataMemory(AllocType::OsAllocationGranularity);
709 UE_MBC_UPDATE_STATS(HashMemory += AllocType::OsAllocationGranularity);
710
711 for (UPTRINT i = 0, n = AllocType::OsAllocationGranularity / sizeof(FPoolHashBucket); i < n; ++i)
712 {
713 Allocator.HashBucketFreeList->Link(new (Allocator.HashBucketFreeList + i) FPoolHashBucket());
714 }
715 }
716
717 FPoolHashBucket* NextFree = Allocator.HashBucketFreeList->Next;
718 FPoolHashBucket* NewBucket = Allocator.HashBucketFreeList;
719
720 NewBucket->Unlink();
721
722 if (NextFree == NewBucket)
723 {
724 NextFree = nullptr;
725 }
726 Allocator.HashBucketFreeList = NextFree;
727
728 if (!NewBucket->FirstPool)
729 {
730 NewBucket->FirstPool = CreatePoolArray(Allocator.NumPoolsPerPage);
731 NewBucket->FirstPool[PoolIndex].SetCanary(Kind, false, true);
732 }
733 else
734 {
735 NewBucket->FirstPool[PoolIndex].SetCanary(Kind, false, false);
736 }
737
738 NewBucket->BucketIndex = BucketIndexCollision;
739
740 FirstBucket->Link(NewBucket);
741
742 return &NewBucket->FirstPool[PoolIndex];
743 }
744
746 {
747 uint32 BucketIndex;
749 uint32 PoolIndex;
750 Allocator.PtrToPoolMapping.GetHashBucketAndPoolIndices(InPtr, BucketIndex, BucketIndexCollision, PoolIndex);
751
752 FPoolHashBucket* FirstBucket = &Allocator.HashBuckets[BucketIndex];
753 FPoolHashBucket* Collision = FirstBucket;
754 do
755 {
756 if (Collision->BucketIndex == BucketIndexCollision)
757 {
758 return &Collision->FirstPool[PoolIndex];
759 }
760
761 Collision = Collision->Next;
762 } while (Collision != FirstBucket);
763
764 return nullptr;
765 }
766 };
767
773
779
780 virtual void SetupTLSCachesOnCurrentThread() override
781 {
782 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_SetupTLSCachesOnCurrentThread);
783
785 {
786 return;
787 }
789 {
791 }
794 }
795
797 {
798 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_ClearTLSCachesOnCurrentThread);
799
801 {
802 return;
803 }
804
805 ((AllocType*)this)->FlushCurrentThreadCacheInternal();
807 }
808
810 {
811 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_MarkTLSCachesAsUsedOnCurrentThread);
812
814 {
815 return;
816 }
817
819 }
820
822 {
823 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_MarkTLSCachesAsUnusedOnCurrentThread);
824
826 {
827 return;
828 }
829
830 // Will only flush if memory trimming epoch has been bumped while the thread was active.
831 const bool bNewEpochOnly = true;
832 ((AllocType*)this)->FlushCurrentThreadCacheInternal(bNewEpochOnly);
834 }
835
836 inline SIZE_T QuantizeSizeCommon(SIZE_T Count, uint32 Alignment, const AllocType& Alloc) const
837 {
838 checkSlow(FMath::IsPowerOfTwo(Alignment));
840 if ((Count <= MAX_SMALL_POOL_SIZE) & (Alignment <= UE_MBC_STANDARD_ALIGNMENT)) // one branch, not two
841 {
842 SizeOut = Alloc.PoolIndexToBinSize(BoundSizeToPoolIndex(Count, Alloc.MemSizeToPoolIndex));
843 check(SizeOut >= Count);
844 return SizeOut;
845 }
846 Alignment = FMath::Max<uint32>(Alignment, UE_MBC_STANDARD_ALIGNMENT);
847 Count = Align(Count, Alignment);
848 if ((Count <= MAX_SMALL_POOL_SIZE) & (Alignment <= UE_MBC_MAX_SMALL_POOL_ALIGNMENT))
849 {
850 uint32 PoolIndex = BoundSizeToPoolIndex(Count, Alloc.MemSizeToPoolIndex);
851 do
852 {
853 const uint32 BinSize = Alloc.PoolIndexToBinSize(PoolIndex);
854 if (IsAligned(BinSize, Alignment))
855 {
856 SizeOut = SIZE_T(BinSize);
857 check(SizeOut >= Count);
858 return SizeOut;
859 }
860
861 PoolIndex++;
862 } while (PoolIndex < NUM_SMALL_POOLS);
863 }
864
865 Alignment = FPlatformMath::Max<uint32>(Alignment, Alloc.OsAllocationGranularity);
866 SizeOut = Align(Count, Alignment);
867 check(SizeOut >= Count);
868 return SizeOut;
869 }
870
871 inline uint32 BoundSizeToPoolIndex(SIZE_T Size, const uint8(&MemSizeToPoolIndex)[SIZE_TO_POOL_INDEX_NUM]) const
872 {
874 checkSlow(Index >= 0 && Index < SIZE_TO_POOL_INDEX_NUM); // and it should be in the table
875 const uint32 PoolIndex = uint32(MemSizeToPoolIndex[Index]);
876 checkSlow(PoolIndex >= 0 && PoolIndex < NUM_SMALL_POOLS);
877 return PoolIndex;
878 }
879
880 // force no inline, so it will not bloat fast code path since this is unlikely to happen
881 FORCENOINLINE bool PromoteToLargerBin(SIZE_T& Size, uint32& Alignment, const AllocType& Alloc) const
882 {
883 // try to promote our allocation request to a larger bin with a matching natural alignment
884 // if requested alignment is larger than UE_MBC_STANDARD_ALIGNMENT but smaller than UE_MBC_MAX_SMALL_POOL_ALIGNMENT
885 // so we don't do a page allocation with a lot of memory waste
886 Alignment = FMath::Max<uint32>(Alignment, UE_MBC_STANDARD_ALIGNMENT);
887 const SIZE_T AlignedSize = Align(Size, Alignment);
888 if (UNLIKELY((AlignedSize <= MAX_SMALL_POOL_SIZE) && (Alignment <= UE_MBC_MAX_SMALL_POOL_ALIGNMENT)))
889 {
890 uint32 PoolIndex = BoundSizeToPoolIndex(AlignedSize, Alloc.MemSizeToPoolIndex);
891 do
892 {
893 const uint32 BlockSize = Alloc.PoolIndexToBinSize(PoolIndex);
894 if (IsAligned(BlockSize, Alignment))
895 {
896 // we found a matching pool for our alignment and size requirements, so modify the size request to match
897 Size = SIZE_T(BlockSize);
898 Alignment = UE_MBC_STANDARD_ALIGNMENT;
899 return true;
900 }
901
902 PoolIndex++;
903 } while (PoolIndex < NUM_SMALL_POOLS);
904 }
905
906 return false;
907 }
908
910 {
911 if (((AllocType*)this)->GetSmallAllocationSize(Ptr, SizeOut))
912 {
913 return true;
914 }
915 if (!Ptr)
916 {
917 return false;
918 }
919
920 typename AllocType::FPoolInfo* Pool;
921 {
922 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_GetAllocationSizeExternal);
924 Pool = Internal::FindPoolInfo(*(AllocType*)this, Ptr);
925 }
926
927 if (!Pool)
928 {
930 }
931 const SIZE_T PoolOsBytes = Pool->GetOsAllocatedBytes();
932 const SIZE_T PoolOSRequestedBytes = Pool->GetOSRequestedBytes();
933 checkf(PoolOSRequestedBytes <= PoolOsBytes, TEXT("FMallocBinned::GetAllocationSizeExternal %zu %zu"), PoolOSRequestedBytes, PoolOsBytes);
935 return true;
936 }
937
938#if UE_MBC_ALLOCATOR_STATS
940 {
941 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_GetTotalAllocatedSmallPoolMemory);
943 {
946 {
947 FreeBlockAllocatedMemory += FreeBlockLists->AllocatedMemory;
948 }
949 FreeBlockAllocatedMemory += ConsolidatedMemory.load(std::memory_order_relaxed);
950 }
951
952 return AllocatedSmallPoolMemory.load(std::memory_order_relaxed) + FreeBlockAllocatedMemory;
953 }
954#endif
955
// Publishes per-bin fragmentation/waste/footprint numbers to the CSV profiler.
// Entire body compiles out unless both allocator stats and the CSV profiler are enabled.
956 void UpdateStatsCommon(const AllocType& Alloc)
957 {
958#if UE_MBC_ALLOCATOR_STATS && CSV_PROFILER_STATS
// Do nothing unless CSV stats were explicitly requested or the category is already active.
959 if (!GMallocBinnedEnableCSVStats && !FCsvProfiler::Get()->IsCategoryEnabled(CSV_CATEGORY_INDEX(MallocBinned)))
960 {
961 return;
962 }
963
964 FCsvProfiler::Get()->EnableCategoryByIndex(CSV_CATEGORY_INDEX(MallocBinned), true);
965
// Lazily build the per-bin stat names the first time stats are emitted.
// NOTE(review): the declarations of FragmentationsStats / WasteStats / TotalMemStats are
// elided from this listing (lines dropped by the scrape) -- presumably function-local
// static FName arrays sized NumSmallPools; verify against the upstream header.
966 static bool bFirstTime = true;
970
971 if (bFirstTime)
972 {
973 for (int32 i = 0; i < NumSmallPools; i++)
974 {
975 const int BinSize = Alloc.PoolIndexToBinSize(i);
976
977 TCHAR Name[64];
978 FCString::Sprintf(Name, TEXT("FragmentationBin%d"), BinSize);
980
981 FCString::Sprintf(Name, TEXT("WasteBin%d"), BinSize);
982 WasteStats[i] = FName(Name);
983
984 FCString::Sprintf(Name, TEXT("TotalMemBin%d"), BinSize);
986 }
987
988 bFirstTime = false;
989 }
990
// Emit one fragmentation %, total-MB, and wasted-MB stat per small pool.
991 for (int32 i = 0; i < NumSmallPools; i++)
992 {
// Fraction of allocated bins that are currently unused within this pool's pages.
993 const float Fragmentation = 1.0f - (float)Alloc.SmallPoolTables[i].TotalUsedBins / (float)Alloc.SmallPoolTables[i].TotalAllocatedBins;
994 FCsvProfiler::RecordCustomStat(FragmentationsStats[i], CSV_CATEGORY_INDEX(MallocBinned), int(Fragmentation * 100.0f), ECsvCustomStatOp::Set);
995
996 const float TotalMem = (float)Alloc.SmallPoolTables[i].TotalAllocatedMem / 1024.0f / 1024.0f;
997 FCsvProfiler::RecordCustomStat(TotalMemStats[i], CSV_CATEGORY_INDEX(MallocBinned), TotalMem, ECsvCustomStatOp::Set);
998
999 FCsvProfiler::RecordCustomStat(WasteStats[i], CSV_CATEGORY_INDEX(MallocBinned), TotalMem * Fragmentation, ECsvCustomStatOp::Set);
1000 }
1001
// Aggregate small/large pool totals, converted to MB.
1002 CSV_CUSTOM_STAT(MallocBinned, RequestedSmallPoolMemoryMB, (float)GetTotalAllocatedSmallPoolMemory() / (1024.0f * 1024.0f), ECsvCustomStatOp::Set);
1003 CSV_CUSTOM_STAT(MallocBinned, TotalAllocatedSmallPoolMemoryMB, (float)AllocatedOSSmallPoolMemory.load(std::memory_order_relaxed) / (1024.0f * 1024.0f), ECsvCustomStatOp::Set);
1004 CSV_CUSTOM_STAT(MallocBinned, RequestedLargeAllocsMemoryMB, (float)AllocatedLargePoolMemory.load(std::memory_order_relaxed) / (1024.0f * 1024.0f), ECsvCustomStatOp::Set);
1005 CSV_CUSTOM_STAT(MallocBinned, TotalAllocatedLargeAllocsMemoryMB, (float)AllocatedLargePoolMemoryWAlignment.load(std::memory_order_relaxed) / (1024.0f * 1024.0f), ECsvCustomStatOp::Set);
1006#endif
1007 }
1008
1010 {
1011 const uint64 MaxHashBuckets = PtrToPoolMapping.GetMaxHashBuckets();
1012 const uint64 HashAllocSize = Align(MaxHashBuckets * sizeof(FPoolHashBucket), OsAllocationGranularity);
1013 HashBuckets = (FPoolHashBucket*)AllocType::AllocateMetaDataMemory(HashAllocSize);
1015 verify(HashBuckets);
1016
1017 DefaultConstructItems<FPoolHashBucket>(HashBuckets, MaxHashBuckets);
1018 }
1019
1020private:
1021 FPoolHashBucket* HashBuckets = nullptr; // Hash buckets for external allocations, reserved in constructor based on the platform constants like page size and virtual address high\low hints
1022 FPoolHashBucket* HashBucketFreeList = nullptr; // Hash buckets for allocations that were allocated outside of the platform constants virtual address high\low hints
1023
1024 static void RegisterThreadFreeBlockLists(FPerThreadFreeBlockLists* FreeBlockLists)
1025 {
1026 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_RegisterThreadFreeBlockLists);
1029 }
1030
1031 static void UnregisterThreadFreeBlockLists(FPerThreadFreeBlockLists* FreeBlockLists)
1032 {
1033 //NOALLOC_SCOPE_CYCLE_COUNTER(STAT_FMallocBinned_UnregisterThreadFreeBlockLists);
1036 UE_MBC_UPDATE_STATS(ConsolidatedMemory.fetch_add(FreeBlockLists->AllocatedMemory, std::memory_order_relaxed));
1037 }
1038};
1039
1040#if UE_ENABLE_INCLUDE_ORDER_DEPRECATED_IN_5_7
1041# include "HAL/CriticalSection.h"
1042#endif
constexpr T Align(T Val, uint64 Alignment)
Definition AlignmentTemplates.h:18
constexpr bool IsAligned(T Val, uint64 Alignment)
Definition AlignmentTemplates.h:50
#define FORCENOINLINE
Definition AndroidPlatform.h:142
#define checkSlow(expr)
Definition AssertionMacros.h:332
#define check(expr)
Definition AssertionMacros.h:314
#define checkf(expr, format,...)
Definition AssertionMacros.h:315
#define verify(expr)
Definition AssertionMacros.h:319
#define TSAN_SAFE
Definition CoreMiscDefines.h:144
#define TEXT(x)
Definition Platform.h:1272
FPlatformTypes::SIZE_T SIZE_T
An unsigned integer the same size as a pointer, the same as UPTRINT.
Definition Platform.h:1150
FPlatformTypes::TCHAR TCHAR
Either ANSICHAR or WIDECHAR, depending on whether the platform supports wide characters or the requir...
Definition Platform.h:1135
FPlatformTypes::int64 int64
A 64-bit signed integer.
Definition Platform.h:1127
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
FPlatformTypes::UPTRINT UPTRINT
An unsigned integer the same size as a pointer.
Definition Platform.h:1146
#define UE_FORCEINLINE_HINT
Definition Platform.h:723
#define UNLIKELY(x)
Definition Platform.h:857
FPlatformTypes::uint64 uint64
A 64-bit unsigned integer.
Definition Platform.h:1117
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
#define CSV_CATEGORY_INDEX(CategoryName)
Definition CsvProfiler.h:75
#define CSV_DECLARE_CATEGORY_EXTERN(CategoryName)
Definition CsvProfiler.h:79
#define CSV_CUSTOM_STAT(Category, StatName, Value, Op)
Definition CsvProfiler.h:160
void Init()
Definition LockFreeList.h:4
#define UE_MBC_BIN_SIZE_SHIFT
Definition MallocBinnedCommon.h:49
CORE_API int32 GMallocBinnedFlushRegisteredThreadCachesOnOneThread
Definition MallocBinnedCommon.cpp:547
#define GMallocBinnedAllocExtra
Definition MallocBinnedCommon.h:82
#define UE_MBC_MAX_SMALL_POOL_ALIGNMENT
Definition MallocBinnedCommon.h:46
#define UE_MBC_UPDATE_STATS(x)
Definition MallocBinnedCommon.h:91
#define GMallocBinnedBundleCount
Definition MallocBinnedCommon.h:81
#define GMallocBinnedPerThreadCaches
Definition MallocBinnedCommon.h:79
#define GMallocBinnedBundleSize
Definition MallocBinnedCommon.h:80
#define UE_MBC_ALLOW_RUNTIME_TWEAKING
Definition MallocBinnedCommon.h:69
CORE_API float GMallocBinnedFlushThreadCacheMaxWaitTime
Definition MallocBinnedCommon.cpp:539
#define UE_MBC_MIN_SMALL_POOL_ALIGNMENT
Definition MallocBinnedCommon.h:45
CORE_API int32 GMallocBinnedEnableCSVStats
#define GMallocBinnedMaxBundlesBeforeRecycle
Definition MallocBinnedCommon.h:83
#define UE_MBC_STANDARD_ALIGNMENT
Definition MallocBinnedCommon.h:47
USkinnedMeshComponent float
Definition SkinnedMeshComponent.h:60
FRWLock Lock
Definition UnversionedPropertySerialization.cpp:921
uint32 Size
Definition VulkanMemory.cpp:4034
uint8_t uint8
Definition binka_ue_file_header.h:8
uint32_t uint32
Definition binka_ue_file_header.h:6
Definition MallocBinnedCommon.h:120
FBitTree()
Definition MallocBinnedCommon.h:129
uint32 CountOnes(uint32 UpTo) const
Definition MallocBinnedCommon.cpp:445
uint32 Slow_NextAllocBits(uint32 NumBits, uint64 StartIndex)
Definition MallocBinnedCommon.cpp:472
static constexpr uint32 GetMemoryRequirements(uint32 NumPages)
Definition MallocBinnedCommon.h:134
void FBitTreeInit(uint32 InDesiredCapacity, void *Memory, uint32 MemorySize, bool InitialValue)
Definition MallocBinnedCommon.cpp:168
uint32 AllocBit()
Definition MallocBinnedCommon.cpp:229
uint32 NextAllocBit() const
Definition MallocBinnedCommon.cpp:319
void FreeBit(uint32 Index)
Definition MallocBinnedCommon.cpp:415
bool IsAllocated(uint32 Index) const
Definition MallocBinnedCommon.cpp:278
Definition MallocBinnedCommon.h:184
static std::atomic< int64 > AllocatedOSSmallPoolMemory
Definition MallocBinnedCommon.h:421
void UnrecognizedPointerFatalError(void *Ptr)
Definition MallocBinnedCommon.cpp:633
static std::atomic< int64 > ConsolidatedMemory
Definition MallocBinnedCommon.h:419
static void OutOfMemory(uint64 Size, uint32 Alignment=0)
Definition MallocBinnedCommon.h:431
static uint32 OsAllocationGranularity
Definition MallocBinnedCommon.h:410
static int64 PoolInfoMemory
Definition MallocBinnedCommon.h:425
std::atomic< uint64 > MemoryTrimEpoch
Definition MallocBinnedCommon.h:415
static CORE_API uint32 BinnedTlsSlot
Definition MallocBinnedCommon.h:413
static std::atomic< int64 > TLSMemory
Definition MallocBinnedCommon.h:418
static std::atomic< int64 > AllocatedLargePoolMemory
Definition MallocBinnedCommon.h:422
static int64 HashMemory
Definition MallocBinnedCommon.h:426
void GetAllocatorStatsInternal(FGenericMemoryStats &OutStats, int64 TotalAllocatedSmallPoolMemory)
virtual CORE_API void OnMallocInitialized() override
Definition MallocBinnedCommon.cpp:814
FPtrToPoolMapping PtrToPoolMapping
Definition MallocBinnedCommon.h:408
UE::FPlatformRecursiveMutex ExternalAllocMutex
Definition MallocBinnedCommon.h:411
void LogLargeAllocation(SIZE_T Size) const
Definition MallocBinnedCommon.h:442
static std::atomic< int64 > AllocatedSmallPoolMemory
Definition MallocBinnedCommon.h:420
static std::atomic< int64 > AllocatedLargePoolMemoryWAlignment
Definition MallocBinnedCommon.h:423
uint64 NumPoolsPerPage
Definition MallocBinnedCommon.h:409
Definition MallocBinnedCommonUtils.h:98
Definition MemoryBase.h:99
Definition NameTypes.h:617
Definition Array.h:670
Definition MallocBinnedCommon.h:452
static TArray< FPerThreadFreeBlockLists * > & GetRegisteredFreeBlockLists()
Definition MallocBinnedCommon.h:774
virtual void SetupTLSCachesOnCurrentThread() override
Definition MallocBinnedCommon.h:780
void AllocateHashBuckets()
Definition MallocBinnedCommon.h:1009
uint32 BoundSizeToPoolIndex(SIZE_T Size, const uint8(&MemSizeToPoolIndex)[SIZE_TO_POOL_INDEX_NUM]) const
Definition MallocBinnedCommon.h:871
SIZE_T QuantizeSizeCommon(SIZE_T Count, uint32 Alignment, const AllocType &Alloc) const
Definition MallocBinnedCommon.h:836
static UE::FPlatformRecursiveMutex & GetFreeBlockListsRegistrationMutex()
Definition MallocBinnedCommon.h:768
void UpdateStatsCommon(const AllocType &Alloc)
Definition MallocBinnedCommon.h:956
static constexpr int SIZE_TO_POOL_INDEX_NUM
Definition MallocBinnedCommon.h:500
virtual void MarkTLSCachesAsUnusedOnCurrentThread() override
Definition MallocBinnedCommon.h:821
int64 GetTotalAllocatedSmallPoolMemory() const
Definition MallocBinnedCommon.h:939
bool GetAllocationSizeExternal(void *Ptr, SIZE_T &SizeOut)
Definition MallocBinnedCommon.h:909
virtual void GetAllocatorStats(FGenericMemoryStats &OutStats) override
Definition MallocBinnedCommon.h:492
FORCENOINLINE bool PromoteToLargerBin(SIZE_T &Size, uint32 &Alignment, const AllocType &Alloc) const
Definition MallocBinnedCommon.h:881
virtual void ClearAndDisableTLSCachesOnCurrentThread() override
Definition MallocBinnedCommon.h:796
virtual void MarkTLSCachesAsUsedOnCurrentThread() override
Definition MallocBinnedCommon.h:809
Definition WordMutex.h:21
bool TryLock()
Definition WordMutex.h:28
void Unlock()
Definition WordMutex.h:45
void Lock()
Definition WordMutex.h:34
Definition UniqueLock.h:20
@ Count
Definition AudioMixerDevice.h:90
Definition ByteSwap.h:14
UE_STRING_CLASS Result(Forward< LhsType >(Lhs), RhsLen)
Definition String.cpp.inl:732
FPThreadsRecursiveMutex FPlatformRecursiveMutex
Definition AndroidPlatformMutex.h:12
U16 Index
Definition radfft.cpp:71
static uint32 AllocTlsSlot(void)
Definition AndroidPlatformTLS.h:30
static UE_FORCEINLINE_HINT void * GetTlsValue(uint32 SlotIndex)
Definition AndroidPlatformTLS.h:57
static UE_FORCEINLINE_HINT void SetTlsValue(uint32 SlotIndex, void *Value)
Definition AndroidPlatformTLS.h:47
Definition MemoryMisc.h:21
static CORE_API void OnOutOfMemory(uint64 Size, uint32 Alignment)
Definition GenericPlatformMemory.cpp:216
static UE_FORCEINLINE_HINT bool IsValidTlsSlot(uint32 SlotIndex)
Definition GenericPlatformTLS.h:20
Definition MallocBinnedCommon.h:188
void SetNextNodeInCurrentBundle(FBundleNode *Next)
Definition MallocBinnedCommon.h:193
uint64 NextNodeInCurrentBundle
Definition MallocBinnedCommon.h:189
FBundleNode * GetNextNodeInCurrentBundle()
Definition MallocBinnedCommon.h:198
uint64 Reserved
Definition MallocBinnedCommon.h:191
uint64 Count
Definition MallocBinnedCommon.h:190
Definition MallocBinnedCommon.h:301
FBundleNode * RecyleFull(uint32 InPoolIndex, T &InGlobalRecycler)
Definition MallocBinnedCommon.h:337
UE_FORCEINLINE_HINT bool CanPushToFront(uint32 InPoolIndex, uint32 InBinSize) const
Definition MallocBinnedCommon.h:320
bool ObtainPartial(uint32 InPoolIndex, T &InGlobalRecycler)
Definition MallocBinnedCommon.h:353
bool PushToFront(void *InPtr, uint32 InPoolIndex, uint32 InBinSize)
Definition MallocBinnedCommon.h:303
void * PopFromFront(uint32 InPoolIndex)
Definition MallocBinnedCommon.h:325
FBundleNode * PopBundles(uint32 InPoolIndex)
Definition MallocBinnedCommon.h:369
Definition MallocBinnedCommon.h:208
UE_FORCEINLINE_HINT uint64 GetMaxHashBuckets() const
Definition MallocBinnedCommon.h:243
FPtrToPoolMapping(uint32 InPageSize, uint64 InNumPoolsPerPage, uint64 AddressBase, uint64 AddressLimit)
Definition MallocBinnedCommon.h:218
void GetHashBucketAndPoolIndices(const void *InPtr, uint32 &OutBucketIndex, UPTRINT &OutBucketCollision, uint32 &OutPoolIndex) const
Definition MallocBinnedCommon.h:234
FPtrToPoolMapping()
Definition MallocBinnedCommon.h:209
void Init(uint32 InPageSize, uint64 InNumPoolsPerPage, uint64 AddressBase, uint64 AddressLimit)
Definition MallocBinnedCommon.h:223
static constexpr UE_FORCEINLINE_HINT bool IsPowerOfTwo(T Value)
Definition UnrealMathUtility.h:519
Definition MallocBinnedCommon.h:168
FSizeTableEntry()=default
static uint8 FillSizeTable(uint64 PlatformPageSize, FSizeTableEntry *SizeTable, uint32 BasePageSize, uint32 MaxSize, uint32 SizeIncrement)
Definition MallocBinnedCommon.cpp:142
uint32 BinSize
Definition MallocBinnedCommon.h:169
bool operator<(const FSizeTableEntry &Other) const
Definition MallocBinnedCommon.h:175
uint32 NumMemoryPagesPerBlock
Definition MallocBinnedCommon.h:170
static int32 Sprintf(CharType *Dest, const FmtType &Fmt, Types... Args)
Definition CString.h:569
Definition MallocBinnedCommon.h:503
static void ClearTLS()
Definition MallocBinnedCommon.h:558
FBundleNode * RecycleFullBundle(uint32 InPoolIndex, T &InGlobalRecycler)
Definition MallocBinnedCommon.h:596
UE_FORCEINLINE_HINT bool Free(void *InPtr, uint32 InPoolIndex, uint32 InBinSize)
Definition MallocBinnedCommon.h:583
static FPerThreadFreeBlockLists * Get() TSAN_SAFE
Definition MallocBinnedCommon.h:504
bool ObtainRecycledPartial(uint32 InPoolIndex, T &InGlobalRecycler)
Definition MallocBinnedCommon.h:603
UE_FORCEINLINE_HINT void * Malloc(uint32 InPoolIndex)
Definition MallocBinnedCommon.h:577
UE_FORCEINLINE_HINT bool CanFree(uint32 InPoolIndex, uint32 InBinSize) const
Definition MallocBinnedCommon.h:589
bool UpdateEpoch(uint64 NewEpoch)
Definition MallocBinnedCommon.h:629
static void LockTLS()
Definition MallocBinnedCommon.h:548
FBundleNode * PopBundles(uint32 InPoolIndex)
Definition MallocBinnedCommon.h:608
void Lock()
Definition MallocBinnedCommon.h:613
static void UnlockTLS()
Definition MallocBinnedCommon.h:538
static void SetTLS()
Definition MallocBinnedCommon.h:520
bool TryLock()
Definition MallocBinnedCommon.h:618
void Unlock()
Definition MallocBinnedCommon.h:623
int64 AllocatedMemory
Definition MallocBinnedCommon.h:642
static PoolInfo * GetOrCreatePoolInfo(AllocType &Allocator, void *InPtr, typename PoolInfo::ECanary Kind)
Definition MallocBinnedCommon.h:657
typename AllocType::FPoolInfo PoolInfo
Definition MallocBinnedCommon.h:653
static PoolInfo * FindPoolInfo(AllocType &Allocator, void *InPtr)
Definition MallocBinnedCommon.h:745