UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
MallocBinnedGPU.h
Go to the documentation of this file.
1// Copyright Epic Games, Inc. All Rights Reserved.
2
3#pragma once
4
5#include "Containers/Array.h"
6#include "CoreTypes.h"
10#include "Templates/Atomic.h"
11#include "Templates/MemoryOps.h"
12
13#if PLATFORM_64BITS && PLATFORM_HAS_FPlatformVirtualMemoryBlock
14#include "Async/UniqueLock.h"
19#include "HAL/MemoryBase.h"
20#include "HAL/PlatformMath.h"
21#include "HAL/PlatformMutex.h"
22#include "HAL/PlatformTLS.h"
23#include "HAL/UnrealMemory.h"
24#include "Math/NumericLimits.h"
26#include "Misc/ScopeLock.h"
27#include "Misc/ScopeLock.h"
29#include "Templates/Function.h"
30
31
32#define BINNEDGPU_MAX_GMallocBinnedGPUMaxBundlesBeforeRecycle (8)
33
34#define COLLECT_BINNEDGPU_STATS (!UE_BUILD_SHIPPING)
35
36#if COLLECT_BINNEDGPU_STATS
37 #define MBG_STAT(x) x
38#else
39 #define MBG_STAT(x)
40#endif
41
43
44struct FArenaParams
45{
// Configuration for a FMallocBinnedGPU arena.
// NOTE(review): several member declarations (bundle limits, the VM hook
// lambdas, and some derived fields) are elided from this excerpt by the
// documentation extractor.
46 // these are parameters you set
47 uint64 AddressLimit = 1024 * 1024 * 1024; // this controls the size of the root hash table
48 uint32 BasePageSize = 4096; // this is used to make sensible calls to malloc and figures into the standard pool sizes if bUseStandardSmallPoolSizes is true
49 uint32 AllocationGranularity = 4096; // this is the granularity of the commit and decommit calls used on the VM slabs
51 uint32 MaxStandardPoolSize = 128 * 1024; // these are added to the standard pool sizes, mainly to use the TLS caches, they are typically one block per slab
57 uint8 PoolCount; // number of small-block pools; no default here — presumably set during init (confirm with the .cpp)
59 bool bPerThreadCaches = true; // enable the per-thread (TLS) free-block caches
61 bool bAttemptToAlignSmallBocks = true; // (sic: established field name) allow small blocks to be served at larger-than-minimum alignment
63
64 // These lambdas are similar to the platform virtual memory HAL and by default just call that.
66
67 // These allow you to override the large block allocator. The value add here is that MBA tracks the metadata for you and can tell the difference between a large block pointer and a small block pointer.
68 // By default these just use the platform VM interface to allocate some committed memory
71
72 // these parameters are derived from the other parameters
74 uint32 MaxPoolSize;
75 uint32 MinimumAlignment;
77};
78
79class FMallocBinnedGPU final : public FMalloc
80{
81 struct FGlobalRecycler;
82 struct FPoolInfoLarge;
83 struct FPoolInfoSmall;
84 struct FPoolTable;
85 struct PoolHashBucket;
86 struct Private;
87
88
90 {
91 uint8 MemoryModifiedByCPU[32 - sizeof(void*)]; // might be modified for free list links, etc
92 void *GPUMemory; // pointer to the actual GPU memory, which we cannot modify with the CPU
93
96 {
98 }
99 };
100
101 struct FFreeBlock
102 {
// Header describing a run of free GPU blocks within a pool slab. The CPU
// never touches the GPU memory itself; 'this' is reinterpreted as an
// FGPUMemoryBlockProxy whose GPUMemory member points at the real allocation
// (see the cast on the allocation line below).
// NOTE(review): the constructor's signature line and two inline method
// signature lines are elided from this excerpt.
103 enum
104 {
105 CANARY_VALUE = 0xc3
106 };
107
110 , PoolIndex(InPoolIndex)
111 , Canary(CANARY_VALUE)
112 , NextFreeBlock(nullptr)
113 {
// A slab of InPageSize bytes holds InPageSize / InBlockSize blocks.
115 NumFreeBlocks = InPageSize / InBlockSize;
116 }
117
119 {
120 return NumFreeBlocks;
121 }
123 {
124 return Canary == FFreeBlock::CANARY_VALUE;
125 }
126
// Validates the canary byte; corruption (e.g. stray write or double free)
// funnels into the out-of-line CanaryFail().
127 inline void CanaryTest() const
128 {
129 if (!IsCanaryOk())
130 {
131 CanaryFail();
132 }
133 }
134 void CanaryFail() const;

// Carves the next block off the tail of this run: decrements the count and
// returns GPU base + NumFreeBlocks * BlockSize.
137 {
138 --NumFreeBlocks;
139 return (uint8*)(((FGPUMemoryBlockProxy*)this)->GPUMemory) + NumFreeBlocks * (uint32(BlockSizeShifted) << MinimumAlignmentShift);
140 }
141
142 uint16 BlockSizeShifted; // Size of the blocks that this list points to >> ArenaParams.MinimumAlignmentShift
143 uint8 PoolIndex; // Index of this pool
144 uint8 Canary; // Constant value of 0xc3 (CANARY_VALUE)
145 uint32 NumFreeBlocks; // Number of consecutive free blocks here, at least 1.
146 FFreeBlock* NextFreeBlock; // Next free block or nullptr
147 };
148
149 struct FPoolTable
150 {
151 uint32 BlockSize;
154
155 FBitTree BlockOfBlockAllocationBits; // one bits in here mean the virtual memory is committed
156 FBitTree BlockOfBlockIsExhausted; // one bit in here means the pool is completely full
157
160
162 };
163
164 struct FPtrToPoolMapping
165 {
// Precomputed shifts and masks that map an arbitrary pointer to its
// (hash bucket index, bucket collision key, pool index within page) for the
// root hash table.
166 FPtrToPoolMapping()
167 : PtrToPoolPageBitShift(0)
168 , HashKeyShift(0)
169 , PoolMask(0)
170 , MaxHashBuckets(0)
171 {
172 }
173 explicit FPtrToPoolMapping(uint32 InPageSize, uint64 InNumPoolsPerPage, uint64 AddressLimit)
174 {
175 Init(InPageSize, InNumPoolsPerPage, AddressLimit);
176 }
177
// Derives all fields from the page size, pools-per-page and address limit.
// NOTE(review): Init's signature line is elided from this excerpt.
179 {
180 uint64 PoolPageToPoolBitShift = FPlatformMath::CeilLogTwo(InNumPoolsPerPage);
181
182 PtrToPoolPageBitShift = FPlatformMath::CeilLogTwo(InPageSize);
183 HashKeyShift = PtrToPoolPageBitShift + PoolPageToPoolBitShift;
184 PoolMask = (1ull << PoolPageToPoolBitShift) - 1;
185 MaxHashBuckets = AddressLimit >> HashKeyShift;
186 }
187
// Splits InPtr into the bucket index (low bits of the hash key — the
// & (MaxHashBuckets - 1) masking assumes MaxHashBuckets is a power of two),
// the full key used to resolve bucket collisions, and the pool index.
188 inline void GetHashBucketAndPoolIndices(const void* InPtr, uint32& OutBucketIndex, UPTRINT& OutBucketCollision, uint32& OutPoolIndex) const
189 {
190 OutBucketCollision = (UPTRINT)InPtr >> HashKeyShift;
191 OutBucketIndex = uint32(OutBucketCollision & (MaxHashBuckets - 1));
192 OutPoolIndex = ((UPTRINT)InPtr >> PtrToPoolPageBitShift) & PoolMask;
193 }
194
195 UE_FORCEINLINE_HINT uint64 GetMaxHashBuckets() const
196 {
197 return MaxHashBuckets;
198 }
199
200 private:
// Derived constants (the per-field comment lines are elided from this
// excerpt).
202 uint64 PtrToPoolPageBitShift;
204 uint64 HashKeyShift;
206 uint64 PoolMask;
207 // PageSize dependent constants
208 uint64 MaxHashBuckets;
209 };
210
211 struct FBundleNode
212 {
// Intrusive link node threaded through cached free blocks. Nodes within one
// bundle chain via NextNodeInCurrentBundle; the union's second word holds
// either a link to the next bundle or a node count — which meaning applies
// depends on the call site (FBundle keeps its own running count; the union's
// Count usage is not visible in this excerpt).
213 FBundleNode* NextNodeInCurrentBundle;
214 union
215 {
216 FBundleNode* NextBundle;
217 int32 Count;
218 };
219 };
220
221 struct FBundle
222 {
// A small LIFO list of free-block proxy nodes plus an element count.
// NOTE(review): the Count member declaration (source line 252) is elided
// from this excerpt; Reset/PushHead/PopHead all reference it.
223 UE_FORCEINLINE_HINT FBundle()
224 {
225 Reset();
226 }
227
228 inline void Reset()
229 {
230 Head = nullptr;
231 Count = 0;
232 }
233
234 inline void PushHead(FBundleNode* Node)
235 {
236 Node->NextNodeInCurrentBundle = Head;
237 Node->NextBundle = nullptr;
238 Head = Node;
239 Count++;
240 }
241
// Precondition: Head != nullptr — callers test Head before popping.
242 inline FBundleNode* PopHead()
243 {
244 FBundleNode* Result = Head;
245
246 Count--;
247 Head = Head->NextNodeInCurrentBundle;
248 return Result;
249 }
250
251 FBundleNode* Head;
253 };
254
255 struct FFreeBlockList
256 {
// Per-pool pair of bundles (partial + full) backing one thread's free list
// for one block size.
// NOTE(review): the PushToFront and CanPushToFront signature lines are
// elided from this excerpt; only their bodies remain.
257 // return true if we actually pushed it
259 {
260 check(InPtr);
// If the partial bundle is at capacity (by block count or total bytes),
// promote it to the full slot; if a full bundle already exists, refuse the
// push so the caller frees through the slow path. The single '|' is a
// deliberate branch-reduction idiom (see the "one branch" comments below).
262 if ((PartialBundle.Count >= (uint32)LocalArenaParams.MaxBlocksPerBundle) | (PartialBundle.Count * InBlockSize >= (uint32)LocalArenaParams.MaxSizePerBundle))
263 {
264 if (FullBundle.Head)
265 {
266 return false;
267 }
268 FullBundle = PartialBundle;
269 PartialBundle.Reset();
270 }
// A heap-allocated CPU-side proxy holds the GPU pointer while it sits on
// the free list (the GPU memory itself is not CPU-writable).
271 PartialBundle.PushHead((FBundleNode*)new FGPUMemoryBlockProxy(InPtr));
272 MBG_STAT(Allocator.GPUProxyMemory += sizeof(FGPUMemoryBlockProxy);)
273 return true;
274 }
// True when a subsequent push with this block size cannot fail.
276 {
277 return !((!!FullBundle.Head) & ((PartialBundle.Count >= (uint32)LocalArenaParams.MaxBlocksPerBundle) | (PartialBundle.Count * InBlockSize >= (uint32)LocalArenaParams.MaxSizePerBundle)));
278 }
// Pops one cached GPU pointer, refilling the partial bundle from the full
// one first; returns nullptr when both bundles are empty.
279 inline void* PopFromFront(FMallocBinnedGPU& Allocator, uint32 InPoolIndex)
280 {
281 if ((!PartialBundle.Head) & (!!FullBundle.Head))
282 {
283 PartialBundle = FullBundle;
284 FullBundle.Reset();
285 }
286 void *Result = nullptr;
287 if (PartialBundle.Head)
288 {
289 FGPUMemoryBlockProxy* Proxy = (FGPUMemoryBlockProxy*)PartialBundle.PopHead();
290 Result = Proxy->GPUMemory;
291 check(Result);
292 delete Proxy;
293 MBG_STAT(Allocator.GPUProxyMemory -= sizeof(FGPUMemoryBlockProxy);)
294 }
295 return Result;
296 }
297
298 // tries to recycle the full bundle, if that fails, it is returned for freeing
299 FBundleNode* RecyleFull(FArenaParams& LocalArenaParams, FGlobalRecycler& GGlobalRecycler, uint32 InPoolIndex); // (sic: established spelling of "RecyleFull")
300 bool ObtainPartial(FArenaParams& LocalArenaParams, FGlobalRecycler& GGlobalRecycler, uint32 InPoolIndex);
301 FBundleNode* PopBundles(uint32 InPoolIndex);
302 private:
303 FBundle PartialBundle;
304 FBundle FullBundle;
305 };
306
307 struct FPerThreadFreeBlockLists
308 {
// Per-thread cache: one FFreeBlockList per small pool, reachable via TLS.
// NOTE(review): several forwarding-method signature lines are elided from
// this excerpt — only their bodies remain. From the call sites elsewhere in
// this header they appear to include Malloc and Free; confirm against the
// full source.
309 UE_FORCEINLINE_HINT static FPerThreadFreeBlockLists* Get(uint32 BinnedGPUTlsSlot)
310 {
311 return FPlatformTLS::IsValidTlsSlot(BinnedGPUTlsSlot) ? (FPerThreadFreeBlockLists*)FPlatformTLS::GetTlsValue(BinnedGPUTlsSlot) : nullptr;
312 }
313 static void SetTLS(FMallocBinnedGPU& Allocator);
314 static int64 ClearTLS(FMallocBinnedGPU& Allocator);
315
316 FPerThreadFreeBlockLists(uint32 PoolCount)
317 : AllocatedMemory(0)
318 {
319 FreeLists.AddDefaulted(PoolCount);
320 }
321
// Pops a cached block for the pool, or nullptr when the cache is empty.
323 {
324 return FreeLists[InPoolIndex].PopFromFront(Allocator, InPoolIndex);
325 }
326 // return true if the pointer was pushed
328 {
329 return FreeLists[InPoolIndex].PushToFront(Allocator, InPtr, InPoolIndex, InBlockSize, LocalArenaParams);
330 }
331 // return true if a pointer can be pushed
333 {
334 return FreeLists[InPoolIndex].CanPushToFront(InPoolIndex, InBlockSize, LocalArenaParams);
335 }
336 // returns a bundle that needs to be freed if it can't be recycled
338 {
339 return FreeLists[InPoolIndex].RecyleFull(LocalArenaParams, GlobalRecycler, InPoolIndex);
340 }
341 // returns true if we have anything to pop
343 {
344 return FreeLists[InPoolIndex].ObtainPartial(LocalArenaParams, GlobalRecycler, InPoolIndex);
345 }
346 FBundleNode* PopBundles(uint32 InPoolIndex)
347 {
348 return FreeLists[InPoolIndex].PopBundles(InPoolIndex);
349 }
350 int64 AllocatedMemory; // bytes currently handed out from this thread's cache (see Malloc/Free in FMallocBinnedGPU)
351 TArray<FFreeBlockList> FreeLists; // one entry per small pool index
352 };
353
354 struct FGlobalRecycler
355 {
// Lock-free global cache of full bundles: a fixed array of slots per pool,
// exchanged with compare-and-swap so any thread can park or steal a bundle
// without taking a lock.
// NOTE(review): the per-pool bundle-slot struct and both
// 'for (Slot ...)' loop header lines are elided from this excerpt.
356 void Init(uint32 PoolCount)
357 {
358 Bundles.AddDefaulted(PoolCount);
359 }
// Attempts to park InBundle in any empty slot; false when all slots are taken.
360 bool PushBundle(uint32 NumCachedBundles, uint32 InPoolIndex, FBundleNode* InBundle)
361 {
363 {
364 if (!Bundles[InPoolIndex].FreeBundles[Slot])
365 {
// CAS nullptr -> InBundle; a non-null return means another thread won the slot.
366 if (!FPlatformAtomics::InterlockedCompareExchangePointer((void**)&Bundles[InPoolIndex].FreeBundles[Slot], InBundle, nullptr))
367 {
368 return true;
369 }
370 }
371 }
372 return false;
373 }
374
// Steals any cached bundle for the pool, or returns nullptr when all slots
// are empty.
375 FBundleNode* PopBundle(uint32 NumCachedBundles, uint32 InPoolIndex)
376 {
378 {
379 FBundleNode* Result = Bundles[InPoolIndex].FreeBundles[Slot];
380 if (Result)
381 {
// Only the thread whose CAS swaps Result -> nullptr owns the bundle.
382 if (FPlatformAtomics::InterlockedCompareExchangePointer((void**)&Bundles[InPoolIndex].FreeBundles[Slot], nullptr, Result) == Result)
383 {
384 return Result;
385 }
386 }
387 }
388 return nullptr;
389 }
390
391 private:
393 {
396 {
398 }
399 };
401 };
402
403
404 inline uint64 PoolIndexFromPtr(const void* Ptr)
405 {
// Maps Ptr to the index of the small pool whose VM range contains it.
// Returns a value >= ArenaParams.PoolCount when Ptr lies outside every pool
// (an OS/large allocation — see IsOSAllocation).
// PoolSearchDiv == 0 means the per-pool VM blocks turned out to be
// contiguous, so a plain subtract-and-shift suffices.
406 if (PoolSearchDiv == 0)
407 {
408 return (UPTRINT(Ptr) - UPTRINT(PoolBaseVMPtr[0])) >> ArenaParams.MaxMemoryPerBlockSizeShift;
409 }
410 uint64 PoolIndex = ArenaParams.PoolCount;
411 if (((uint8*)Ptr >= PoolBaseVMPtr[0]) & ((uint8*)Ptr < HighestPoolBaseVMPtr + ArenaParams.MaxMemoryPerBlockSize))
412 {
// Estimate the pool by division, then walk down/up to the pool actually
// containing Ptr; a pointer landing in a gap between pools reports PoolCount.
413 PoolIndex = uint64((uint8*)Ptr - PoolBaseVMPtr[0]) / PoolSearchDiv;
414 if (PoolIndex >= ArenaParams.PoolCount)
415 {
416 PoolIndex = ArenaParams.PoolCount - 1;
417 }
418 if ((uint8*)Ptr < PoolBaseVMPtr[(int32)PoolIndex])
419 {
420 do
421 {
422 PoolIndex--;
423 check(PoolIndex < ArenaParams.PoolCount);
424 } while ((uint8*)Ptr < PoolBaseVMPtr[(int32)PoolIndex]);
425 if ((uint8*)Ptr >= PoolBaseVMPtr[(int32)PoolIndex] + ArenaParams.MaxMemoryPerBlockSize)
426 {
427 PoolIndex = ArenaParams.PoolCount; // was in the gap
428 }
429 }
430 else if ((uint8*)Ptr >= PoolBaseVMPtr[(int32)PoolIndex] + ArenaParams.MaxMemoryPerBlockSize)
431 {
432 do
433 {
434 PoolIndex++;
435 check(PoolIndex < ArenaParams.PoolCount);
436 } while ((uint8*)Ptr >= PoolBaseVMPtr[(int32)PoolIndex] + ArenaParams.MaxMemoryPerBlockSize);
437 if ((uint8*)Ptr < PoolBaseVMPtr[(int32)PoolIndex])
438 {
439 PoolIndex = ArenaParams.PoolCount; // was in the gap
440 }
441 }
442 }
443 return PoolIndex;
444 }
445
447 {
449 }
450 inline uint64 PoolIndexFromPtrChecked(const void* Ptr)
451 {
453 check(Result < ArenaParams.PoolCount);
454 return Result;
455 }
456
457 UE_FORCEINLINE_HINT bool IsOSAllocation(const void* Ptr)
458 {
459 return PoolIndexFromPtr(Ptr) >= ArenaParams.PoolCount;
460 }
461
462
464 {
465 uint32 PoolIndex = PoolIndexFromPtrChecked(Ptr);
466 uint8* PoolStart = PoolBasePtr(PoolIndex);
469
471
472 check(Result < PoolStart + ArenaParams.MaxMemoryPerBlockSize);
473 return Result;
474 }
476 {
479 check(Ptr + BlockOfBlocksSize <= PoolStart + ArenaParams.MaxMemoryPerBlockSize);
480 return Ptr;
481 }
484
486 {
487 if ((InOutSize <= ArenaParams.MaxPoolSize) & (Alignment <= ArenaParams.MinimumAlignment)) // one branch, not two
488 {
489 return true;
490 }
491 SIZE_T AlignedSize = Align(InOutSize, Alignment);
492 if (ArenaParams.bAttemptToAlignSmallBocks & (AlignedSize <= ArenaParams.MaxPoolSize) & (Alignment <= ArenaParams.MaximumAlignmentForSmallBlock)) // one branch, not three
493 {
494 uint32 PoolIndex = BoundSizeToPoolIndex(AlignedSize);
495 while (true)
496 {
497 uint32 BlockSize = PoolIndexToBlockSize(PoolIndex);
498 if (IsAligned(BlockSize, Alignment))
499 {
500 InOutSize = SIZE_T(BlockSize);
501 return true;
502 }
503 PoolIndex++;
504 check(PoolIndex < ArenaParams.PoolCount);
505 }
506 }
507 return false;
508 }
509
510public:
511
512
// Mutable access to the arena configuration this allocator was built with.
514 FArenaParams& GetParams()
515 {
516 return ArenaParams;
517 }
519
520 CORE_API virtual ~FMallocBinnedGPU();
521
522
523 // FMalloc interface.
524 CORE_API virtual bool IsInternallyThreadSafe() const override;
525 inline virtual void* Malloc(SIZE_T Size, uint32 Alignment) override
526 {
// Fast path: serve small allocations from the calling thread's TLS
// free-block cache; everything else (large sizes, empty cache, caches
// disabled) falls through to MallocExternal.
527 Alignment = FMath::Max<uint32>(Alignment, ArenaParams.MinimumAlignment);
528
529 void* Result = nullptr;
530
531 // Only allocate from the small pools if the size is small enough and the alignment isn't crazy large.
532 // With large alignments, we'll waste a lot of memory allocating an entire page, but such alignments are highly unlikely in practice.
// NOTE(review): the small-size/alignment condition line (source line 533)
// is elided from this excerpt.
534 {
535 FPerThreadFreeBlockLists* Lists = ArenaParams.bPerThreadCaches ? FPerThreadFreeBlockLists::Get(BinnedGPUTlsSlot) : nullptr;
536 if (Lists)
537 {
538 uint32 PoolIndex = BoundSizeToPoolIndex(Size);
539 uint32 BlockSize = PoolIndexToBlockSize(PoolIndex);
540 Result = Lists->Malloc(*this, PoolIndex);
541 if (Result)
542 {
// Account the full bin block size, not the requested size.
543 Lists->AllocatedMemory += BlockSize;
544 checkSlow(IsAligned(Result, Alignment));
545 }
546 }
547 }
548 if (Result == nullptr)
549 {
550 Result = MallocExternal(Size, Alignment);
551 }
552
553 return Result;
554 }
555 inline virtual void* Realloc(void* Ptr, SIZE_T NewSize, uint32 Alignment) override
556 {
// Intentionally unsupported: the GPU memory is assumed not CPU-writable, so
// the old contents cannot be copied into a new block. Always asserts.
557 check(!"MallocBinnedGPU cannot realloc memory because the memory is assumed to not be writable by the CPU");
558 return nullptr;
559 }
560
561 inline virtual void Free(void* Ptr) override
562 {
563 uint64 PoolIndex = PoolIndexFromPtr(Ptr);
564 if (PoolIndex < ArenaParams.PoolCount)
565 {
566 FPerThreadFreeBlockLists* Lists = ArenaParams.bPerThreadCaches ? FPerThreadFreeBlockLists::Get(BinnedGPUTlsSlot) : nullptr;
567 if (Lists)
568 {
569 int32 BlockSize = PoolIndexToBlockSize(PoolIndex);
570 if (Lists->Free(*this, Ptr, PoolIndex, BlockSize, ArenaParams))
571 {
572 Lists->AllocatedMemory -= BlockSize;
573 return;
574 }
575 }
576 }
577 FreeExternal(Ptr);
578 }
579 inline virtual bool GetAllocationSize(void *Ptr, SIZE_T &SizeOut) override
580 {
581 uint64 PoolIndex = PoolIndexFromPtr(Ptr);
582 if (PoolIndex < ArenaParams.PoolCount)
583 {
584 SizeOut = PoolIndexToBlockSize(PoolIndex);
585 return true;
586 }
587 return GetAllocationSizeExternal(Ptr, SizeOut);
588 }
589
590 inline virtual SIZE_T QuantizeSize(SIZE_T Count, uint32 Alignment) override
591 {
592 check(DEFAULT_ALIGNMENT <= ArenaParams.MinimumAlignment); // used below
593 checkSlow((Alignment & (Alignment - 1)) == 0); // Check the alignment is a power of two
595 if ((Count <= ArenaParams.MaxPoolSize) & (Alignment <= ArenaParams.MinimumAlignment)) // one branch, not two
596 {
597 SizeOut = PoolIndexToBlockSize(BoundSizeToPoolIndex(Count));
598 }
599 else
600 {
601 Alignment = FPlatformMath::Max<uint32>(Alignment, ArenaParams.AllocationGranularity);
602 SizeOut = Align(Count, Alignment);
603 }
604 check(SizeOut >= Count);
605 return SizeOut;
606 }
607
608 CORE_API virtual bool ValidateHeap() override;
609 CORE_API virtual void Trim(bool bTrimThreadCaches) override;
610 CORE_API virtual void SetupTLSCachesOnCurrentThread() override;
611 CORE_API virtual void ClearAndDisableTLSCachesOnCurrentThread() override;
612 CORE_API virtual const TCHAR* GetDescriptiveName() override;
613 // End FMalloc interface.
614
615 CORE_API void FlushCurrentThreadCache();
616 CORE_API void* MallocExternal(SIZE_T Size, uint32 Alignment);
617 CORE_API void FreeExternal(void *Ptr);
618 CORE_API bool GetAllocationSizeExternal(void* Ptr, SIZE_T& SizeOut);
619
620 MBG_STAT(int64 GetTotalAllocatedSmallPoolMemory();)
621 CORE_API virtual void GetAllocatorStats(FGenericMemoryStats& out_Stats) override;
623 CORE_API virtual void DumpAllocatorStats(class FOutputDevice& Ar) override;
624
625 inline uint32 BoundSizeToPoolIndex(SIZE_T Size)
626 {
627 auto Index = ((Size + ArenaParams.MinimumAlignment - 1) >> ArenaParams.MinimumAlignmentShift);
628 checkSlow(Index >= 0 && Index <= (ArenaParams.MaxPoolSize >> ArenaParams.MinimumAlignmentShift)); // and it should be in the table
629 uint32 PoolIndex = uint32(MemSizeToIndex[Index]);
630 checkSlow(PoolIndex >= 0 && PoolIndex < ArenaParams.PoolCount);
631 return PoolIndex;
632 }
634 {
635 return uint32(SmallBlockSizesReversedShifted[ArenaParams.PoolCount - PoolIndex - 1]) << ArenaParams.MinimumAlignmentShift;
636 }
637
638 CORE_API void Commit(uint32 InPoolIndex, void *Ptr, SIZE_T Size);
639 CORE_API void Decommit(uint32 InPoolIndex, void *Ptr, SIZE_T Size);
640
641
642 // Pool tables for different pool sizes
643 TArray<FPoolTable> SmallPoolTables;
644
646
647 PoolHashBucket* HashBuckets;
648 PoolHashBucket* HashBucketFreeList;
650
652 FGlobalRecycler GGlobalRecycler;
653 FPtrToPoolMapping PtrToPoolMapping;
654
656
657 TArray<uint16> SmallBlockSizesReversedShifted; // this is reversed to get the smallest elements on our main cache line
659 uint64 PoolSearchDiv; // if this is zero, the VM turned out to be contiguous anyway so we use a simple subtract and shift
660 uint8* HighestPoolBaseVMPtr; // this is a duplicate of PoolBaseVMPtr[ArenaParams.PoolCount - 1]
664 // Mapping of sizes to small table indices
666
667 MBG_STAT(
668 int64 BinnedGPUAllocatedSmallPoolMemory = 0; // memory that's requested to be allocated by the game
670
671 int64 BinnedGPUAllocatedLargePoolMemory = 0; // memory requests to the OS which don't fit in the small pool
672 int64 BinnedGPUAllocatedLargePoolMemoryWAlignment = 0; // when we allocate at OS level we need to align to a size
673
678 TAtomic<int64> ConsolidatedMemory;
680 )
681
683 UE::FPlatformRecursiveMutex& GetFreeBlockListsRegistrationMutex()
684 {
686 }
688 TArray<FPerThreadFreeBlockLists*>& GetRegisteredFreeBlockLists()
689 {
691 }
// Adds a thread's TLS free-block cache to the global registry under the
// recursive registration mutex — presumably so Trim()/stats can reach every
// thread's cache (confirm against the .cpp).
692 void RegisterThreadFreeBlockLists(FPerThreadFreeBlockLists* FreeBlockLists)
693 {
694 UE::TUniqueLock Lock(GetFreeBlockListsRegistrationMutex());
695 GetRegisteredFreeBlockLists().Add(FreeBlockLists);
696 }
// Removes the cache from the global registry and returns its outstanding
// AllocatedMemory so the caller can fold it into global accounting.
697 int64 UnregisterThreadFreeBlockLists(FPerThreadFreeBlockLists* FreeBlockLists)
698 {
699 UE::TUniqueLock Lock(GetFreeBlockListsRegistrationMutex());
700 GetRegisteredFreeBlockLists().Remove(FreeBlockLists);
701 return FreeBlockLists->AllocatedMemory;
702 }
703
705};
706
708
709#if UE_ENABLE_INCLUDE_ORDER_DEPRECATED_IN_5_7
710# include "HAL/CriticalSection.h"
711#endif
712
713#endif
OODEFFUNC typedef void(OODLE_CALLBACK t_fp_OodleCore_Plugin_Free)(void *ptr)
constexpr T Align(T Val, uint64 Alignment)
Definition AlignmentTemplates.h:18
constexpr bool IsAligned(T Val, uint64 Alignment)
Definition AlignmentTemplates.h:50
#define checkSlow(expr)
Definition AssertionMacros.h:332
#define check(expr)
Definition AssertionMacros.h:314
FPlatformTypes::SIZE_T SIZE_T
An unsigned integer the same size as a pointer, the same as UPTRINT.
Definition Platform.h:1150
FPlatformTypes::TCHAR TCHAR
Either ANSICHAR or WIDECHAR, depending on whether the platform supports wide characters or the requir...
Definition Platform.h:1135
FPlatformTypes::int64 int64
A 64-bit signed integer.
Definition Platform.h:1127
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
FPlatformTypes::UPTRINT UPTRINT
An unsigned integer the same size as a pointer.
Definition Platform.h:1146
#define UE_FORCEINLINE_HINT
Definition Platform.h:723
FPlatformTypes::uint64 uint64
A 64-bit unsigned integer.
Definition Platform.h:1117
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
void Init()
Definition LockFreeList.h:4
#define BINNEDCOMMON_USE_SEPARATE_VM_PER_POOL
Definition MallocBinnedCommon.h:41
@ DEFAULT_ALIGNMENT
Definition MemoryBase.h:24
#define PRAGMA_DISABLE_UNSAFE_TYPECAST_WARNINGS
Definition MSVCPlatformCompilerPreSetup.h:81
#define PRAGMA_RESTORE_UNSAFE_TYPECAST_WARNINGS
Definition MSVCPlatformCompilerPreSetup.h:100
#define MAX_uint16
Definition NumericLimits.h:20
#define MAX_uint8
Definition NumericLimits.h:19
FRWLock Lock
Definition UnversionedPropertySerialization.cpp:921
uint32 Size
Definition VulkanMemory.cpp:4034
uint8_t uint8
Definition binka_ue_file_header.h:8
uint16_t uint16
Definition binka_ue_file_header.h:7
uint32_t uint32
Definition binka_ue_file_header.h:6
Definition AndroidPlatformMemory.h:38
Definition MallocBinnedCommon.h:120
Definition MemoryBase.h:99
Definition OutputDevice.h:133
Definition Array.h:670
SizeType AddDefaulted()
Definition Array.h:2795
Definition Atomic.h:538
Definition AndroidPlatformMisc.h:14
Definition UniqueLock.h:20
UE::FRecursiveMutex Mutex
Definition MeshPaintVirtualTexture.cpp:164
FORCEINLINE T * Get(const FObjectPtr &ObjectPtr)
Definition ObjectPtr.h:426
Definition OverriddenPropertySet.cpp:45
UE_STRING_CLASS Result(Forward< LhsType >(Lhs), RhsLen)
Definition String.cpp.inl:732
FPThreadsRecursiveMutex FPlatformRecursiveMutex
Definition AndroidPlatformMutex.h:12
U16 Index
Definition radfft.cpp:71
static UE_FORCEINLINE_HINT void * GetTlsValue(uint32 SlotIndex)
Definition AndroidPlatformTLS.h:57
Definition MemoryMisc.h:21
static const uint32 InvalidTlsSlot
Definition GenericPlatformTLS.h:13
static UE_FORCEINLINE_HINT bool IsValidTlsSlot(uint32 SlotIndex)
Definition GenericPlatformTLS.h:20