UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
RaceDetectorTypes.h
Go to the documentation of this file.
1// Copyright Epic Games, Inc. All Rights Reserved.
2
3#pragma once
4
6
7#if USING_INSTRUMENTATION
8
9#include "CoreTypes.h"
10
11#include "Sanitizer/Types.h"
15
17
18namespace UE::Sanitizer::RaceDetector {
19
20 using namespace UE::Instrumentation;
21
22 extern volatile bool bDetailedLogGlobal;
23
24 // A read-write lock that doesn't put the thread into a WAIT state but instead repeatedly tries to acquire the lock.
25 // This version is customized to remove instrumentation and be as optimized as possible for instrumentation purpose.
26 // FPlatformAtomics are used to make sure all atomics are inlined.
27 // std::atomic often end up causing calls into non-inlined instrumented functions that causes costly reentrancy.
28 class FRWSpinLock
29 {
30 public:
31 UE_NONCOPYABLE(FRWSpinLock);
32
// NOTE(review): original lines 33-35 (default constructor and the TryWriteLock
// signature) were dropped by the doc scraper — restore from the real header.
34
// Attempts to take the lock exclusively: CAS the counter from 0 (free) to
// UINT32_MAX (writer-held sentinel). Returns true on success, never blocks.
36 {
37 return FPlatformAtomics::InterlockedCompareExchange((volatile int32*)&Lock, UINT32_MAX, 0) == 0;
38 }
39
// Blocking write lock: spins (no WAIT state) until the CAS succeeds.
// NOTE(review): signature line 40 missing from the scrape.
41 {
42 while (!TryWriteLock())
43 {
44 // Reduce contention by doing a simple relaxed read to see if we have a chance of being able to lock.
45 while (Lock != 0)
46 {
// NOTE(review): line 47 missing — presumably a CPU yield/pause in the
// inner spin; confirm against the real header.
48 }
49 }
50 }
51
// Releases the exclusive lock by storing 0. Only valid while holding the
// write lock (no other reader/writer can be present).
// NOTE(review): signature line 52 missing from the scrape.
53 {
54 Lock = 0;
55 }
56
// Attempts to take a shared (reader) lock by incrementing the counter via
// CAS. NOTE(review): lines 57 and 59 missing — the signature and the load
// of LocalValue (presumably a relaxed read of Lock); confirm.
58 {
60 // Check to make sure we don't already have a write lock or that we've not reached the limit of reader locks.
61 if (LocalValue >= UINT32_MAX - 1)
62 {
63 return false;
64 }
65
66 return FPlatformAtomics::InterlockedCompareExchange((volatile int32*)&Lock, LocalValue + 1, LocalValue) == LocalValue;
67 }
68
// Releases one shared lock (atomic decrement of the reader count).
// NOTE(review): signature line 69 missing from the scrape.
70 {
71 FPlatformAtomics::InterlockedDecrement((volatile int32*)&Lock);
72 }
73
// Blocking read lock: spins until TryReadLock succeeds.
// NOTE(review): lines 74 and 78 missing (signature and likely a yield).
75 {
76 while (!TryReadLock())
77 {
79 }
80 }
81
82 private:
// 0 = free, UINT32_MAX = writer held, otherwise the count of active readers.
83 volatile uint32 Lock = 0;
84 };
85
86 template<typename MutexType>
// RAII scope guard: takes MutexType's read (shared) lock on construction and
// releases it on destruction.
87 class TReadScopeLock
88 {
89 public:
90 UE_NONCOPYABLE(TReadScopeLock);
91
// NOTE(review): line 92 (constructor signature, presumably
// `TReadScopeLock(MutexType& InMutex)`) was dropped by the doc scraper.
93 : Mutex(InMutex)
94 {
95 Mutex.ReadLock();
96 }
97
// NOTE(review): line 98 (destructor signature) missing from the scrape.
99 {
100 Mutex.ReadUnlock();
101 }
102
103 private:
104 MutexType& Mutex;
105 };
106
107 template<typename MutexType>
// RAII scope guard: takes MutexType's write (exclusive) lock on construction
// and releases it on destruction.
108 class TWriteScopeLock
109 {
110 public:
111 UE_NONCOPYABLE(TWriteScopeLock);
112
// NOTE(review): line 113 (constructor signature, presumably
// `TWriteScopeLock(MutexType& InMutex)`) was dropped by the doc scraper.
114 : Mutex(InMutex)
115 {
116 Mutex.WriteLock();
117 }
118
// NOTE(review): line 119 (destructor signature) missing from the scrape.
120 {
121 Mutex.WriteUnlock();
122 }
123
124 private:
125 MutexType& Mutex;
126 };
127
// Lock-mode selector for TRWScopeLock below.
// NOTE(review): line 128 (the enum declaration, presumably
// `enum FRWScopeLockType`) was dropped by the doc scraper — the scraped index
// attributes FRWScopeLockType to ScopeRWLock.h, so confirm whether this is a
// local redefinition in the real header.
129 {
130 SLT_ReadOnly = 0,
131 SLT_Write,
132 };
133
134 template<typename MutexType>
// RAII scope guard that takes either the read or the write lock depending on
// the FRWScopeLockType passed at construction, and releases the matching lock
// on destruction.
135 class TRWScopeLock
136 {
137 public:
138 UE_NONCOPYABLE(TRWScopeLock);
139
// NOTE(review): line 140 (constructor signature, presumably
// `TRWScopeLock(MutexType& InMutex, FRWScopeLockType InLockType)`) was
// dropped by the doc scraper.
141 : Mutex(InMutex)
142 , LockType(InLockType)
143 {
144 if (LockType == SLT_ReadOnly)
145 {
146 Mutex.ReadLock();
147 }
148 else
149 {
150 Mutex.WriteLock();
151 }
152 }
153
// NOTE(review): line 154 (destructor signature) missing from the scrape.
155 {
156 if (LockType == SLT_ReadOnly)
157 {
158 Mutex.ReadUnlock();
159 }
160 else
161 {
162 Mutex.WriteUnlock();
163 }
164 }
165
166 private:
167 MutexType& Mutex;
168 FRWScopeLockType LockType;
169 };
170
171 // ------------------------------------------------------------------------------
172 // Clocks.
173 // ------------------------------------------------------------------------------
174 using FClock = uint32;
175 using FContextId = uint8;
176
// A bank of vector clocks, one slot per possible FContextId (256), plus the
// code location that last advanced each slot. Acquire/Release implement the
// usual vector-clock merge used for happens-before tracking.
177 class FClockBank
178 {
179 public:
// NOTE(review): line 180 (constructor signature) was dropped by the doc
// scraper. The body just resets all slots.
181 {
182 Reset();
183 }
184
// Acquire: pull Other's knowledge into this bank — take the element-wise
// max, recording ReturnAddress against every slot we bump.
// NOTE(review): signature line 185 missing (presumably
// `void Acquire(FClockBank& Other, void* ReturnAddress)` — confirm).
186 {
187 for (int32 Index = 0; Index < UE_ARRAY_COUNT(Clocks); ++Index)
188 {
189 if (Other.Clocks[Index] > Clocks[Index])
190 {
191 Clocks[Index] = Other.Clocks[Index];
192 Locations[Index] = ReturnAddress;
193 }
194 }
195 }
196
// Release: publish this bank's knowledge into Other (the mirror of Acquire).
// NOTE(review): signature line 197 missing from the scrape.
198 {
199 Other.Acquire(*this, ReturnAddress);
200 }
201
// AcquireRelease: symmetric merge — after this call both banks hold the
// element-wise max of the two.
// NOTE(review): signature line 202 missing from the scrape.
203 {
204 for (int32 Index = 0; Index < UE_ARRAY_COUNT(Clocks); ++Index)
205 {
206 if (Clocks[Index] > Other.Clocks[Index])
207 {
208 Other.Clocks[Index] = Clocks[Index];
209 Other.Locations[Index] = ReturnAddress;
210 }
211 else if (Other.Clocks[Index] > Clocks[Index])
212 {
213 Clocks[Index] = Other.Clocks[Index];
214 Locations[Index] = ReturnAddress;
215 }
216 }
217 }
218
// Zeroes every clock and location slot.
// NOTE(review): signature line 219 missing from the scrape.
220 {
221 memset(Clocks, 0, sizeof(Clocks));
222 memset(Locations, 0, sizeof(Locations));
223 }
224
// Accessor for a single context's clock value.
// NOTE(review): signature line 225 missing from the scrape.
226 {
227 return Clocks[ContextId];
228 }
229
// Wraps the single recorded location for a context as a 1-frame callstack.
// NOTE(review): signature line 230 missing from the scrape.
231 {
232 return FCallstackLocation(&Locations[ContextId], 1);
233 }
234
235 private:
236 FClock Clocks[256] = {};
237 void* Locations[256] = {};
238 };
239
240 // ------------------------------------------------------------------------------
241 // Memory access.
242 // ------------------------------------------------------------------------------
// Bit flags describing an instrumented memory access.
243 enum EMemoryAccessType : uint8 {
// NOTE(review): line 244 was dropped by the doc scraper — based on
// FMemoryAccess::IsValid() it is presumably `ACCESS_TYPE_INVALID = 0`.
245 ACCESS_TYPE_READ = 0b1,
246 ACCESS_TYPE_WRITE = 0b10,
247 ACCESS_TYPE_ATOMIC = 0b100,
248 ACCESS_TYPE_VPTR = 0b1000,
249
// NOTE(review): lines 250-252 missing — the combined enumerators
// ACCESS_TYPE_ATOMIC_READ / ACCESS_TYPE_ATOMIC_WRITE /
// ACCESS_TYPE_ATOMIC_READ_WRITE referenced below; presumably
// READ|ATOMIC, WRITE|ATOMIC and READ|WRITE|ATOMIC. Confirm.
253 };
255
// Maps an access-type bitmask to a human-readable name for logging.
// NOTE(review): the function signature (line 256) and the intermediate
// `case` labels (lines 262/264/266/268) were dropped by the doc scraper;
// from the returned strings they are presumably ACCESS_TYPE_WRITE,
// ACCESS_TYPE_ATOMIC_READ, ACCESS_TYPE_ATOMIC_WRITE and
// ACCESS_TYPE_ATOMIC_READ_WRITE.
257 {
258 switch (AccessType & ACCESS_TYPE_ATOMIC_READ_WRITE)
259 {
260 case ACCESS_TYPE_READ:
261 return TEXT("Read");
263 return TEXT("Write");
265 return TEXT("AtomicRead");
267 return TEXT("AtomicWrite");
269 return TEXT("AtomicReadWrite");
270 }
271 return TEXT("Unknown");
272 }
// Predicate: does the mask contain the READ bit?
// NOTE(review): signature line 273 missing from the scrape.
274 {
275 return EnumHasAnyFlags(AccessType, EMemoryAccessType::ACCESS_TYPE_READ);
276 }
// Predicate: does the mask contain the WRITE bit?
// NOTE(review): signature line 277 missing from the scrape.
278 {
279 return EnumHasAnyFlags(AccessType, EMemoryAccessType::ACCESS_TYPE_WRITE);
280 }
// Predicate: does the mask contain the ATOMIC bit?
// NOTE(review): signature line 281 missing from the scrape.
282 {
283 return EnumHasAnyFlags(AccessType, EMemoryAccessType::ACCESS_TYPE_ATOMIC);
284 }
285
// One shadow-memory cell: a 64-bit value packing clock, context id, the
// byte-slot mask of the access, and the access-type flags. The union below
// lets the hot path treat it as a single uint64.
286 struct FMemoryAccess
287 {
// NOTE(review): line 288 (default-constructor signature) was dropped by the
// doc scraper.
289 {
290 }
291
292 // Not sure why yet but Clang is not inlining this by default
293 // Did we mess up the clang compilation settings??
// NOTE(review): line 294 (raw-value constructor signature, presumably
// taking `uint64 InRawValue`) missing from the scrape.
295 : RawValue(InRawValue)
296 {
297 }
298
// Packing constructor. NOTE(review): line 299 (its signature — presumably
// clock, context id, offset, size and access type parameters) missing.
// Layout packed here must match the bitfield struct below:
// bits 0-31 Clock, 32-39 ContextId, 40-47 Access mask, 48+ AccessType.
300 {
301 // Apparently this is much faster than using the bitfields. 30% faster in warm TSAN benchmark!!!
302 // The constructor was accessing the same value in memory/store buffer multiple time doing its bit tweaking and it caused tons of Core::X86::Pmc::Core::LsBadStatus2.
303 // That ended up stalling on S[0].RawValue == CurrentAccess.RawValue in InstrumentMemoryAccess when trying to extract the full uint64.
304 // https://blog.stuffedcow.net/2014/01/x86-memory-disambiguation/
305 const uint8 AccessValue = (uint8)(((1ull << InSize) - 1ull) << InOffset);
306 RawValue = (uint64)InAccessType << 48 | (uint64)AccessValue << 40 | (uint64)InContextId << 32 | (uint64)InClock;
307 }
308
309 INSTRUMENTATION_FUNCTION_ATTRIBUTES FORCEINLINE uint8 GetOffset() const { return (uint8)FMath::CountTrailingZeros(Access); }
310 INSTRUMENTATION_FUNCTION_ATTRIBUTES FORCEINLINE uint8 GetSize() const { return (uint8)FMath::CountBits(Access >> GetOffset()); }
311 INSTRUMENTATION_FUNCTION_ATTRIBUTES FORCEINLINE bool IsValid() const { return AccessType != EMemoryAccessType::ACCESS_TYPE_INVALID; }
312
313 union
314 {
315 // If you touch this layout, make sure to update the constructor above
316 struct
317 {
318 FClock Clock; // 0
319 FContextId ContextId; // 4
320 // Each bit represent a 1-byte slot used in our 8 byte shadow
321 // and can easily be tested for overlaps with other accesses.
322 uint8 Access; // 5
323 union
324 {
325 struct
326 {
327 uint8 bIsRead : 1;
328 uint8 bIsWrite : 1;
329 uint8 bIsAtomic : 1;
330 uint8 bIsVPtr : 1;
331 };
332 EMemoryAccessType AccessType : 4; // 6 plenty of bits left here
333 };
334 uint8 Reserved; // 7 plenty of bits left here
335 };
336
337 uint64 RawValue;
338 };
339 };
340
// Tag for the variable-size records written into FHistoryChunk::Buffer.
341 enum class EHistoryEntryType : uint8
342 {
343 Invalid = 0, // Just make sure we do not mistake 0 memory for something valid
344 FunctionEntry = 0xAA, // Any number will do but make them stand out in the trace
345 MemoryAccess = 0xBB,
346 FunctionExit = 0xCC
347 };
348
// Common header of every history record; holds only the entry-type tag.
349 struct FHistoryEntryBase
350 {
// NOTE(review): line 351 (the member declaration, presumably
// `EHistoryEntryType Type;` given Entry->Type usage below) was dropped by
// the doc scraper.
352
// NOTE(review): line 353 (constructor signature taking
// `EHistoryEntryType InType`) missing from the scrape.
354 : Type(InType)
355 {
356 }
357 };
358
// History record for a single instrumented memory access: the accessed
// (aligned) pointer plus the packed FMemoryAccess.
// NOTE(review): line 359 (the struct declaration, presumably
// `struct FHistoryEntryAccess : public FHistoryEntryBase`) was dropped by
// the doc scraper.
360 {
361 void* Pointer;
362 FMemoryAccess Access;
363
// NOTE(review): lines 364-365 (constructor signature and base-initializer,
// presumably FHistoryEntryBase(EHistoryEntryType::MemoryAccess)) missing.
366 , Pointer(InPointer)
367 , Access(InRawAccess)
368 {
369 }
370 };
371
// History record emitted on function entry; carries the return address so
// the callstack can be rebuilt while replaying the buffer.
// NOTE(review): line 372 (the struct declaration, presumably
// `struct FHistoryEntryFunctionEntry : public FHistoryEntryBase`) was
// dropped by the doc scraper.
373 {
374 void* ReturnAddress;
375
// NOTE(review): lines 376-378 (constructor) missing from the scrape.
379 {
380 }
381 };
382
// History record emitted on function exit (no payload beyond the tag).
// NOTE(review): line 383 (the struct declaration, presumably
// `struct FHistoryEntryFunctionExit : public FHistoryEntryBase`) and
// lines 385-386 (constructor) were dropped by the doc scraper.
384 {
387 {
388 }
389 };
390
// One 2 MiB append-only buffer of history records, linked into the per-thread
// FAccessHistory list. [StartClock, EndClock] bounds the clocks recorded here.
391 struct FHistoryChunk
392 {
// NOTE(review): lines 393-397 were dropped by the doc scraper — presumably
// a non-copyable marker and the InitStack() declaration used by
// FAccessHistory. Restore from the real header.
394
398
399 uint32 StartClock = 0;
400 uint32 EndClock = 0;
// Write cursor into Buffer (bytes).
401 uint32 Offset = 0;
402 double LastUsed = FPlatformTime::Seconds();
403 uint8 Buffer[2*1024*1024];
404 FHistoryChunk* Prev = nullptr;
405 FHistoryChunk* Next = nullptr;
406 };
407
// Inclusive clock range covered by a thread's history (see GetClockRange-style
// usage below via OutClockRange.First / .Last).
// NOTE(review): lines 408 and 410 were dropped by the doc scraper —
// presumably the struct declaration (`struct FClockRange`) and the
// `FClock First;` member. Confirm against the real header.
409 {
411 FClock Last;
412 };
413
// Per-thread ring of FHistoryChunk buffers recording function entries/exits
// and memory accesses, so a race report can reconstruct the callstack of a
// past access. The owner thread appends; other threads may scan under Lock.
414 struct FAccessHistory
415 {
// NOTE(review): line 416 was dropped by the doc scraper — presumably the
// Lock member or a non-copyable marker; the methods below use a `Lock`.
417
// NOTE(review): line 418 (constructor signature) missing from the scrape.
419 {
420 Tail = Head = new FHistoryChunk();
421 Tail->InitStack();
422 NumChunks++;
423 }
424
// Destructor: frees both the live chunk list and the spare list under the
// write lock. NOTE(review): signature line 425 missing from the scrape.
426 {
427 TWriteScopeLock Scope(Lock);
428
429 while (Head)
430 {
431 FHistoryChunk* ToDelete = Head;
432 Head = Head->Next;
433 delete ToDelete;
434 }
435
436 NumChunks = 0;
437 Head = Tail = nullptr;
438
439 while (Spare)
440 {
441 FHistoryChunk* Next = Spare->Next;
442 delete Spare;
443 Spare = Next;
444 }
445 }
446
447 // Number of chunks that have been recycled
// NOTE(review): line 448 (the RecycleCount member declaration, used by
// TrimChunks below) was dropped by the doc scraper.
449
450 // Total number of chunks currently allocated
451 int32 NumChunks = 0;
452 int32 NumSpares = 0;
453
454 // Used to dump information in case we can't find the memory access in the history
455 double LastRecycle = 0.0;
456
457 // Just used on the slow path between recycling and scanning.
// NOTE(review): line 458 (the Lock member declaration, presumably an
// FRWSpinLock) was dropped by the doc scraper.
459
460 // Can be used by other threads doing race reporting
461 FHistoryChunk* Head = nullptr;
462
463 // Only used by the owner thread
464 FHistoryChunk* Tail = nullptr;
465
466 // Only used by the owner thread to store unused buffers
467 FHistoryChunk* Spare = nullptr;
468
// Whether the chunk list has exceeded the configured history length.
// NOTE(review): signature line 469 missing from the scrape.
470 {
471 // We need to have at least 2 chunks so that we always have one filled with data
472 // while we start filling the new one.
473 return NumChunks > 2 && NumChunks > GRaceDetectorHistoryLength;
474 }
475
// Moves excess chunks from the head of the live list onto the spare list
// (resetting their clocks/offset) under the write lock.
// NOTE(review): signature line 476 missing from the scrape.
477 {
478 if (HasTooManyChunks())
479 {
480 TWriteScopeLock Scope(Lock);
481
482 while (HasTooManyChunks())
483 {
484 FHistoryChunk* Recycle = Head;
485 Head = Head->Next;
486 Head->Prev = nullptr;
487
488 RecycleCount++;
489 LastRecycle = Recycle->LastUsed;
490
491 Recycle->Next = Spare;
492 Spare = Recycle;
493
494 Spare->StartClock = 0;
495 Spare->EndClock = 0;
496 Spare->Offset = 0;
497 Spare->LastUsed = 0.0;
498
499 NumChunks--;
500 NumSpares++;
501 }
502 }
503 }
504
// Appends a fresh chunk at the tail, reusing a spare when available, then
// trims the head if we are over budget.
// NOTE(review): signature line 505 missing from the scrape.
506 {
507 FHistoryChunk* NewChunk = nullptr;
508 if (Spare)
509 {
510 NumSpares--;
511 NewChunk = Spare;
512 Spare = Spare->Next;
513 NewChunk->Next = nullptr;
514 }
515 else
516 {
517 NewChunk = new FHistoryChunk();
518 }
519
520 NewChunk->InitStack();
521
522 Tail->LastUsed = FPlatformTime::Seconds();
523 NewChunk->Prev = Tail;
524 Tail->Next = NewChunk;
525 Tail = NewChunk;
526 NumChunks++;
527
528 TrimChunks();
529 }
530
// Placement-constructs an EntryType record at the tail's write cursor,
// rolling to a new chunk when the record would not fit. The light fence
// pairs with the heavy fence in the scan below so cross-thread readers see
// the record before the published Offset.
531 template <typename EntryType, typename... ArgsType>
// NOTE(review): line 532 (the function signature) and line 536 (the
// new-chunk call inside the if) were dropped by the doc scraper.
533 {
534 if (UNLIKELY(Tail->Offset + sizeof(EntryType) > UE_ARRAY_COUNT(Tail->Buffer)))
535 {
537 }
538
539 new (Tail->Buffer + Tail->Offset) EntryType(Forward<ArgsType>(Args)...);
540
541 Platform::AsymmetricThreadFenceLight();
542 Tail->Offset += sizeof(EntryType);
543 }
544
// NOTE(review): lines 545-559 are heavily truncated by the scrape — three
// small forwarding helpers (function entry, memory access — which also
// updates Tail->EndClock — and function exit). Restore from the real header.
546 {
548 }
549
551 {
553 Tail->EndClock = InAccess.Clock;
554 }
555
557 {
559 }
560
// Scans the history for the record matching (InAlignedPointer, InAccess),
// rebuilding the callstack as it walks FunctionEntry/Exit records. Returns
// true and fills OutLocation on a hit; OutClockRange reports the clock span
// that was actually covered. NOTE(review): signature line 561 missing.
562 {
563 TReadScopeLock Scope(Lock);
564
565 // Make sure we sync with the light fence.
566 Platform::AsymmetricThreadFenceHeavy();
567
568 OutClockRange.First = Head ? Head->StartClock : 0;
569
570 for (FHistoryChunk* Chunk = Head; Chunk; Chunk = Chunk->Next)
571 {
572 OutClockRange.Last = Chunk->EndClock;
573
574 // Do not bother searching a chunk that is outside the range we're looking for.
575 // Do the range comparison in a way that handles clock wrapping
576 if ((InAccess.Clock - Chunk->StartClock) <= (Chunk->EndClock - Chunk->StartClock))
577 {
// NOTE(review): line 578 (the Stack local declaration, a TArray<void*>
// by usage) and line 581 (the loop over Offset < Chunk->Offset) were
// dropped by the doc scraper.
579
580 int32 Offset = 0;
582 {
583 FHistoryEntryBase* Entry = (FHistoryEntryBase*)(Chunk->Buffer + Offset);
584 switch (Entry->Type)
585 {
586 case EHistoryEntryType::FunctionEntry:
587 Stack.Add(((FHistoryEntryFunctionEntry*)Entry)->ReturnAddress);
// NOTE(review): line 588 (Offset += sizeof(FHistoryEntryFunctionEntry))
// missing from the scrape.
589 break;
590 case EHistoryEntryType::MemoryAccess:
591 {
// NOTE(review): line 592 (the MemoryAccessEntry cast local) missing.
593 if (MemoryAccessEntry->Pointer == InAlignedPointer &&
594 MemoryAccessEntry->Access.RawValue == InAccess.RawValue)
595 {
596 OutLocation = FCallstackLocation(Stack.GetData(), Stack.Num());
597 return true;
598 }
599 Offset += sizeof(FHistoryEntryAccess);
600 }
601 break;
602 case EHistoryEntryType::FunctionExit:
603 Stack.Pop();
604 Offset += sizeof(FHistoryEntryBase);
605 break;
606 default:
607 check(false);
608 // This should never happen, but if it does it is most likely a race condition
609 // so just restart the tracing from the beginning as a last resort.
610 Stack.Reset();
611 Offset = 0;
612 break;
613 }
614 }
615 }
616 }
617
618 return false;
619 }
620
// Current write offset of the tail chunk (owner-thread view).
// NOTE(review): signature line 621 missing from the scrape.
622 {
623 return Tail->Offset;
624 }
625 };
626
627 struct FSyncObjectBank;
628 // ------------------------------------------------------------------------------
629 // Race Detector Context.
630 // ------------------------------------------------------------------------------
631
632 // We use ref-counting because this otherwise might get deleted
633 // by other threads and we'd need to hold a lock while scanning
634 // the history for race report, which would be unacceptable.
635 struct FContext : public TRefCountingMixin<FContext> {
// NOTE(review): line 636 (constructor signature taking `uint32 InThreadId`)
// was dropped by the doc scraper.
637
639 : ThreadId(InThreadId)
640 {
641 }
642
// Destructor: releases the heap-allocated access history.
// NOTE(review): signature line 643 missing from the scrape.
644 {
645 if (AccessHistory)
646 {
647 delete AccessHistory;
648 AccessHistory = nullptr;
649 }
650 }
651
// NOTE(review): throughout the member list below, the declarations paired
// with several comments (original lines 652, 660, 662, 664, 666, 668, 670,
// 674, 688) were dropped by the doc scraper (e.g. the instrumentation-depth
// counters, DetailedLogDepth, ClockBank, the callstack storage). Restore
// from the real header.
653 // When we activate tracing, we need to recapture the current stack
654 uint32 StackEpoch = 0;
655 // Unassigned until the first memory access
656 FContextId ContextId = 0;
657 // Avoid reading another TLS value for this
658 uint32 ThreadId;
659 // Prevents recursion for instrumentation
661 // Prevents recursion for detoured instrumentation
663 // Used to avoid instrumenting CreateThread while inside a higher level thread creation function (i.e. beginthreadx)
665 // When we want detailed logging for diagnostic purpose
667 // Clock used while waiting to get a context id assigned.
669 // Each thread holds a bank of clocks to synchronize with every other context.
671 // Hazard pointer used between GetSyncObject and ResetShadow
672 FSyncObjectBank* BankHazard = nullptr;
673 // We need to keep the callstack for each thread
675 // We use this to pass thread arguments to functions that don't have parameters (i.e. ExitThread).
676 void* ThreadArgs = nullptr;
677 // Avoid using UniquePtr because it's instrumented and each access has a cost.
678 FAccessHistory* AccessHistory = nullptr;
679 // Whether or not to always report race for this thread.
680 bool bAlwaysReport = false;
681
682 // This can be bumped again if we ever face a need for deeper callstacks since
683 // this is a virtual allocation anyway so it's not going to take physical memory
684 // until it is used.
685 // This needs to be at the end of the allocated block as we rely on page fault
686 // to abort the program if the stack ever goes beyond this limit.
687 static constexpr SIZE_T MaxCallstackSize = 4096;
689
690 static_assert(
691 sizeof(FHistoryChunk::Buffer) > 10 * MaxCallstackSize * sizeof(FHistoryEntryFunctionEntry),
692 "FHistoryChunk::Buffer should be big enough to accomodate initial callstack with plenty of space left"
693 );
694
695 // [NO ACCESS GUARD PAGE]
696
697 // The clock for this context Id
// NOTE(review): lines 698 and 703 (the CurrentClock() signature and its
// return statement using LocalContextId) missing from the scrape.
699 {
700 // Make sure we use the same value for the comparison and the get in case
701 // this is called from another thread while we're releasing our context id.
702 const FContextId LocalContextId = ContextId;
704 }
705
// Advances this context's clock; optionally logs when detailed logging is
// on. NOTE(review): signature line 706 and the if-condition line 710
// (presumably a DetailedLogDepth/bDetailedLogGlobal check) missing.
707 {
708 CurrentClock()++;
709
711 {
712 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] Thread is now at clock %u\n"), ThreadId, CurrentClock());
713 }
714 }
715
// Helper distinguishing a real context pointer from sentinel/flag values
// encoded in the pointer bits (valid contexts compare > 0 as int64).
// NOTE(review): signature line 716 missing from the scrape.
717 {
718 return reinterpret_cast<int64>(Context) > 0;
719 }
720 };
721
722 CORE_API extern FContext* GetThreadContext();
723
// RAII helper around FContext::WinInstrumentationDepth: decrements the depth
// on destruction when bNeedDecrement was set by the (missing) constructor.
// NOTE(review): line 724 (the struct declaration/name) and lines 727-728
// (presumably the destructor signature) were dropped by the doc scraper.
725 {
726 bool bNeedDecrement = false;
729 {
730 if (bNeedDecrement)
731 {
732 GetThreadContext()->WinInstrumentationDepth--;
733 }
734 }
735 };
736
// Per-address synchronization object: owns a clock bank and a spin lock, and
// applies atomic operations under the lock while merging vector clocks
// according to the operation's memory order.
737 class FSyncObject
738 {
739 public:
// Custom allocator so sync objects come from the instrumentation-safe
// allocator and are counted. NOTE(review): line 740 (the
// `operator new(size_t Size)` signature) was dropped by the doc scraper.
741 {
742 // Only count SyncObject that have been allocated separately to
743 // avoid counting the one embedded in the SyncObjectBank.
744 FPlatformAtomics::InterlockedIncrement(&ObjectCount);
745 return FInstrumentationSafeWinAllocator::Alloc(Size);
746 }
747
748 INSTRUMENTATION_FUNCTION_ATTRIBUTES void operator delete(void* Ptr)
749 {
750 FPlatformAtomics::InterlockedDecrement(&ObjectCount);
751 FInstrumentationSafeWinAllocator::Free(Ptr);
752 }
753
// Runs AtomicOp under the write lock after an acquire-side clock merge.
// NOTE(review): line 755 (signature) and line 758 (the clock-bank call,
// presumably Context.ClockBank.Acquire(...)) missing from the scrape.
754 template <typename AtomicOpType>
756 {
757 TWriteScopeLock Scope(Lock);
759 AtomicOp();
760 }
761
// Runs AtomicOp under the write lock after a release-side clock merge.
// NOTE(review): line 763 (signature) and line 766 (the clock-bank call,
// presumably Context.ClockBank.Release(...)) missing from the scrape.
762 template <typename AtomicOpType>
764 {
765 TWriteScopeLock Scope(Lock);
767 AtomicOp();
768 }
769
// Runs AtomicOp under the write lock with a symmetric acquire-release
// clock merge. NOTE(review): line 771 (signature) missing from the scrape.
770 template <typename AtomicOpType>
772 {
773 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
774 {
775 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s acq_rel of 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, SyncAddress, ReturnAddress);
776 }
777
778 TWriteScopeLock Scope(Lock);
779 Context.ClockBank.AcquireRelease(ClockBank, ReturnAddress);
780 AtomicOp();
781 }
782
// Order-aware variant: dispatches the clock merge on the runtime memory
// order of the atomic op (relaxed / acquire / release / acq_rel).
// NOTE(review): line 784 (signature) missing from the scrape.
783 template <typename AtomicOpType, typename ActualAccessCallbackType>
785 {
786 using namespace UE::Instrumentation;
787
788 // We only need to take a write lock when we do a release or acq_rel operation otherwise
789 // it's impossible to test for failure order as the AtomicOp inside the write lock would never fail.
790 // An acquire only operation is safe to run under read-lock since we're reading from the syncobject and writing into the context clockbank which is owned by the current thread.
791 // Per the standard, failure memory order cannot be release nor acq_release, so we don't need to look at the failure order to choose our lock type.
792 // See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/n4659.pdf §32.6.1
793 // #17 Requires: The failure argument shall not be memory_order_release nor memory_order_acq_rel.
794 //
795 // Also for the time being MSVC atomic implementation always combines both order so we have no way to test our support of different SuccessOrder and FailureOrder until they fix their implementation.
// NOTE(review): line 796 (presumably the scope-lock taken per the comment
// above) was dropped by the doc scraper.
797 bool bSucceeded = AtomicOp();
798
800 const TCHAR* OpResult = bSucceeded ? TEXT("success") : TEXT("failure");
801
// NOTE(review): lines 799 and 802-803 missing from the scrape (likely the
// ActualAccessCallback invocation and/or order resolution).
803
804 if (IsAtomicOrderRelaxed(Order))
805 {
806 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
807 {
808 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s %s relaxed of 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, OpResult, SyncAddress, ReturnAddress);
809 }
810
811 // Do nothing in the relaxed case since no barrier is provided.
812 return;
813 }
814 else if (AccessType == ACCESS_TYPE_ATOMIC_READ_WRITE && IsAtomicOrderAcquireRelease(Order))
815 {
816 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
817 {
818 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s %s acq_rel of 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, OpResult, SyncAddress, ReturnAddress);
819 }
820
821 Context.ClockBank.AcquireRelease(ClockBank, ReturnAddress);
822 }
823 else if ((AccessType & ACCESS_TYPE_ATOMIC_READ) == ACCESS_TYPE_ATOMIC_READ && IsAtomicOrderAcquire(Order))
824 {
825 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
826 {
827 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s %s acquire 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, OpResult, SyncAddress, ReturnAddress);
828 }
829
830 Context.ClockBank.Acquire(ClockBank, ReturnAddress);
831 }
// NOTE(review): line 832 (the else-if condition for the release branch,
// presumably testing ACCESS_TYPE_ATOMIC_WRITE + IsAtomicOrderRelease)
// missing from the scrape.
833 {
834 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
835 {
836 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s %s release 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, OpResult, SyncAddress, ReturnAddress);
837 }
838
839 Context.ClockBank.Release(ClockBank, ReturnAddress);
840 }
841 else
842 {
843 checkf(false, TEXT("Unexpected memory order"));
844 }
845
846 Context.IncrementClock();
847 }
848
849 // Must be called by a thread that has either this object's spin lock,
850 // or an external lock that is guaranteed to be held.
// NOTE(review): line 851 (signature, a lock-free Release variant) missing.
852 {
853 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
854 {
855 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s releases 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, SyncAddress, ReturnAddress);
856 }
857
858 Context.ClockBank.Release(ClockBank, ReturnAddress);
859 }
860
// Lock-free Acquire variant — same external-locking contract as above.
// NOTE(review): line 861 (signature) missing from the scrape.
862 {
863 if (UNLIKELY(Context.DetailedLogDepth || bDetailedLogGlobal))
864 {
865 FPlatformMisc::LowLevelOutputDebugStringf(TEXT("[%d] %s acquires 0x%p from function at 0x%p\n"), Context.ThreadId, OpName, SyncAddress, ReturnAddress);
866 }
867
868 Context.ClockBank.Acquire(ClockBank, ReturnAddress);
869 }
870
// Global count of separately-allocated FSyncObjects (diagnostics).
// NOTE(review): line 871 (signature) missing from the scrape.
872 {
873 return ObjectCount;
874 }
875
876 private:
// NOTE(review): lines 877-878 were dropped by the doc scraper — presumably
// the `Lock` (spin lock) and `ClockBank` members used throughout above.
879 static volatile int64 ObjectCount;
880 };
881
882 // One SyncObjectBank per 64-bit aligned address
// One SyncObjectBank per 64-bit aligned address: slot 0 lives embedded in the
// bank itself (EmbeddedObject), slots 1-7 (the unaligned byte offsets) are
// lazily allocated into SyncObjects[0..6]. Intrusively ref-counted.
883 struct FSyncObjectBank
884 {
// NOTE(review): lines 885, 887, 892, 905, 910, 931, 933, 938, 948 and 961
// (constructor/destructor/method signatures, the recycling `Next` link and
// the `EmbeddedObject` member) were dropped by the doc scraper — restore
// from the real header.
886
888 {
889 FPlatformAtomics::InterlockedIncrement(&ObjectCount);
890 }
891
893 {
// FIX: was `Index < 8`, but SyncObjects has only 7 entries (GetSyncObject
// maps offsets 1-7 to indices 0-6), so index 7 read past the array.
894 for (int32 Index = 0; Index < UE_ARRAY_COUNT(SyncObjects); ++Index)
895 {
896 if (SyncObjects[Index])
897 {
898 delete SyncObjects[Index];
899 }
900 }
901
902 FPlatformAtomics::InterlockedDecrement(&ObjectCount);
903 }
904
// Global bank count (diagnostics).
906 {
907 return ObjectCount;
908 }
909
// Returns the sync object for byte offset Index within the 8-byte slot:
// offset 0 is the embedded object, offsets 1-7 are created on demand with
// a CAS so concurrent callers agree on a single winner.
911 {
912 if (Index == 0)
913 {
914 return &EmbeddedObject;
915 }
916
917 Index--;
918 if (SyncObjects[Index] == nullptr)
919 {
920 FSyncObject* SyncObject = new FSyncObject();
921 if (FPlatformAtomics::InterlockedCompareExchangePointer((void**)&SyncObjects[Index], SyncObject, nullptr) != nullptr)
922 {
// Lost the publication race — another thread installed its object first.
923 delete SyncObject;
924 }
925 }
926
927 return SyncObjects[Index];
928 }
929
930 // We maintain a linked list of clock banks for recycling purpose
932
// Intrusive ref-count: AddRef/Release delete the bank at zero.
934 {
935 return FPlatformAtomics::InterlockedIncrement(&RefCount);
936 }
937
939 {
940 int32 NewRefCount = FPlatformAtomics::InterlockedDecrement(&RefCount);
941 if (NewRefCount == 0)
942 {
943 delete this;
944 }
945 return NewRefCount;
946 }
947
949 {
950 return RefCount;
951 }
952
953 static volatile int64 ObjectCount;
954 private:
955 volatile int32 RefCount = 1;
956
957 // Save space by allocating the first sync object as part of the bank itself.
958 // Most of the time the sync object will be at offset 0.
959 // The safe allocator uses virtual memory with 4KB pages so this first entry
960 // is completely free.
962
963 // Contains optional sync object for each unaligned bytes of the 64-bit.
964 FSyncObject* SyncObjects[7] = { 0 };
965 };
966
// Movable (not copyable) handle pairing a ref-counted FSyncObjectBank with
// one FSyncObject from it; holds a bank reference for its lifetime.
967 struct FSyncObjectRef
968 {
// NOTE(review): lines 969 and 971 (the constructor signature — presumably
// taking the bank and object — and the Object initializer) were dropped by
// the doc scraper.
970 : Bank(InBank)
972 {
973 AddRef();
974 }
975
// NOTE(review): line 976 (destructor signature) missing from the scrape.
977 {
978 Release();
979 }
980
981 // Not needed for now
982 FSyncObjectRef(const FSyncObjectRef&) = delete;
983 FSyncObjectRef& operator=(const FSyncObjectRef&) = delete;
984 FSyncObjectRef& operator=(FSyncObjectRef&&) = delete;
985
// Move constructor: steals Other's bank/object without touching the
// ref-count. NOTE(review): signature line 986 missing from the scrape.
987 {
988 Bank = Other.Bank;
989 Object = Other.Object;
990
991 Other.Bank = nullptr;
992 Other.Object = nullptr;
993 }
994
// Accessor for the wrapped sync object (presumably operator-> or Get).
// NOTE(review): signature line 995 missing from the scrape.
996 {
997 return Object;
998 }
999 private:
// NOTE(review): line 1000 (AddRef signature) missing from the scrape.
1001 {
1002 Bank->AddRef();
1003 }
1004
// NOTE(review): line 1005 (Release signature) missing. Null-checks Bank
// because a moved-from ref has Bank == nullptr.
1006 {
1007 if (Bank)
1008 {
1009 Bank->Release();
1010 }
1011 }
1012
// NOTE(review): lines 1013-1014 (the Bank and Object member declarations)
// were dropped by the doc scraper.
1015 };
1016
1017 // ------------------------------------------------------------------------------
1018 // Shadow memory.
1019 // ------------------------------------------------------------------------------
// Shadow-memory cell layout.
// NOTE(review): line 1022 (the member declaration(s) — likely an array of
// FMemoryAccess slots) was dropped by the doc scraper; restore from the
// real header.
1020 struct FShadowMemory
1021 {
1023 };
1024
// NOTE(review): the declaration line (original 1025) and member line (1027)
// of this struct were dropped by the doc scraper — only the braces survive.
// Restore the full definition from the real header.
1026 {
1028 };
1029
1030} // UE::Sanitizer::RaceDetector
1031
1032#endif // USING_INSTRUMENTATION
#define FORCENOINLINE
Definition AndroidPlatform.h:142
#define FORCEINLINE
Definition AndroidPlatform.h:140
#define check(expr)
Definition AssertionMacros.h:314
#define checkf(expr, format,...)
Definition AssertionMacros.h:315
#define UE_NONCOPYABLE(TypeName)
Definition CoreMiscDefines.h:457
#define TEXT(x)
Definition Platform.h:1272
FPlatformTypes::SIZE_T SIZE_T
An unsigned integer the same size as a pointer, the same as UPTRINT.
Definition Platform.h:1150
FPlatformTypes::TCHAR TCHAR
Either ANSICHAR or WIDECHAR, depending on whether the platform supports wide characters or the requir...
Definition Platform.h:1135
FPlatformTypes::int64 int64
A 64-bit signed integer.
Definition Platform.h:1127
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
#define UNLIKELY(x)
Definition Platform.h:857
FPlatformTypes::uint64 uint64
A 64-bit unsigned integer.
Definition Platform.h:1117
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
constexpr bool EnumHasAnyFlags(Enum Flags, Enum Contains)
Definition EnumClassFlags.h:35
#define INSTRUMENTATION_FUNCTION_ATTRIBUTES
Definition Defines.h:5
FRWScopeLockType
Definition ScopeRWLock.h:137
@ SLT_ReadOnly
Definition ScopeRWLock.h:138
@ SLT_Write
Definition ScopeRWLock.h:139
#define UE_ARRAY_COUNT(array)
Definition UnrealTemplate.h:212
FRWLock Lock
Definition UnversionedPropertySerialization.cpp:921
uint32 Offset
Definition VulkanMemory.cpp:4033
uint32 Size
Definition VulkanMemory.cpp:4034
uint8_t uint8
Definition binka_ue_file_header.h:8
uint16_t uint16
Definition binka_ue_file_header.h:7
uint32_t uint32
Definition binka_ue_file_header.h:6
Definition Array.h:670
UE_REWRITE SizeType Num() const
Definition Array.h:1144
void Reset(SizeType NewSize=0)
Definition Array.h:2246
UE_NODEBUG UE_FORCEINLINE_HINT ElementType * GetData() UE_LIFETIMEBOUND
Definition Array.h:1027
UE_NODEBUG UE_FORCEINLINE_HINT SizeType Add(ElementType &&Item)
Definition Array.h:2696
ElementType Pop(EAllowShrinking AllowShrinking=UE::Core::Private::AllowShrinkingByDefault< AllocatorType >())
Definition Array.h:1196
Definition RefCounting.h:355
int32 GetObjectCount(const OBJECT_ARRAY &Objects)
Definition BoundingVolumeUtilities.h:417
Type
Definition PawnAction_Move.h:11
UE::FRecursiveMutex Mutex
Definition MeshPaintVirtualTexture.cpp:164
FORCEINLINE T * Get(const FObjectPtr &ObjectPtr)
Definition ObjectPtr.h:426
TRWSpinLock< uint32 > FRWSpinLock
Definition RWSpinLock.h:79
const int32 Order[8][8]
Definition VorbisAudioInfo.cpp:47
U16 Index
Definition radfft.cpp:71
Definition AndroidPlatformTime.h:18
static double Seconds()
Definition AndroidPlatformTime.h:20
static CORE_API void VARARGS LowLevelOutputDebugStringf(const TCHAR *Format,...)
Definition GenericPlatformMisc.cpp:940
static void Yield()
Definition GenericPlatformProcess.h:950