Unreal Engine 5.7 API Documentation — 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
TaskDispatcherEvolution.h
Go to the documentation of this file.
1// Copyright Epic Games, Inc. All Rights Reserved.
2#pragma once
3
6#include "HAL/Event.h"
7
8namespace Chaos::Private
9{
10
11// This class is responsible to dispatch the evolution tasks
12// It orchestrates the task dependencies according to data flow.
13// This class shouldn't do any simulation logic and code.
// NOTE(review): the class declaration line (14) is missing from this extraction; per the
// cross-reference index at the bottom of this listing the class is FTaskDispatcherEvolution.
15{
16private:
 // Lower bound on particles processed per task, so tiny workloads don't spawn many tasks.
17 constexpr static int32 MinParticlePerTask = 40;
18
 // Single task handles chaining the async-queue stages and the final view refresh.
19 UE::Tasks::FTask AsyncQueueDynTask;
20 UE::Tasks::FTask AsyncQueueKinTask;
21 UE::Tasks::FTask FlushAsyncQueueTask;
22 UE::Tasks::FTask UpdateViewTask;
23
 // One task handle per dispatched batch of dynamic-integration / kinematic-target work.
24 TArray<UE::Tasks::FTask> IntegrationPendingTasks;
25 TArray<UE::Tasks::FTask> KinematicPendingTasks;
26
 // Externally-owned state bound by the constructor; the dispatcher never owns these.
27 FPBDRigidsSOAs& Particles;
28 FPendingSpatialInternalDataQueue& InternalAccelerationQueue;
29
 // NumTasks: worker-count cap computed once in the ctor. NumDynParticles / NumKinBatches
 // are cached per step by DispatchIntegrate / ComputeKinematicBatch respectively.
30 int32 NumTasks;
31 int32 NumDynParticles;
32 int32 NumKinBatches;
33
34public:
 // Constructor: binds the particle SOAs and the pending spatial-data queue, and caps the
 // task count at min(worker threads, Chaos::MaxNumWorkers), clamped to at least 1.
 // NOTE(review): the signature line (35) is missing from this extraction; per the index it is
 // FTaskDispatcherEvolution(FPBDRigidsSOAs& ParticlesIn, FPendingSpatialInternalDataQueue& InternalAccelerationQueueIn).
36 : Particles(ParticlesIn)
37 , InternalAccelerationQueue(InternalAccelerationQueueIn)
38 , NumTasks(FMath::Max(FMath::Min(FTaskGraphInterface::Get().GetNumWorkerThreads(), Chaos::MaxNumWorkers), 1))
 // Value-initialized counters; filled in per simulation step.
39 , NumDynParticles()
40 , NumKinBatches()
41 {
42 }
43
 // ComputeKinematicBatch(): recomputes NumKinBatches from the kinematic SOA views so the
 // pending-data arrays can be sized before dispatch.
 // NOTE(review): the signature line (44), the line declaring/obtaining KienamticParticles (47),
 // and the accumulation line (54) are missing from this extraction — presumably line 54
 // added a per-view batch count to NumKinBatches; verify against the original header.
 // NOTE(review): "KienamticParticles" is misspelled but used consistently; cannot be renamed
 // here without its (missing) declaration line.
45 {
46 // Compute the number of kinematic particles to allocate the InternalAccelerationQueue pending data
48 const int32 NumView = KienamticParticles.SOAViews.Num();
49 NumKinBatches = 0;
50 for (int32 ViewIndex = 0; ViewIndex < NumView; ++ViewIndex)
51 {
 // Per-view batch sizing: at least MinParticlePerTask particles per task, spread over NumTasks.
52 const int32 NumKinParticles = KienamticParticles.SOAViews[ViewIndex].Size();
53 const int32 ParticleByTask = FMath::Max(FMath::DivideAndRoundUp(NumKinParticles, FMath::Max(NumTasks, 1)), MinParticlePerTask);
55 }
56 }
57
58 template<typename Lambda>
 // DispatchIntegrate(Lambda IntegrateWork) — signature per the index (line 59, missing here).
 // Splits the active dynamic particles into batches and launches one task per batch, each
 // invoking IntegrateWork(BatchIndex, StartIndex, EndIndex). Also sizes the
 // InternalAccelerationQueue pending arrays: slot 0 is reserved for PushData dirty elements,
 // dynamic batches follow, then NumKinBatches kinematic slots.
 // NOTE(review): the line declaring NumDynBatches (66) and the lambda-closing/Launch-argument
 // lines (80, 82) are missing from this extraction.
60 {
61 NumDynParticles = Particles.GetActiveParticlesArray().Num();
62
 // At least one task, never more tasks than particles.
63 int32 NumDynTask = FMath::Max(FMath::Min(NumTasks, NumDynParticles), 1);
64
 // Batch size clamped to the per-task minimum to avoid tiny batches.
65 const int32 ParticleByTask = FMath::Max(FMath::DivideAndRoundUp(NumDynParticles, NumDynTask), MinParticlePerTask);
67
68 IntegrationPendingTasks.Reset(NumDynBatches);
 // Kinematic slots start right after the dynamic batches (+1 for the reserved slot 0).
69 InternalAccelerationQueue.KinematicBatchStartIndex = NumDynBatches + 1;
71 InternalAccelerationQueue.PendingDataArrays.SetNum(InternalAccelerationQueue.KinematicBatchStartIndex + NumKinBatches); // Keep 0 for the dirty element from PushData
72 for (int32 BatchIndex = 0; BatchIndex < NumDynBatches; BatchIndex++)
73 {
 // Half-open particle range [StartIndex, EndIndex) for this batch; last batch is clamped.
74 const int32 StartIndex = BatchIndex * ParticleByTask;
75 int32 EndIndex = (BatchIndex + 1) * ParticleByTask;
76 EndIndex = FMath::Min(NumDynParticles, EndIndex);
77
78 UE::Tasks::FTask PendingTask = UE::Tasks::Launch(UE_SOURCE_LOCATION, [this, IntegrateWork, BatchIndex, StartIndex, EndIndex]()
79 {
81 IntegrateWork(BatchIndex, StartIndex, EndIndex);
83 IntegrationPendingTasks.Add(PendingTask);
84 }
85 }
86
87 template<typename Lambda>
 // DispatchDynAsyncDirty(Lambda AsyncDirtyWork) — signature per the index (line 88, missing).
 // NOTE(review): almost the entire body (lines 91, 93-95) was dropped by this extraction;
 // presumably it launched AsyncDirtyWork and stored the handle in AsyncQueueDynTask (which
 // DispatchKinAsyncDirtyAndUpdateKinematic chains on) — verify against the original header.
89 {
90 // Why do we add the dynamic particle to the async queue which is suppose to be for static tree ?
92 {
96 }
97
98 template<typename Lambda>
 // DispatchKinAsyncDirtyAndUpdateKinematic(Lambda AsyncDirty, bool bIsLastStep) — signature
 // per the index (line 99, missing here). Chains the kinematic async-dirty work after the
 // dynamic async-queue task, then schedules the moving-kinematics update and finally the
 // particle-view refresh once all per-batch work has completed.
 // NOTE(review): lines 105, 112, 117, 119, 121, 133 and 137 are missing from this extraction
 // (lambda bodies/closers and the Launch call assigned on the bIsLastStep path).
 // NOTE(review): MovingKinematicsTask (line 127) is not among the visible member
 // declarations — presumably declared on a line the extraction dropped.
100 {
101 // To start dirtying Kinematics particles, the Dynamics particles must be finished
102 // Is it necessary to update the kinematic particles for the async data ?
103 AsyncQueueKinTask = UE::Tasks::Launch(UE_SOURCE_LOCATION, [AsyncDirty]()
104 {
106 AsyncDirty();
107 }, AsyncQueueDynTask, LowLevelTasks::ETaskPriority::High);
108
109 TArray<UE::Tasks::FTask> PendingTasks;
110 PendingTasks.Add(AsyncQueueKinTask);
111
113 // done with update, let's clear the tracking structures
114 if (bIsLastStep)
115 {
 // Last sub-step: also wait on every kinematic batch before the (missing) Launch below runs.
116 PendingTasks.Append(KinematicPendingTasks);
118 {
120 constexpr bool bUpdateView = false;
122 }, PendingTasks, LowLevelTasks::ETaskPriority::High);
123
124 }
125 else
126 {
 // Not the last step: the kinematic async-dirty task itself stands in for the update.
127 MovingKinematicsTask = AsyncQueueKinTask;
128 }
129
130 // Updating the views can be done only when all kinematics and dynamics update are finished.
131 UpdateViewTask = UE::Tasks::Launch(UE_SOURCE_LOCATION, [this]()
132 {
134 // If we changed any particle state, the views need to be refreshed
135 Particles.UpdateDirtyViews();
136
138 }
139
140 template<typename Lambda>
 // DispatchKinematicsTarget(Lambda KinematicTargetsWork, int32 NumParticles, int32 DispatchBatchIndex)
 // — signature per the index (line 141, missing here). Splits NumParticles into batches and
 // launches one task per batch invoking
 // KinematicTargetsWork(StartIndex, EndIndex, DispatchBatchIndex + BatchIndex).
 // NOTE(review): the Launch line (156) and the lambda/Launch-argument lines (158, 160) are
 // missing from this extraction; the batch tasks are collected into KinematicPendingTasks.
 // NOTE(review): line 148 lacks a trailing ';' — left byte-identical here as it is likely a
 // transcription artifact of the doc extraction; verify against the original header.
142 {
 // At least one task, never more tasks than particles; batch size clamped to the minimum.
143 int32 NumKinTask = FMath::Max(FMath::Min(NumTasks, NumParticles), 1);
144 const int32 ParticleByTask = FMath::Max(FMath::DivideAndRoundUp(NumParticles, NumKinTask), MinParticlePerTask);
145 int32 NumBatches = FMath::DivideAndRoundUp(NumParticles, ParticleByTask);
146
 // The pending-data arrays must already be sized (see DispatchIntegrate) to hold every batch.
147 check(NumBatches < InternalAccelerationQueue.PendingDataArrays.Num() + 1);
148 check(NumBatches > 0)
149 // We only write to particle state
150 for (int32 BatchIndex = 0; BatchIndex < NumBatches; BatchIndex++)
151 {
 // Half-open particle range [StartIndex, EndIndex) for this batch; last batch is clamped.
152 const int32 StartIndex = BatchIndex * ParticleByTask;
153 int32 EndIndex = (BatchIndex + 1) * ParticleByTask;
154 EndIndex = FMath::Min(NumParticles, EndIndex);
155
157 {
159 KinematicTargetsWork(StartIndex, EndIndex, DispatchBatchIndex + BatchIndex);
161 KinematicPendingTasks.Add(PendingTask);
162 }
163 }
164
165 template <typename Lambda>
 // PruneInternalPendingData(Lambda HasToBeUpdated) — signature per the index (line 166,
 // missing here). Filters each per-batch pending spatial-data array down to the entries the
 // HasToBeUpdated predicate keeps, launching one pruning task per batch chained after the
 // corresponding integrate / kinematic-update task when those are still in flight, then
 // clears the per-batch task lists.
 // NOTE(review): many lines are missing from this extraction (171, 174-175, 177, 183-184,
 // 187, 189, 191, 193, 206-207, 212-213, 215, 224, 227-228, 230, 235-236, 238-239, 257-258,
 // 262-263, 265, 274, 277-278, 280, 284-285, 287-288, 296) — these presumably declared
 // CurrentPendingTasks / MovingParticlesTasks / NewPendingData, the Launch calls, and the
 // copy-back of pruned data; verify against the original header before relying on details.
167 {
 // Number of dynamic batches: slot 0 of PendingDataArrays is reserved, dynamics start at 1.
168 const int32 NumBatchDyn = FMath::Min(InternalAccelerationQueue.KinematicBatchStartIndex, InternalAccelerationQueue.PendingDataArrays.Num()) - 1;
169 check(IntegrationPendingTasks.Num() == 0 || NumBatchDyn == IntegrationPendingTasks.Num());
170
172 CurrentPendingTasks.Reserve(InternalAccelerationQueue.PendingDataArrays.Num());
173
176
178 int32 NumActiveParticleTask = IntegrationPendingTasks.Num() + KinematicPendingTasks.Num();
179 if (NumActiveParticleTask > 0)
180 {
181 // Moving particles could be potentially duplicated in batch 0 (inside InternalAccelerationQueue.PendingDataArrays),
182 // before processing batch 0, lets remove all duplicated particles in that batch.
185 MovingParticlesTasks.Append(IntegrationPendingTasks);
186 MovingParticlesTasks.Append(KinematicPendingTasks);
188 {
190 InternalAccelerationQueue.CleanUpDuplicated();
192
194 }
195
196 // Prune Dynamics particles
197 // If IntegrationPendingTasks.Num() == 0, it means a post integrate callback was called, then we waited for the tasks to finish and then cleared them.
198 // In this case we still do the pruning but without waiting for the integrate tasks, because it is already done.
199 if (NumBatchDyn > 0 && (IntegrationPendingTasks.Num() == NumBatchDyn || IntegrationPendingTasks.IsEmpty()))
200 {
201 check(IntegrationPendingTasks.Num() == NumBatchDyn || IntegrationPendingTasks.IsEmpty());
202 check(NumBatchDyn <= InternalAccelerationQueue.PendingDataArrays.Num());
203
 // Only chain the prune task on the integrate task when that task is still pending.
204 const bool bLinkIntegrateTask = !IntegrationPendingTasks.IsEmpty();
205
208 for (int32 BatchIndex = 0; BatchIndex < NumBatchDyn; BatchIndex++)
209 {
210 // Dynamic particles start at index 1
211 TArray<FPendingSpatialData>& OldPendingData = InternalAccelerationQueue.PendingDataArrays[BatchIndex + 1];
214 {
 // Keep only the entries the predicate says still need an acceleration-structure update.
216 for (const FPendingSpatialData& PendingData : OldPendingData)
217 {
218 if (HasToBeUpdated(PendingData))
219 {
220 NewPendingData.Add(PendingData);
221 }
222 }
223 }, bLinkIntegrateTask ? IntegrationPendingTasks[BatchIndex] : UE::Tasks::FTask{}, LowLevelTasks::ETaskPriority::High);
225 }
226 // In order to safely copy all all pruned data, we need to be sure
229 {
231 for (int32 BatchIndex = 0; BatchIndex < NumBatchDyn; BatchIndex++)
232 {
233 // Dynamic particles start at index 1
234 TArray<FPendingSpatialData>& OldPendingData = InternalAccelerationQueue.PendingDataArrays[BatchIndex + 1];
237 }
240 }
241 else if (NumBatchDyn > 0)
242 {
 // Mismatched batch/task counts: skip pruning rather than risk racing in-flight tasks.
243 UE_LOG(LogChaos, Warning, TEXT("No pruning happened in the Spatial Acceleration structure, a slight perfromance hit could occured. IntegrationPendingTasks.Num(): %d != NumBatchDyn: %d"), IntegrationPendingTasks.Num(), NumBatchDyn);
244 CurrentPendingTasks.Append(IntegrationPendingTasks);
245 }
246
247 int32 NumBatchKin = InternalAccelerationQueue.PendingDataArrays.Num() - InternalAccelerationQueue.KinematicBatchStartIndex;
248 // Prune Kinematics particles
249 // If KinematicPendingTasks.Num() == 0, it means a post integrate callback was called, then we waited for the tasks to finish and then cleared them.
250 // In this case we still do the pruning but without waiting for the kinematic update tasks
251 if (NumBatchKin > 0 && InternalAccelerationQueue.KinematicBatchStartIndex > 0 && (KinematicPendingTasks.Num() == NumBatchKin || KinematicPendingTasks.IsEmpty()))
252 {
253 check(KinematicPendingTasks.Num() == NumBatchKin || KinematicPendingTasks.Num() == 0);
254
 // Same chaining rule as the dynamic path: only link when tasks are still pending.
255 const bool bHasToLinkTask = !KinematicPendingTasks.IsEmpty();
256
259 for (int32 BatchIndex = 0; BatchIndex < NumBatchKin; BatchIndex++)
260 {
 // Kinematic batches live after the dynamic ones, starting at KinematicBatchStartIndex.
261 TArray<FPendingSpatialData>& OldPendingData = InternalAccelerationQueue.PendingDataArrays[InternalAccelerationQueue.KinematicBatchStartIndex + BatchIndex];
264 {
266 for (const FPendingSpatialData& PendingData : OldPendingData)
267 {
268 if (HasToBeUpdated(PendingData))
269 {
270 NewPendingData.Add(PendingData);
271 }
272 }
273 }, bHasToLinkTask? KinematicPendingTasks[BatchIndex]: UE::Tasks::FTask{}, LowLevelTasks::ETaskPriority::High);
275 }
276
279 {
281 for (int32 BatchIndex = 0; BatchIndex < NumBatchKin; BatchIndex++)
282 {
283 TArray<FPendingSpatialData>& OldPendingData = InternalAccelerationQueue.PendingDataArrays[InternalAccelerationQueue.KinematicBatchStartIndex + BatchIndex];
286 }
289 }
290 else if (NumBatchKin > 0 && InternalAccelerationQueue.KinematicBatchStartIndex > 0)
291 {
292 // In this case, no post-integrate was called, but somehow NumBatchKin doesn't equal KinematicPendingTasks.Num()
293 UE_LOG(LogChaos, Warning, TEXT("No pruning happened in the Spatial Acceleration structure, a slight perfromance hit could occured. KinematicPendingTasks.Num(): %d != NumBatchKin: %d"), KinematicPendingTasks.Num(), NumBatchKin);
294 CurrentPendingTasks.Append(KinematicPendingTasks);
295 }
 // The per-batch handles have been consumed (chained into prune tasks or appended above).
297 IntegrationPendingTasks.Reset();
298 KinematicPendingTasks.Reset();
299 }
300
301 template<typename Lambda>
 // FlushAccelerationQueue(Lambda FlushAccelerationQueueLambda) — signature per the index
 // (line 302, missing here). Runs the flush work after the dynamic/kinematic async-queue
 // tasks and every pending kinematic batch have completed.
 // NOTE(review): lines 306 and 308-309 are missing from this extraction — presumably the
 // Launch call (whose result is stored in FlushAsyncQueueTask, waited on by
 // WaitAsyncQueueTask) and the lambda body invoking FlushAccelerationQueueLambda; verify
 // against the original header.
303 {
304 TArray<UE::Tasks::FTask> PendingTasks{AsyncQueueDynTask, AsyncQueueKinTask};
305 PendingTasks.Append(KinematicPendingTasks);
307 {
310 }, PendingTasks, LowLevelTasks::ETaskPriority::High);
311 }
312
313 // This will be called if it is required to wait tasks result to invoke a callback.
314 // We never know what a callback is going to use.
 // WaitIntegrationComplete() — signature per the index (line 315, missing here). Gathers the
 // view-update task plus every pending batch task, waits on them (the Wait call on line 321
 // is missing from this extraction — verify), and resets the handles so later stages know
 // the work is already done.
316 {
317 TArray<UE::Tasks::FTask> Tasks = { UpdateViewTask };
318 Tasks.Reserve(IntegrationPendingTasks.Num() + KinematicPendingTasks.Num() + 1);
319 Tasks.Append(IntegrationPendingTasks);
320 Tasks.Append(KinematicPendingTasks);
 // Clear the handles: PruneInternalPendingData treats empty task lists as "already waited".
322 UpdateViewTask = UE::Tasks::FTask();
323 IntegrationPendingTasks.Reset();
324 KinematicPendingTasks.Reset();
325 }
326
 // WaitAsyncQueueTask() — signature per the index (line 327, missing here).
 // Blocks until the async-queue flush task has completed.
328 {
329 UE::Tasks::Wait(FlushAsyncQueueTask);
330 }
331
 // WaitTaskEndSpatial() — signature per the index (line 332, missing here).
 // Blocks until the particle-view refresh task has completed.
333 {
334 UE::Tasks::Wait(UpdateViewTask);
335 }
336};
337}
#define check(expr)
Definition AssertionMacros.h:314
#define TEXT(x)
Definition Platform.h:1272
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
#define QUICK_SCOPE_CYCLE_COUNTER(Stat)
Definition Stats.h:652
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
#define UE_LOG(CategoryName, Verbosity, Format,...)
Definition LogMacros.h:270
#define UE_SOURCE_LOCATION
Definition PreprocessorHelpers.h:71
UE_INTRINSIC_CAST UE_REWRITE constexpr std::remove_reference_t< T > && MoveTemp(T &&Obj) noexcept
Definition UnrealTemplate.h:520
Definition PBDRigidsSOAs.h:269
void UpdateDirtyViews()
Definition PBDRigidsSOAs.h:329
const TParticleView< FPBDRigidParticles > & GetActiveMovingKinematicParticlesView() const
Definition PBDRigidsSOAs.h:944
void UpdateAllMovingKinematic(const bool bUpdateViews=true)
Definition PBDRigidsSOAs.h:829
const TArray< FPBDRigidParticleHandle * > & GetActiveParticlesArray() const
Definition PBDRigidsSOAs.h:933
Definition TaskDispatcherEvolution.h:15
void FlushAccelerationQueue(Lambda FlushAccelerationQueueLambda)
Definition TaskDispatcherEvolution.h:302
void DispatchKinAsyncDirtyAndUpdateKinematic(Lambda AsyncDirty, bool bIsLastStep)
Definition TaskDispatcherEvolution.h:99
void DispatchIntegrate(Lambda IntegrateWork)
Definition TaskDispatcherEvolution.h:59
void DispatchKinematicsTarget(Lambda KinematicTargetsWork, int32 NumParticles, int32 DispatchBatchIndex)
Definition TaskDispatcherEvolution.h:141
FTaskDispatcherEvolution(FPBDRigidsSOAs &ParticlesIn, FPendingSpatialInternalDataQueue &InternalAccelerationQueueIn)
Definition TaskDispatcherEvolution.h:35
void PruneInternalPendingData(Lambda HasToBeUpdated)
Definition TaskDispatcherEvolution.h:166
void WaitAsyncQueueTask()
Definition TaskDispatcherEvolution.h:327
void WaitIntegrationComplete()
Definition TaskDispatcherEvolution.h:315
void DispatchDynAsyncDirty(Lambda AsyncDirtyWork)
Definition TaskDispatcherEvolution.h:88
void ComputeKinematicBatch()
Definition TaskDispatcherEvolution.h:44
void WaitTaskEndSpatial()
Definition TaskDispatcherEvolution.h:332
Definition ParticleIterator.h:639
Definition TaskGraphInterfaces.h:265
Definition Array.h:670
UE_REWRITE SizeType Num() const
Definition Array.h:1144
void Reset(SizeType NewSize=0)
Definition Array.h:2246
UE_REWRITE bool IsEmpty() const
Definition Array.h:1133
UE_NODEBUG UE_FORCEINLINE_HINT SizeType Add(ElementType &&Item)
Definition Array.h:2696
UE_FORCEINLINE_HINT void Reserve(SizeType Number)
Definition Array.h:3016
Definition BodyInstance.h:90
Definition SkeletalMeshComponent.h:307
CHAOS_API int32 MaxNumWorkers
Definition Parallel.cpp:13
Private::FTaskHandle FTask
Definition Task.h:333
TTask< TInvokeResult_T< TaskBodyType > > Launch(const TCHAR *DebugName, TaskBodyType &&TaskBody, ETaskPriority Priority=ETaskPriority::Normal, EExtendedTaskPriority ExtendedPriority=EExtendedTaskPriority::None, ETaskFlags Flags=ETaskFlags::None)
Definition Task.h:266
bool Wait(const TaskCollectionType &Tasks, FTimespan InTimeout=FTimespan::MaxValue())
Definition Task.h:381
Definition PendingSpatialData.h:25
Definition PendingSpatialData.h:65
int32 KinematicBatchStartIndex
Definition PendingSpatialData.h:75
TArray< TArray< FPendingSpatialData > > PendingDataArrays
Definition PendingSpatialData.h:70
void CleanUpDuplicated()
Definition PendingSpatialData.h:159
Definition UnrealMathUtility.h:270
static constexpr UE_FORCEINLINE_HINT T DivideAndRoundUp(T Dividend, T Divisor)
Definition UnrealMathUtility.h:694