UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
ParallelAdaptiveRefinement.h
Go to the documentation of this file.
1// Copyright Epic Games, Inc. All Rights Reserved.
2
3#pragma once
4
5#include "HAL/Platform.h"
7#include "IndexTypes.h"
8#include "VectorUtil.h"
9#include "Async/ParallelFor.h"
11
12#include <atomic>
13
14namespace UE {
15namespace Geometry {
16
17template <typename MeshType, typename RefinementPolicyType>
19{
20private:
21
22 struct FTriSplitRecord
23 {
24 uint16 SplitMask { 0 }; //< bitmask: per-edge refinement tagging [0..3)
25 std::atomic<uint16> GreenAdjMask { 0 }; //< bitmask: encode which of the neighbors [0..3) are green refinement
26 std::atomic_flag bClaimed = ATOMIC_FLAG_INIT;
27
28 inline void Reset()
29 {
30 SplitMask = 0;
31 GreenAdjMask.store(0);
32 bClaimed.clear();
33 }
34
35 // split one edge (green refinement)
36 bool IsSplit1() const
37 {
38 return SplitMask == 1 || SplitMask == 2 || SplitMask == 4;
39 }
40
41 // split two edges
42 bool IsSplit2() const
43 {
44 return SplitMask == 3 || SplitMask == 5 || SplitMask == 6;
45 }
46
47 // split all three edges (red refinement)
48 bool IsSplit3() const
49 {
50 return SplitMask == 7;
51 }
52 };
53
54 struct FEdgeSplitRecord
// Per-edge bookkeeping for one refinement level: records the midpoint vertex
// created when the edge splits, and lets the two triangles flanking the edge
// rendezvous lock-free (via the pre/post write counters) to link up the new
// half-edge pairs.
// NOTE(review): the declarations originally on lines 59-61 and 71-75 were lost in
// the documentation extraction. Based on uses elsewhere in this file
// (Edge.HalfEdges[i][j], Edge.NewEdge) they presumably declare
// `FIndex2i HalfEdges[2]` and `int32 NewEdge` -- confirm against the original header.
55 {
56 void Reset()
57 {
58 SplitVertex = IndexConstants::InvalidID;
// (missing lines 59-61: presumably reset HalfEdges entries / NewEdge to InvalidID -- confirm)
62
63 bTagged.clear();
64 EntryCountPreWrite.store(0);
65 EntryCountPostWrite.store(0);
66 }
67
68 int32 SplitVertex { IndexConstants::InvalidID }; // index of the midpoint vertex created by the split
69
70 // each entry pair corresponds to the half-edge indices of one side of the split
73
75
76 std::atomic_flag bTagged = ATOMIC_FLAG_INIT; // tagged for refinement
77 std::atomic<int32> EntryCountPreWrite { 0 }; // current number of entries, before write
78 std::atomic<int32> EntryCountPostWrite { 0 }; // number of half-edge pairs written
79 };
80
81 struct FConcurrencyState
82 {
83 TArray<FEdgeSplitRecord> EdgeSplitRecords;
84 TArray<FTriSplitRecord> TriSplitRecords;
85 std::atomic<int32> HalfEdgeCount; // current number of halfedges, for parallel append
86 std::atomic<int32> EdgeCount; // current number of edges, for parallel append
87
88 void Resize(const int32 NumTris, const int32 NumEdges)
89 {
90 TRACE_CPUPROFILER_EVENT_SCOPE_TEXT("TParallelAdaptiveRefinement.FConcurrencyState.Resize");
91
92 for (FTriSplitRecord& Record : TriSplitRecords)
93 {
94 Record.Reset();
95 }
96 if (NumTris > TriSplitRecords.Num())
97 {
98 TriSplitRecords.AddDefaulted(NumTris - TriSplitRecords.Num());
99 }
100 check(TriSplitRecords.Num() == NumTris);
101
102 for (FEdgeSplitRecord &Record : EdgeSplitRecords)
103 {
104 Record.Reset();
105 }
106
107 if (NumEdges > EdgeSplitRecords.Num())
108 {
109 EdgeSplitRecords.AddDefaulted(NumEdges - EdgeSplitRecords.Num());
110 }
111 }
112 };
113
114public:
115 using RealType = typename MeshType::RealType; // the field related to the vector-space
116 using VecType = typename MeshType::VecType; // for positions and normals
117
118 struct FOptions
// Tuning knobs for the refinement pass.
// NOTE(review): the member declarations (original lines 120, 123, 125) were lost in
// the documentation extraction. Per the symbol index at the bottom of this page
// they are:
//   int32 ParallelForBatchSize;          (line 120)
//   int32 ReserveVertices;               (line 123)
//   bool  bApplyNeighborhoodRefinement;  (line 125)
// -- confirm against the original header.
119 {
121
122 //< pre-reserve enough space for growing up to this many vertices
124
126 };
127
128 // Initialize and run adaptive refinement
// The constructor performs the entire refinement in-place on InMesh. Per level
// (capped at 32) it:
//   1) tags candidate triangles whose ShouldRefine() is true for a full "red"
//      split, claiming each triangle atomically so it is enqueued exactly once;
//   2) resolves the closure: untagged neighbors of red triangles get partial
//      (green / 2-edge) split masks so no T-junctions remain;
//   3) bulk-allocates midpoint vertices, edges and triangles;
//   4) computes edge midpoints and applies Split1/Split2/Split3 in parallel,
//      re-enqueueing the four children of each red split as next-level candidates.
// NOTE(review): the constructor's opening line (129) was dropped by the doc
// extraction; per the symbol index it reads:
//   TParallelAdaptiveRefinement(MeshType &InMesh, RefinementPolicyType &InRefinementPolicy, const FOptions &InOptions)
130 MeshType& InMesh, //< mesh interface
131 RefinementPolicyType& InRefinementPolicy, //< displacement and error interface
132 const FOptions& InOptions)
133 : Mesh(InMesh)
134 , RefinementPolicy(InRefinementPolicy)
135 , Options(InOptions)
136 {
137 const int ParallelForBatchSize = Options.ParallelForBatchSize;
// (missing line 138: presumably declares the ParallelForFlags value passed to the
// ParallelFor/ParallelForTemplate calls below -- confirm)
139
// Heuristic pre-sizing: ~2 triangles and ~3 edges per vertex for a closed-ish mesh.
140 const int32 ReserveTriangles = 2 * Options.ReserveVertices;
141 const int32 ReserveEdges = 3 * Options.ReserveVertices;
142
143 FConcurrencyState ConcurrencyState;
144 TArray<int32> RefinementCand; //< list of triangles to inspect for refinement
145 TArray<int32> TriSplitQueue; //< list of triangle indices scheduled for refinement
146 TArray<int32> EdgeSplitRequests; //< list of edge indices scheduled for refinement
147
148 RefinementCand.Reserve(ReserveTriangles);
149 TriSplitQueue.Reserve(ReserveTriangles);
150 EdgeSplitRequests.Reserve(ReserveEdges);
151
152 ConcurrencyState.TriSplitRecords.Reserve(ReserveTriangles);
153 ConcurrencyState.EdgeSplitRecords.Reserve(ReserveEdges);
154
155 Mesh.ReserveVertices(Options.ReserveVertices);
156 Mesh.ReserveTriangles(ReserveTriangles);
157
// Seed the candidate list with every existing triangle index.
158 RefinementCand.Init(0, Mesh.MaxTriID());
159 for (int32 TriangleIdx(0); TriangleIdx<Mesh.MaxTriID(); ++TriangleIdx)
160 {
// (missing line 161: presumably `RefinementCand[TriangleIdx] = TriangleIdx;` -- confirm)
162 }
163
164 std::atomic<int32> NumRefinementCand(RefinementCand.Num());
165
// Refinement level loop; the hard cap of 32 guards against a policy that never converges.
166 for (int32 Level(0); Level < 32; ++Level)
167 {
168 if (NumRefinementCand.load() == 0) {
169 break;
170 }
171
172 {
173 TRACE_CPUPROFILER_EVENT_SCOPE_TEXT("TParallelAdaptiveRefinement.ResizeRequests");
174 // reserve sufficient space for tris and edges to be split
175 TriSplitQueue.SetNum(Mesh.MaxTriID());
176 EdgeSplitRequests.SetNum(Mesh.EdgeCount());
177 ConcurrencyState.Resize(Mesh.MaxTriID(), Mesh.EdgeCount());
178 }
179
180 check(ConcurrencyState.TriSplitRecords.Num() == Mesh.MaxTriID());
181
182 std::atomic<int32> TriSplitQueueSize(0);
183 std::atomic<int32> NumEdgesToSplit(0);
184
// Phase 1: claim triangles that the policy wants refined, mark them red (mask 7),
// and tag each of their edges once for midpoint creation.
185 auto TagForRefine = [&](const int32 Idx)
186 {
187 const int32 TriIndex = RefinementCand[Idx];
188
189 if (!Mesh.IsValidTri(TriIndex))
190 {
191 return;
192 }
193
194 if (ShouldRefine(TriIndex, Level))
195 {
// test_and_set returning false means this thread is the first claimant.
196 if (!ConcurrencyState.TriSplitRecords[TriIndex].bClaimed.test_and_set())
197 {
198 TriSplitQueue[TriSplitQueueSize.fetch_add(1, std::memory_order_relaxed)] = TriIndex;
199 ConcurrencyState.TriSplitRecords[TriIndex].SplitMask = 7;
200
// (missing line 201: presumably `for (int LocalEdgeIdx=0; LocalEdgeIdx<3; ++LocalEdgeIdx)` -- confirm)
202 {
203 const int32 EdgeIdx = Mesh.GetEdgeIndex(TriIndex, LocalEdgeIdx);
204 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIdx];
205
206 // check whether this is the first time visiting this edge
207 if (!Edge.bTagged.test_and_set(std::memory_order_relaxed))
208 {
209 EdgeSplitRequests[ NumEdgesToSplit.fetch_add(1, std::memory_order_relaxed) ] = EdgeIdx;
210 }
211 }
212 }
213
// (missing line 214: presumably `if (Options.bApplyNeighborhoodRefinement)` -- confirm)
215 {
216 // now the same for all the neighbors
217 for (int NbIdx=0; NbIdx<3; ++NbIdx)
218 {
219 const int32 AdjTriIndex = Mesh.GetAdjTriangle(TriIndex, NbIdx);
220 if (AdjTriIndex != IndexConstants::InvalidID && !ConcurrencyState.TriSplitRecords[AdjTriIndex].bClaimed.test_and_set())
221 {
222 check(Mesh.IsValidTri(AdjTriIndex));
223
224 TriSplitQueue[TriSplitQueueSize.fetch_add(1, std::memory_order_relaxed)] = AdjTriIndex;
225 ConcurrencyState.TriSplitRecords[AdjTriIndex].SplitMask = 7;
226
// (missing line 227: presumably `for (int LocalEdgeIdx=0; LocalEdgeIdx<3; ++LocalEdgeIdx)` -- confirm)
228 {
229 const int32 EdgeIdx = Mesh.GetEdgeIndex(AdjTriIndex, LocalEdgeIdx);
230 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIdx];
231
232 // check whether this is the first time visiting this edge
233 if (!Edge.bTagged.test_and_set(std::memory_order_relaxed))
234 {
235 EdgeSplitRequests[ NumEdgesToSplit.fetch_add(1, std::memory_order_relaxed) ] = EdgeIdx;
236 }
237 }
238 }
239 }
240 }
241 }
242 };
243
244 // tag triangles for refinement;
245 ParallelForTemplate( TEXT("TParallelAdaptiveRefinement.TagForRefine.PF"), NumRefinementCand.load(), ParallelForBatchSize, TagForRefine, ParallelForFlags );
246
247 NumRefinementCand.store(0);
248 if (TriSplitQueueSize.load() == 0)
249 {
250 break;
251 }
252
253 std::atomic<int32> NumNewInteriorEdges(0);
254 std::atomic<int32> NumNewTriangles(0);
255
// Phase 2: give untagged triangles that border a red triangle a partial split mask
// (1 edge = green, 2 edges = blue-style) and tally new edge/triangle counts.
256 // identify neighboring triangles closure split masks (T-junctions)
257 ParallelFor( TEXT("TParallelAdaptiveRefinement.ResolveNeighbors.PF"), Mesh.MaxTriID(), ParallelForBatchSize, [&](const int32 TriIndex)
258 {
259 if (!Mesh.IsValidTri(TriIndex))
260 {
261 return;
262 }
263
264 int32 SplitMask = ConcurrencyState.TriSplitRecords[TriIndex].SplitMask;
265
266 if (SplitMask != 0)
267 {
// only full red splits are produced by the tagging phase
268 check(SplitMask == 7);
269 }
270 else
271 {
272 for (int LocalEdgeIdx=0; LocalEdgeIdx<3; ++LocalEdgeIdx)
273 {
274 const int32 AdjTriIndex = Mesh.GetAdjTriangle(TriIndex, LocalEdgeIdx);
275 if (AdjTriIndex != IndexConstants::InvalidID && ConcurrencyState.TriSplitRecords[AdjTriIndex].IsSplit3())
276 {
277 SplitMask |= 1 << LocalEdgeIdx;
278 }
279 }
280 }
281
282 if (ConcurrencyState.TriSplitRecords[TriIndex].SplitMask == 0 && SplitMask != 0)
283 {
284 // record that this triangle needs to be split as a consequence as well
285 TriSplitQueue[TriSplitQueueSize.fetch_add(1, std::memory_order_relaxed)] = TriIndex;
286 }
287
288 ConcurrencyState.TriSplitRecords[TriIndex].SplitMask = SplitMask;
289 ConcurrencyState.TriSplitRecords[TriIndex].GreenAdjMask.store(uint16(0));
290
291 if (SplitMask == 7)
292 {
293 // full split
294 NumNewInteriorEdges.fetch_add(3, std::memory_order_relaxed);
295 NumNewTriangles.fetch_add(3, std::memory_order_relaxed);
296 }
297 else if (SplitMask == 1 || SplitMask == 2 || SplitMask == 4)
298 {
299 // green refinement
300 NumNewInteriorEdges.fetch_add(1, std::memory_order_relaxed);
301 NumNewTriangles.fetch_add(1, std::memory_order_relaxed);
302 }
303 else if (SplitMask != 0)
304 {
// two-edge split: 2 interior edges, 2 new triangles
305 NumNewInteriorEdges.fetch_add(2, std::memory_order_relaxed);
306 NumNewTriangles.fetch_add(2, std::memory_order_relaxed);
307 }
308 }, ParallelForFlags );
309
310 // for each green refined triangles, store the edge mask for each neighboring triangle that is marked for refinement
311 ParallelFor( TEXT("TParallelAdaptiveRefinement.IdentifyGreenNeighbors.PF"), TriSplitQueueSize.load(), ParallelForBatchSize, [&](int32 Idx)
312 {
313 const int32 TriIndex = TriSplitQueue[Idx];
314 check(ConcurrencyState.TriSplitRecords[TriIndex].SplitMask != 0);
315
316 if (ConcurrencyState.TriSplitRecords[TriIndex].IsSplit1())
317 {
318 // mark neighbors
319 for (int32 NbIdx=0; NbIdx<3; ++NbIdx)
320 {
321 const TPair<int32,int32> TriEdge = Mesh.GetAdjEdge(TriIndex, NbIdx);
322 if (TriEdge.Get<0>() != IndexConstants::InvalidID && ConcurrencyState.TriSplitRecords[TriEdge.Get<0>()].SplitMask != 0)
323 {
323 // set the neighbor-local edge bit so Split1 knows a concurrent rendezvous is needed
324 ConcurrencyState.TriSplitRecords[TriEdge.Get<0>()].GreenAdjMask.fetch_or(static_cast<uint16>(1u << TriEdge.Get<1>()));
325 }
326 }
327 }
// (missing line 328: presumably closes this lambda/ParallelFor: `}, ParallelForFlags );` -- confirm)
329
330 // grow the arrays for edges and vertices
// (missing line 331: content lost in extraction)
332 {
333 TRACE_CPUPROFILER_EVENT_SCOPE_TEXT("TParallelAdaptiveRefinement.GrowVerts");
334
335 const int32 NewVertexCount = NumEdgesToSplit.load();
// (missing lines 336-337: presumably declare NewEdgeCount = NewVertexCount + NumNewInteriorEdges
// plus the PrevEdgeCount/PrevVtxCount variables assigned below -- confirm)
338 ConcurrencyState.EdgeCount.store(Mesh.EdgeCount() + NewVertexCount);
339
340 PrevEdgeCount = Mesh.AddEdges(NewEdgeCount);
341 PrevVtxCount = Mesh.AddVertices(NewVertexCount);
342
343 ConcurrencyState.EdgeSplitRecords.AddDefaulted( NewEdgeCount );
344 check(ConcurrencyState.EdgeSplitRecords.Num() == Mesh.EdgeCount());
345
346 RefinementPolicy.AllocateVertices(NewVertexCount);
347 }
348
// Phase 3: each tagged edge gets its midpoint vertex; new edge/vertex indices are
// derived from the request index so no further synchronization is needed.
349 // assign new edge-midpoints, assign displacement
350 ParallelForTemplate(TEXT("TParallelAdaptiveRefinement.MakeEdgeMidpoints.PF"), NumEdgesToSplit.load(), ParallelForBatchSize, [&](const int32 RequestIdx)
351 {
352 const int32 EdgeIndex = EdgeSplitRequests[RequestIdx];
353 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIndex];
354
355 Edge.NewEdge = PrevEdgeCount + RequestIdx;
356 Edge.SplitVertex = PrevVtxCount + RequestIdx;
357
358 FIndex2i VertexIndices, Tris;
359
360 Mesh.GetEdge(EdgeIndex, VertexIndices, Tris);
361 Mesh.InterpolateVertex(0.5f, Edge.SplitVertex, EdgeIndex);
362 RefinementPolicy.VertexAdded(Edge.SplitVertex, Tris);
363
364 }, ParallelForFlags );
365
366 ConcurrencyState.HalfEdgeCount.store(Mesh.HalfEdgeCount());
367 {
368 TRACE_CPUPROFILER_EVENT_SCOPE_TEXT("TParallelAdaptiveRefinement.GrowHalfEdges");
369
370 Mesh.AddTriangles(NumNewTriangles.load());
371 RefinementCand.AddDefaulted(NumNewTriangles.load());
372 }
373
// Phase 4: apply the per-triangle splits; only red (Split3) children become
// candidates for the next level.
374 // walk through refinement queue, apply refinement, enqueue triangles to check for refinement
375 ParallelFor( TEXT("TParallelAdaptiveRefinement.RefineTri.PF"), TriSplitQueueSize.load(), ParallelForBatchSize, [&](int32 Idx)
376 {
377 const int32 TriIndex = TriSplitQueue[Idx];
378 check(ConcurrencyState.TriSplitRecords[TriIndex].SplitMask != 0);
379
380 if (ConcurrencyState.TriSplitRecords[TriIndex].IsSplit3())
381 {
382 const int32 FirstNewTri = Split3(TriIndex, ConcurrencyState);
383
384 // enqueue all 4 children as refinement candidates
385 const int32 QueueIdx = NumRefinementCand.fetch_add(4, std::memory_order_relaxed);
386 RefinementCand[QueueIdx+0] = FirstNewTri+0;
387 RefinementCand[QueueIdx+1] = FirstNewTri+1;
388 RefinementCand[QueueIdx+2] = FirstNewTri+2;
// the original slot is reused for the middle child triangle
389 RefinementCand[QueueIdx+3] = TriIndex;
390 }
391 else if (ConcurrencyState.TriSplitRecords[TriIndex].IsSplit2())
392 {
393 Split2(TriIndex, ConcurrencyState);
394 }
395 else if (ConcurrencyState.TriSplitRecords[TriIndex].IsSplit1())
396 {
397 Split1(TriIndex, ConcurrencyState);
398 }
399
400 // All triangle split records should be in clean state after this loop
401 ConcurrencyState.TriSplitRecords[TriIndex].Reset();
402 }, ParallelForFlags );
403
404 check(ConcurrencyState.HalfEdgeCount.load() == Mesh.HalfEdgeCount());
405
406 // Mesh.CheckConsistency();
407
408 TriSplitQueueSize.store(0);
409 }
410 }
411
412 // split 1 edge, keep two edges (1 new triangle)
// Green refinement: replaces this triangle with one half and appends one new
// triangle across the split vertex. Adjacency across the split edge is stitched
// lock-free via the edge's FEdgeSplitRecord: each side publishes its half-edge
// pair, and whichever thread arrives second performs the LinkEdge calls.
// NOTE(review): several statements were lost in the doc extraction (original
// lines 419, 434, 440, 443-444, 452-455, 462, 472, 505); the notes below mark
// each gap -- confirm against the original header.
413 inline void Split1(int32 TriIndex, FConcurrencyState& ConcurrencyState)
414 {
415 const int32 SplitMask = ConcurrencyState.TriSplitRecords[TriIndex].SplitMask;
416 check(ConcurrencyState.TriSplitRecords[TriIndex].IsSplit1());
417
418 // determine the edge that is split
// (missing line 419: presumably `int32 LocSplitEdge = 0;` -- confirm)
420 if (SplitMask == 2) LocSplitEdge = 1;
421 else if (SplitMask == 4) LocSplitEdge = 2;
422
423 const int32 NextLocEdge = (LocSplitEdge+1)%3;
424 const int32 PrevLocEdge = (LocSplitEdge+2)%3;
425
426 const int32 EdgeIndices[3] = { Mesh.GetEdgeIndex(TriIndex, 0),
427 Mesh.GetEdgeIndex(TriIndex, 1),
428 Mesh.GetEdgeIndex(TriIndex, 2) };
429
430 const FIndex3i BaseTri = Mesh.GetTriangle(TriIndex);
431
432 const int32 SplitEdgeIdx = Mesh.GetEdgeIndex(TriIndex, LocSplitEdge);
433 const int32 SplitVertexIdx = ConcurrencyState.EdgeSplitRecords[SplitEdgeIdx].SplitVertex;
// (missing line 434: content lost in extraction)
435
436 const int32 AdjHalfEdges[3] = { Mesh.GetAdjHalfEdge(TriIndex, 0),
437 Mesh.GetAdjHalfEdge(TriIndex, 1),
438 Mesh.GetAdjHalfEdge(TriIndex, 2) };
439
// (missing lines 440-445 around here: presumably build the replacement triangle
// `NewTri` from BaseTri and SplitVertexIdx, and relink the unchanged outer edge -- confirm)
441
442 // the adjacency for the outer edge doesn't change
445
446 Mesh.SetTriangle(TriIndex, NewTri, TriIndex);
447
448 // allocate 3 new half-edges and indices. this is the first index to write to
449 const int32 HalfEdge = ConcurrencyState.HalfEdgeCount.fetch_add(3, std::memory_order_relaxed);
450 check(HalfEdge + 2 < Mesh.MaxTriID() * 3);
451
452 // first new triangle
// (missing line 455 around here: presumably Mesh.SetTriangle(NewTriIndex, ...) for the new half -- confirm)
453
454 const int32 NewTriIndex = HalfEdge / 3;
456
457 // for the interior edge
458 const int32 NewEdgeIndex = ConcurrencyState.EdgeCount.fetch_add(1, std::memory_order_relaxed);
459 check(NewEdgeIndex < Mesh.EdgeCount());
460
461 // interior edge
// (missing line 462: presumably a Mesh.LinkEdge(...) joining the two halves across the interior edge -- confirm)
463
464 struct FLinkTask
465 {
466 int32 HalfEdge; //< halfedge to relink
467 int32 LocalEdge; //< local edge index
468 int32 AdjHalfEdge; //< adjacent half-edge before the update
469 int32 Edge; //< edge index to relink to
470 };
471
// (missing line 472: presumably `const FLinkTask LinkTasks[2] = { {<prev-edge task>},` -- confirm)
473 {HalfEdge + 1 , NextLocEdge, AdjHalfEdges[NextLocEdge], EdgeIndices[NextLocEdge] } };
474
475
476 // update adjacency for the unsplit edges
477 for (const FLinkTask& LinkTask : LinkTasks )
478 {
479 // only if the neighbor is of type split-1, we need a safe concurrent update. split-2 is constructed such that the unsplit
480 // edge doesn't require an adjacency update
481 if (LinkTask.AdjHalfEdge == IndexConstants::InvalidID || !(ConcurrencyState.TriSplitRecords[TriIndex].GreenAdjMask.load() & static_cast<uint16>(1 << LinkTask.LocalEdge)))
482 {
483 Mesh.LinkEdge(LinkTask.HalfEdge, LinkTask.AdjHalfEdge, LinkTask.Edge, LinkTask.Edge);
484 }
485 else
486 {
// concurrent rendezvous with the green-refined neighbor on the shared edge
487 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[LinkTask.Edge];
488 const int32 EntryCountPreWrite = Edge.EntryCountPreWrite.fetch_add(1, std::memory_order_relaxed);
489 check(EntryCountPreWrite < 2);
490
491 Edge.HalfEdges[EntryCountPreWrite] = FIndex2i( LinkTask.HalfEdge, IndexConstants::InvalidID );
492
// acq_rel pairs with the other thread's write so slot contents are visible here
493 if (1 == Edge.EntryCountPostWrite.fetch_add(1, std::memory_order_acq_rel))
494 {
495 check(Edge.HalfEdges[0][1] == IndexConstants::InvalidID);
496 check(Edge.HalfEdges[1][1] == IndexConstants::InvalidID);
497
498 // second entry is being written, so we can now link the edges
499 Mesh.LinkEdge(Edge.HalfEdges[0][0], Edge.HalfEdges[1][0], LinkTask.Edge, LinkTask.Edge);
500 }
501 }
502 }
503
504 // update adjacency for the split edge
// (missing line 505: presumably declares the local FIndex2i BoundaryHalfEdges pair used below -- confirm)
506 {
507 const int32 EdgeIdx = EdgeIndices[LocSplitEdge];
508 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIdx];
509
510 const int32 EntryCountPreWrite = Edge.EntryCountPreWrite.fetch_add(1, std::memory_order_relaxed);
511 check(EntryCountPreWrite < 2);
512
513 Edge.HalfEdges[EntryCountPreWrite] = BoundaryHalfEdges;
514
515 // second atomic necessary to make sure the memory written to in other thread in slot 0 is available
516 // todo: might be faster to use std::memory_order_relaxed and use a atomic_thread_fence
517 if (1 == Edge.EntryCountPostWrite.fetch_add(1, std::memory_order_acq_rel))
518 {
519 check(Edge.HalfEdges[0][0] != IndexConstants::InvalidID);
520 check(Edge.HalfEdges[0][1] != IndexConstants::InvalidID);
521 check(Edge.HalfEdges[1][0] != IndexConstants::InvalidID);
522 check(Edge.HalfEdges[1][1] != IndexConstants::InvalidID);
523
524 // second entry is being written, so we can now link the edges
525 Mesh.LinkEdge(Edge.HalfEdges[0][0], Edge.HalfEdges[1][1], EdgeIdx, EdgeIdx);
526 Mesh.LinkEdge(Edge.HalfEdges[0][1], Edge.HalfEdges[1][0], Edge.NewEdge, EdgeIdx);
527 }
528 }
529 }
530
531 // split 2 edges, keep one edge (2 new triangles)
// Two-edge refinement: the triangle is replaced by the piece adjacent to the
// preserved edge and two new triangles are appended. Both split edges stitch
// their cross-edge adjacency lock-free via FEdgeSplitRecord, exactly as in Split1.
// NOTE(review): a few statements were lost in the doc extraction (original lines
// 560-561, 581-582, 587); the notes below mark each gap -- confirm against the
// original header.
532 inline void Split2(int32 TriIndex, FConcurrencyState& ConcurrencyState)
533 {
534 const FIndex3i BaseTri = Mesh.GetTriangle(TriIndex);
535
536 const int32 SplitMask = ConcurrencyState.TriSplitRecords[TriIndex].SplitMask;
537
538 int32 LocPreserveEdge = 0; // the one edge that is NOT split
539 if (SplitMask == 3) {
540 LocPreserveEdge = 2;
541 } else if (SplitMask == 5) {
542 LocPreserveEdge = 1;
543 } else {
544 check(SplitMask == 6);
545 LocPreserveEdge = 0;
546 }
547
548 // edges to reconnect
549 const int32 EdgeIndices[2] = { Mesh.GetEdgeIndex(TriIndex, (LocPreserveEdge+1)%3),
550 Mesh.GetEdgeIndex(TriIndex, (LocPreserveEdge+2)%3) };
551
552 const int32 SplitVertexIdx0 = ConcurrencyState.EdgeSplitRecords[EdgeIndices[0]].SplitVertex;
553 const int32 SplitVertexIdx1 = ConcurrencyState.EdgeSplitRecords[EdgeIndices[1]].SplitVertex;
554
// local half-edge indices of this triangle, rotated so h0 sits on the preserved edge
555 const int32 h0 = TriIndex * 3 + (LocPreserveEdge+0);
556 const int32 h1 = TriIndex * 3 + (LocPreserveEdge+1)%3;
557 const int32 h2 = TriIndex * 3 + (LocPreserveEdge+2)%3;
558
559 // the tri connected to the unsplit edge
// (missing lines 560-561: presumably build the replacement triangle `NewTri` from
// BaseTri and the two split vertices -- confirm)
562 Mesh.SetTriangle(TriIndex, NewTri, TriIndex);
563
564 // allocate 6 new half-edges and indices. this is the first index to write to
565 const int32 HalfEdge = ConcurrencyState.HalfEdgeCount.fetch_add(6, std::memory_order_relaxed);
566 check(HalfEdge + 5 < Mesh.MaxTriID() * 3);
567
568 // two new interior edges
569 const int32 NewEdgeIndex = ConcurrencyState.EdgeCount.fetch_add(2, std::memory_order_relaxed);
570
571
572 const int32 NewTriIndex = HalfEdge / 3;
573
574 // T0
575 Mesh.SetTriangle(NewTriIndex + 0, FIndex3i(BaseTri[(LocPreserveEdge + 2) % 3], SplitVertexIdx1, SplitVertexIdx0), TriIndex);
576
577 // T1
578 Mesh.SetTriangle(NewTriIndex + 1, FIndex3i(SplitVertexIdx0, SplitVertexIdx1, BaseTri[(LocPreserveEdge + 1) % 3]), TriIndex);
579
580 // interior edges
// (missing lines 581-582: presumably two Mesh.LinkEdge(...) calls wiring the new
// interior edges at NewEdgeIndex+0 / NewEdgeIndex+1 -- confirm)
583
// pairs of halfedges corresponding to each split edge as seen from this triangle
584 const FIndex2i BoundaryHalfEdges[2] = { { HalfEdge + 5, HalfEdge + 2 },
585 { HalfEdge + 0, h2 } };
586
// (missing line 587: presumably `for (int32 BoundaryIdx=0; BoundaryIdx<2; ++BoundaryIdx)` -- confirm)
588 {
589 const int32 EdgeIdx = EdgeIndices[BoundaryIdx];
590 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIdx];
591
592 const int32 EntryCountPreWrite = Edge.EntryCountPreWrite.fetch_add(1, std::memory_order_relaxed);
593 Edge.HalfEdges[EntryCountPreWrite] = BoundaryHalfEdges[BoundaryIdx];
594
595 // second atomic necessary to make sure the memory written to in other thread in slot 0 is available
596 // todo: might be faster to use std::memory_order_relaxed and use a atomic_thread_fence (not x86-64)
597 if (1 == Edge.EntryCountPostWrite.fetch_add(1, std::memory_order_acq_rel))
598 {
599 check( Edge.HalfEdges[0][0] != IndexConstants::InvalidID );
600 check( Edge.HalfEdges[0][1] != IndexConstants::InvalidID );
601 check( Edge.HalfEdges[1][0] != IndexConstants::InvalidID );
602 check( Edge.HalfEdges[1][1] != IndexConstants::InvalidID );
603
604 // second entry is being written, so we can now link the edges
605 Mesh.LinkEdge(Edge.HalfEdges[0][0], Edge.HalfEdges[1][1], EdgeIdx, EdgeIdx);
606 Mesh.LinkEdge(Edge.HalfEdges[0][1], Edge.HalfEdges[1][0], Edge.NewEdge, EdgeIdx);
607 }
608 }
609 }
610
611 // split 3 edges, triangle into 4 triangles (3 new triangles)
612 // return first index of newly added triangles
// Red refinement: the original triangle becomes the middle triangle, and the
// three corner triangles are appended contiguously starting at the returned
// index. Cross-edge adjacency is stitched lock-free via FEdgeSplitRecord.
// NOTE(review): a few statements were lost in the doc extraction (original lines
// 630, 658, 663); the notes below mark each gap -- confirm against the original header.
613 inline int32 Split3(int32 TriIndex, FConcurrencyState& ConcurrencyState)
614 {
615 const FIndex3i BaseTri = Mesh.GetTriangle(TriIndex);
616
617 const int32 BaseEdgeIndices[3] = { Mesh.GetEdgeIndex(TriIndex, 0),
618 Mesh.GetEdgeIndex(TriIndex, 1),
619 Mesh.GetEdgeIndex(TriIndex, 2) };
620
621 const int32 BaseAdj[3] = { Mesh.GetAdjTriangle(TriIndex, 0),
622 Mesh.GetAdjTriangle(TriIndex, 1),
623 Mesh.GetAdjTriangle(TriIndex, 2) };
624
625 const int32 SplitVertexIdx0 = ConcurrencyState.EdgeSplitRecords[BaseEdgeIndices[0]].SplitVertex;
626 const int32 SplitVertexIdx1 = ConcurrencyState.EdgeSplitRecords[BaseEdgeIndices[1]].SplitVertex;
627 const int32 SplitVertexIdx2 = ConcurrencyState.EdgeSplitRecords[BaseEdgeIndices[2]].SplitVertex;
628
629 // replace the base triangle with the one in the middle
// (missing line 630: presumably Mesh.SetTriangle(TriIndex, FIndex3i(SplitVertexIdx0,
// SplitVertexIdx1, SplitVertexIdx2), TriIndex) -- confirm)
631
632 // allocate 9 new half-edges and indices. this is the first index to write to
633 const int32 HalfEdge = ConcurrencyState.HalfEdgeCount.fetch_add(9, std::memory_order_relaxed);
634 const int32 NewTriIndex = HalfEdge / 3;
635
636 // T0
637 Mesh.SetTriangle(NewTriIndex + 0, FIndex3i(BaseTri[0], SplitVertexIdx0, SplitVertexIdx2), TriIndex);
638
639 // T1
640 Mesh.SetTriangle(NewTriIndex + 1, FIndex3i(BaseTri[1], SplitVertexIdx1, SplitVertexIdx0), TriIndex);
641
642 // T2
643 Mesh.SetTriangle(NewTriIndex + 2, FIndex3i(BaseTri[2], SplitVertexIdx2, SplitVertexIdx1), TriIndex);
644
645 const int32 NewEdgeIndex = ConcurrencyState.EdgeCount.fetch_add(3, std::memory_order_relaxed);
646 check(NewEdgeIndex < Mesh.EdgeCount());
647
648 // interior edges
649 Mesh.LinkEdge( HalfEdge + 1, TriIndex * 3 + 2, NewEdgeIndex + 0, IndexConstants::InvalidID );
650 Mesh.LinkEdge( HalfEdge + 4, TriIndex * 3 + 0, NewEdgeIndex + 1, IndexConstants::InvalidID );
651 Mesh.LinkEdge( HalfEdge + 7, TriIndex * 3 + 1, NewEdgeIndex + 2, IndexConstants::InvalidID );
652
653 // pairs of halfedges corresponding to the split edge as seen from this triangle
654 const FIndex2i BoundaryHalfEdges[3] = { { HalfEdge + 0, HalfEdge + 5 },
655 { HalfEdge + 3, HalfEdge + 8 },
656 { HalfEdge + 6, HalfEdge + 2 } };
657
// (missing line 658: presumably `for (int32 LocalEdgeIdx=0; LocalEdgeIdx<3; ++LocalEdgeIdx)` -- confirm)
659 {
660 const int32 EdgeIdx = BaseEdgeIndices[LocalEdgeIdx];
661 FEdgeSplitRecord& Edge = ConcurrencyState.EdgeSplitRecords[EdgeIdx];
662
// (missing line 663: presumably `if (BaseAdj[LocalEdgeIdx] == IndexConstants::InvalidID)` -- confirm)
664 {
665 // boundary case: we can just update locally without needing data from the other side
666 Mesh.LinkEdge(BoundaryHalfEdges[LocalEdgeIdx][0], IndexConstants::InvalidID, EdgeIdx, EdgeIdx);
667 Mesh.LinkEdge(BoundaryHalfEdges[LocalEdgeIdx][1], IndexConstants::InvalidID, Edge.NewEdge, EdgeIdx);
668 continue;
669 }
670
671 const int32 EntryCountPreWrite = Edge.EntryCountPreWrite.fetch_add(1, std::memory_order_relaxed);
672 Edge.HalfEdges[EntryCountPreWrite] = BoundaryHalfEdges[LocalEdgeIdx];
673
674 // second atomic necessary to make sure the memory written to in other thread in slot 0 is available
675 // todo: might be faster to use std::memory_order_relaxed and use a atomic_thread_fence
676 if (1 == Edge.EntryCountPostWrite.fetch_add(1, std::memory_order_acq_rel))
677 {
678 check(Edge.HalfEdges[0][0] != IndexConstants::InvalidID);
679 check(Edge.HalfEdges[0][1] != IndexConstants::InvalidID);
680 check(Edge.HalfEdges[1][0] != IndexConstants::InvalidID);
681 check(Edge.HalfEdges[1][1] != IndexConstants::InvalidID);
682
683 // second entry is being written, so we can now link the edges
684 Mesh.LinkEdge(Edge.HalfEdges[0][0], Edge.HalfEdges[1][1], EdgeIdx, EdgeIdx);
685 Mesh.LinkEdge(Edge.HalfEdges[0][1], Edge.HalfEdges[1][0], Edge.NewEdge, EdgeIdx);
686 }
687 }
688 check(HalfEdge % 3 == 0);
// first of the three contiguously-appended corner triangles
689 return HalfEdge / 3;
690 }
691
// Delegates the per-triangle refinement decision to the policy.
// NOTE(review): the signature line (692) was lost in the doc extraction; per the
// symbol index it reads `bool ShouldRefine(int32 TriIndex, int32 Iter) const`.
// Line 694 is also missing and presumably computes the `SplitVertexBary`
// barycentric coordinate(s) passed to the policy -- confirm against the original header.
693 {
695 return RefinementPolicy.ShouldRefine(TriIndex, SplitVertexBary, Iter);
696 }
697
698private:
699
// References captured at construction; all work happens inside the constructor,
// so they only need to outlive the constructor call itself.
700 MeshType& Mesh; // mesh being refined in place
701 RefinementPolicyType& RefinementPolicy; // supplies ShouldRefine / displacement callbacks
// NOTE(review): Options is a stored reference to the caller's FOptions -- binding a
// temporary would dangle if any member function ran after construction; consider
// storing by value. Confirm intended lifetime against the original header.
702 const FOptions& Options;
703};
704
705} // namespace Geometry
706} // namespace UE
#define check(expr)
Definition AssertionMacros.h:314
EParallelForFlags
Definition ParallelFor.h:47
void ParallelFor(int32 Num, TFunctionRef< void(int32)> Body, bool bForceSingleThread, bool bPumpRenderingThread=false)
Definition ParallelFor.h:481
void ParallelForTemplate(int32 Num, const FunctionType &Body, EParallelForFlags Flags=EParallelForFlags::None)
Definition ParallelFor.h:496
#define TEXT(x)
Definition Platform.h:1272
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
#define TRACE_CPUPROFILER_EVENT_SCOPE_TEXT(Name)
Definition CpuProfilerTrace.h:532
if(Failed) console_printf("Failed.\n")
uint16_t uint16
Definition binka_ue_file_header.h:7
Definition UnrealTemplate.h:321
Definition Array.h:670
UE_REWRITE SizeType Num() const
Definition Array.h:1144
SizeType AddDefaulted()
Definition Array.h:2795
UE_FORCEINLINE_HINT void Reserve(SizeType Number)
Definition Array.h:3016
Definition ParallelAdaptiveRefinement.h:19
typename MeshType::RealType RealType
Definition ParallelAdaptiveRefinement.h:115
TParallelAdaptiveRefinement(MeshType &InMesh, RefinementPolicyType &InRefinementPolicy, const FOptions &InOptions)
Definition ParallelAdaptiveRefinement.h:129
bool ShouldRefine(int32 TriIndex, int32 Iter) const
Definition ParallelAdaptiveRefinement.h:692
int32 Split3(int32 TriIndex, FConcurrencyState &ConcurrencyState)
Definition ParallelAdaptiveRefinement.h:613
void Split2(int32 TriIndex, FConcurrencyState &ConcurrencyState)
Definition ParallelAdaptiveRefinement.h:532
typename MeshType::VecType VecType
Definition ParallelAdaptiveRefinement.h:116
void Split1(int32 TriIndex, FConcurrencyState &ConcurrencyState)
Definition ParallelAdaptiveRefinement.h:413
constexpr int InvalidID
Definition IndexTypes.h:13
Definition AdvancedWidgetsModule.cpp:13
Definition IndexTypes.h:27
Definition IndexTypes.h:158
Definition ParallelAdaptiveRefinement.h:119
int32 ReserveVertices
Definition ParallelAdaptiveRefinement.h:123
int32 ParallelForBatchSize
Definition ParallelAdaptiveRefinement.h:120
bool bApplyNeighborhoodRefinement
Definition ParallelAdaptiveRefinement.h:125