SpacetreeSet.cpp
#include "SpacetreeSet.h"

#include <limits>

#include "config.h"
#include "Node.h"
#include "tarch/tarch.h"
#include "peano4/grid/grid.h"
#include "peano4/grid/PeanoCurve.h"
#include "peano4/grid/Spacetree.h"
#include "peano4/grid/TraversalObserver.h"
#include "tarch/logging/Statistics.h"
#include "tarch/mpi/Rank.h"
#include "tarch/multicore/Lock.h"
#include "tarch/multicore/multicore.h"
#include "tarch/services/ServiceRepository.h"
#include "tarch/timing/Watch.h"
18
19tarch::logging::Log peano4::parallel::SpacetreeSet::_log("peano4::parallel::SpacetreeSet");
20
22
23
25
28
29
31
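/*
 * init() wires the set into the tarch infrastructure: it reserves one MPI tag
 * for tree management requests plus a whole block of answer tags (one per
 * possible local tree, hence Node::MaxSpacetreesPerRank), and registers the
 * set as a service so it is polled for dangling MPI messages. Only the global
 * master starts with a spacetree; every other rank obtains trees later
 * through splits.
 */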
void peano4::parallel::SpacetreeSet::init(
  const tarch::la::Vector<Dimensions, double>& offset,
  const tarch::la::Vector<Dimensions, double>& width,
  const std::bitset<Dimensions>& periodicBC
) {
  _requestMessageTag = tarch::mpi::Rank::reserveFreeTag("peano4::parallel::SpacetreeSet - request message");
  _answerMessageTag  = tarch::mpi::Rank::reserveFreeTag(
    "peano4::parallel::SpacetreeSet - answer message", Node::MaxSpacetreesPerRank
  );
  tarch::services::ServiceRepository::getInstance().addService(this, "peano4::parallel::SpacetreeSet");

#ifdef Parallel
  assertion4(
    (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 1
     and tarch::mpi::Rank::getInstance().getRank() == 0)
      or (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 0 and tarch::mpi::Rank::getInstance().getRank() != 0),
    peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees(),
    offset,
    width,
    periodicBC
  );
#endif

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    logTraceInWith3Arguments("init(...)", offset, width, periodicBC);
    peano4::grid::Spacetree spacetree(offset, width, periodicBC);
    _spacetrees.push_back(spacetree);
    logTraceOutWith3Arguments("init(...)", offset, width, periodicBC);
  }
}


peano4::parallel::SpacetreeSet::~SpacetreeSet() {
  // As the set is a singleton and a service, it has to deregister itself.
  tarch::services::ServiceRepository::getInstance().removeService(this);
}


int peano4::parallel::SpacetreeSet::getAnswerTag(int targetSpacetreeId) const {
  return _answerMessageTag + Node::getInstance().getLocalTreeId(targetSpacetreeId);
}


std::string peano4::parallel::SpacetreeSet::toString(SpacetreeSetState state) {
  switch (state) {
    case SpacetreeSetState::TraverseTreesAndExchangeData:
      return "traverse-trees-and-exchange-data";
    case SpacetreeSetState::Waiting:
      return "waiting";
  }
  return "undef";
}

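/*
 * Serving tree management requests happens in two phases: first filter the
 * queued messages down to those that may be answered in the current set state
 * - tree creation and bookkeeping updates are deferred while the set is
 * traversing - and then answer them. Every answer goes out through the
 * per-tree answer tag, so a sender can wait for precisely its own reply.
 */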
void peano4::parallel::SpacetreeSet::answerQuestions() {
#ifdef Parallel
  std::vector<peano4::parallel::TreeManagementMessage> unansweredMessagesThatICanAnswerNow;

  std::vector<peano4::parallel::TreeManagementMessage>::iterator p = _unansweredMessages.begin();
  while (p != _unansweredMessages.end()) {
    switch (p->getAction()) {
      case TreeManagementMessage::Action::RequestNewRemoteTree:
        unansweredMessagesThatICanAnswerNow.push_back(*p);
        p = _unansweredMessages.erase(p);
        break;
      case TreeManagementMessage::Action::CreateNewRemoteTree:
      case TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty:
        if (_state == SpacetreeSetState::Waiting) {
          unansweredMessagesThatICanAnswerNow.push_back(*p);
          p = _unansweredMessages.erase(p);
        } else {
          logDebug("answerQuestions()", "can't answer as I'm in the wrong state");
          p++;
        }
        break;
      case TreeManagementMessage::Action::JoinWithWorker:
      case TreeManagementMessage::Action::Acknowledgement:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
    }
  }


  for (auto p : unansweredMessagesThatICanAnswerNow) {
    switch (p.getAction()) {
      case TreeManagementMessage::Action::RequestNewRemoteTree: {
        int newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
          tarch::mpi::Rank::getInstance().getRank(), // on current node
          p.getMasterSpacetreeId()                   // this is the tree that has requested the new tree
        );

        TreeManagementMessage answerMessage;
        answerMessage.setWorkerSpacetreeId(newSpacetreeId);
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
        logInfo(
          "answerQuestions()", "reserved tree id " << newSpacetreeId << " for tree " << p.getMasterSpacetreeId()
        );
      } break;
      case TreeManagementMessage::Action::CreateNewRemoteTree: {
        assertion(_state == SpacetreeSetState::Waiting);
        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );

        peano4::grid::AutomatonState state;
        peano4::grid::AutomatonState::receive(
          state, p.getSenderRank(), _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
        );
        peano4::grid::Spacetree newTree(
          p.getWorkerSpacetreeId(), p.getMasterSpacetreeId(), state.getX(), state.getH(), state.getInverted()
        );

        _spacetrees.push_back(std::move(newTree));

        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
      case TreeManagementMessage::Action::JoinWithWorker:
      case TreeManagementMessage::Action::Acknowledgement:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
      case TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty: {
        assertion(_state == SpacetreeSetState::Waiting);
        logInfo(
          "answerQuestions(...)",
          "learned that remote child tree "
            << p.getWorkerSpacetreeId() << " of local tree " << p.getMasterSpacetreeId()
            << " is degenerate and thus has been removed"
        );
        getSpacetree(p.getMasterSpacetreeId())._childrenIds.erase(p.getWorkerSpacetreeId());

        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getWorkerSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
    }
  }
#else
  assertion(_unansweredMessages.empty());
#endif
}

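/*
 * Service callback, polled regularly by the service repository. Incoming
 * requests are only buffered here, since they might not be answerable in the
 * present state; answerQuestions() then serves whatever is currently safe.
 */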
void peano4::parallel::SpacetreeSet::receiveDanglingMessages() {
#ifdef Parallel

  if (tarch::mpi::Rank::getInstance().isMessageInQueue(_requestMessageTag)) {
    logTraceIn("receiveDanglingMessages()");

    TreeManagementMessage message;
    TreeManagementMessage::receive(
      message, MPI_ANY_SOURCE, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    _unansweredMessages.push_back(message);

    logDebug("receiveDanglingMessages()", "received new message " << message.toString());

    logTraceOut("receiveDanglingMessages()");
  }

  answerQuestions();
#endif
}

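/*
 * For a remote master-worker pair, creating the tree is a three-step
 * handshake: send the CreateNewRemoteTree request, ship the master's root
 * AutomatonState across once the request is acknowledged, and wait for a
 * second acknowledgement confirming that the remote rank has built the tree.
 * The local case constructs the Spacetree directly and appends it to
 * _spacetrees under the semaphore.
 */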
void peano4::parallel::SpacetreeSet::addSpacetree(int masterId, int newTreeId) {
  logTraceInWith2Arguments("addSpacetree(int,int)", masterId, newTreeId);
  if (peano4::parallel::Node::getInstance().getRank(masterId) != peano4::parallel::Node::getInstance().getRank(newTreeId)) {
#ifdef Parallel
    const int targetRank = peano4::parallel::Node::getInstance().getRank(newTreeId);

    TreeManagementMessage message;
    message.setMasterSpacetreeId(masterId);
    message.setWorkerSpacetreeId(newTreeId);
    message.setAction(TreeManagementMessage::Action::CreateNewRemoteTree);
    TreeManagementMessage::sendAndPollDanglingMessages(message, targetRank, _requestMessageTag);

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);

    peano4::grid::AutomatonState state = _spacetrees.begin()->_root;
    logDebug("addSpacetree(int,int)", "send state " << state.toString() << " to rank " << targetRank);
    peano4::grid::AutomatonState::send(
      state, targetRank, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);
#else
    assertionMsg(false, "should never enter this branch without -DParallel");
#endif
  } else {
    peano4::grid::Spacetree newTree(
      newTreeId,
      masterId,
      _spacetrees.begin()->_root.getX(),
      _spacetrees.begin()->_root.getH(),
      _spacetrees.begin()->_root.getInverted()
    );
    tarch::multicore::Lock lock(_semaphore);
    _spacetrees.push_back(std::move(newTree));
  }
  logTraceOut("addSpacetree(int,int)");
}

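/*
 * One task per tree traversal, so that spawnAndWait() can run all local trees
 * concurrently. Each task works on a per-tree clone of the user's observer,
 * which presumably is what keeps concurrent traversals from sharing mutable
 * observer state.
 */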
peano4::parallel::SpacetreeSet::TraverseTask::TraverseTask(
  peano4::grid::Spacetree&         tree,
  SpacetreeSet&                    set,
  peano4::grid::TraversalObserver& observer,
  bool                             invertTreeTraversalDirectionBeforeWeStart
):
  Task(Task::DontFuse, Task::DefaultPriority),
  _spacetree(tree),
  _spacetreeSet(set),
  _observer(observer),
  _invertTreeTraversalDirectionBeforeWeStart(invertTreeTraversalDirectionBeforeWeStart) {}


bool peano4::parallel::SpacetreeSet::TraverseTask::run() {
  _spacetreeSet.createObserverCloneIfRequired(_observer, _spacetree._id);
  if (_invertTreeTraversalDirectionBeforeWeStart) {
    _spacetree._root.setInverted(not _spacetree._root.getInverted());
  }
  _spacetree.traverse(*_spacetreeSet._clonedObserver[_spacetree._id], true);
  return false;
}


void peano4::parallel::SpacetreeSet::createObserverCloneIfRequired(
  peano4::grid::TraversalObserver& observer, int treeId
) {
  tarch::multicore::Lock lock(_semaphore);

  if (_clonedObserver.count(treeId) == 0) {
    _clonedObserver.insert(std::pair<int, peano4::grid::TraversalObserver*>(treeId, observer.clone(treeId)));
  }
  if (_clonedObserver[treeId] == nullptr) {
    _clonedObserver[treeId] = observer.clone(treeId);
  }
  assertion2(
    _clonedObserver.count(treeId) == 1 and _clonedObserver[treeId] != nullptr, treeId, _clonedObserver.count(treeId)
  );
}


void peano4::parallel::SpacetreeSet::streamLocalVertexInformationToMasterThroughVerticalStacks(
  [[maybe_unused]] int                  spacetreeId,
  [[maybe_unused]] int                  parentId,
  [[maybe_unused]] const std::set<int>& joiningIds
) {
  logTraceInWith2Arguments("streamLocalVertexInformationToMasterThroughVerticalStacks(...)", spacetreeId, parentId);

  /*
  const int destinationRank  = Node::getInstance().getRank( parentId );
  const int sourceRank       = Node::getInstance().getRank( spacetreeId );
  const int destinationStack = Node::getInstance().getInputStackNumberForVerticalDataExchange( spacetreeId );
  const int sourceStack      = Node::getInstance().getOutputStackNumberForVerticalDataExchange( parentId );
  if (
    destinationRank != tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( peano4::maps::StackKey(spacetreeId,sourceStack) )->empty()
  ) {
    assertion(false);
    const int tag = Node::getInstance().getGridDataExchangeTag( sourceSpacetreeId, destinationSpacetreeId, Node::ExchangeMode::SendVerticalData );
    logInfo(
      "exchangeStacksSynchronously(...)",
      "send stack " << sourceStack << " from tree " << sourceSpacetreeId << " to rank " << destinationRank
        << ": " << stackContainer[ peano4::grid::Spacetree::StackKey(destinationSpacetreeId,destinationStack) ].toString()
    );

    tarch::mpi::IntegerMessage message( stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].size() );
    message.send(destinationRank,tag,false,tarch::mpi::IntegerMessage::ExchangeMode::NonblockingWithPollingLoopOverTests);

    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].startSend(destinationRank,tag);
    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].finishSendOrReceive();
  }

  // Do I still have these stacks, or am I currently misusing them for MPI sends and receives?
  if (
    destinationRank == tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( spacetreeId,sourceStack )->empty()
  ) {
    logDebug(
      "exchangeAllVerticalDataExchangeStacks(...)",
      "stream content of stack " << sourceStack << " on tree " << spacetreeId << " into stack " << destinationStack
        << " of tree " << parentId << " as source tree is joining"
    );
    peano4::grid::Spacetree::_vertexStack.getForPush(parentId,destinationStack)->clone(
      *peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack)
    );
    peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack)->clear();
    peano4::grid::Spacetree::_vertexStack.getForPop(parentId,destinationStack)->reverse();
  }
  */

  logTraceOut("streamLocalVertexInformationToMasterThroughVerticalStacks(...)");
}


void peano4::parallel::SpacetreeSet::deleteAllStacks(peano4::grid::TraversalObserver& observer, int spacetreeId) {
  logTraceInWith1Argument("deleteAllStacks(int)", spacetreeId);
  createObserverCloneIfRequired(observer, spacetreeId);
  _clonedObserver[spacetreeId]->deleteAllStacks();
  logTraceOut("deleteAllStacks(int)");
}

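/*
 * Vertical exchange moves data along the master-worker links of the tree
 * topology: every tree first streams its vertex data towards its master, then
 * triggers the vertical stack exchange through its cloned observer. Only
 * after all trees have triggered their transfers do we wait for the
 * outstanding MPI operations, which lets the transfers of different trees
 * overlap.
 */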
void peano4::parallel::SpacetreeSet::exchangeVerticalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeVerticalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    streamLocalVertexInformationToMasterThroughVerticalStacks(p._id, p._masterId, p._joining);

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllVerticalDataExchangeStacks(p._masterId);
  }

  logDebug(
    "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeVerticalDataBetweenTrees(...)");
}

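/*
 * After a split, each new worker needs a copy of its master's data: the
 * master's input vertex stack is cloned into a temporary vertical exchange
 * stack and streamed to the worker. Trees in their empty run additionally
 * trigger the stream from the master's side. All sends and receives are
 * finished only at the very end.
 */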
void peano4::parallel::SpacetreeSet::streamDataFromSplittingTreesToNewTrees(peano4::grid::TraversalObserver& observer) {
  logTraceInWith1Argument("streamDataFromSplittingTreesToNewTrees()", _spacetrees.size());

  for (auto& parent : _spacetrees) {
    for (auto& worker : parent._hasSplit) {
      const int temporaryOutStackForVertices = Node::getOutputStackNumberForVerticalDataExchange(worker);
      const int sourceStackForVertices       = peano4::grid::PeanoCurve::getInputStackNumber(parent._root);
      assertion3(
        peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)->empty(),
        parent._id,
        temporaryOutStackForVertices,
        sourceStackForVertices
      );
      peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)
        ->clone(*peano4::grid::Spacetree::_vertexStack.getForPop(parent._id, sourceStackForVertices));

      createObserverCloneIfRequired(observer, parent._id);

      _clonedObserver[parent._id]->streamDataFromSplittingTreeToNewTree(worker);
    }
  }

  for (auto& p : _spacetrees) {
    if (p._spacetreeState == peano4::grid::SpacetreeState::EmptyRun) {
      createObserverCloneIfRequired(observer, p._masterId);

      _clonedObserver[p._masterId]->streamDataFromSplittingTreeToNewTree(p._id);
    }
  }

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("streamDataFromSplittingTreesToNewTrees()");
}

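/*
 * Horizontal exchange covers the domain boundaries between trees on the same
 * level: each tree exchanges its boundary stacks (with symmetric data
 * cardinality) and its periodic boundary stacks before we wait for all
 * outstanding messages.
 */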
void peano4::parallel::SpacetreeSet::exchangeHorizontalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeHorizontalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllHorizontalDataExchangeStacks(true);
    _clonedObserver[p._id]->exchangeAllPeriodicBoundaryDataStacks();
  }

  logDebug(
    "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeHorizontalDataBetweenTrees(...)");
}


std::set<int> peano4::parallel::SpacetreeSet::getLocalTreesMergingWithWorkers() const {
  std::set<int> result;
  for (const auto& tree : _spacetrees) {
    if (not tree._joining.empty()) {
      result.insert(tree._id);
    }
  }
  return result;
}


void peano4::parallel::SpacetreeSet::createNewTrees() {
  for (const auto& tree : _spacetrees) {
    for (auto& p : tree._splitting) {
      addSpacetree(tree._id, p);
    }
  }
}
489
490
492 for (auto& p : _clonedObserver) {
493 if (p.second != nullptr) {
494 delete p.second;
495 p.second = nullptr;
496 }
497 }
498}
499
500
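/*
 * One grid sweep consists of up to three task waves: primary traversals for
 * the regular trees, secondary traversals (with inverted direction) for trees
 * performing their empty run after a split, and tertiary traversals for trees
 * that are joining or have just been created. Split streams plus vertical and
 * horizontal data exchange are interleaved between the waves; the time spent
 * in the later exchanges is sampled into the "mpi wait times" statistics.
 */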
void peano4::parallel::SpacetreeSet::traverse(peano4::grid::TraversalObserver& observer) {
  logTraceIn("traverse(TraversalObserver&)");

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    peano4::parallel::Node::getInstance().continueToRun();
  }
  logDebug("traverse(TraversalObserver&)", "start new grid sweep");

  tarch::mpi::Rank::getInstance().barrier([&]() -> void {
    tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
  });
  logDebug("traverse(TraversalObserver&)", "rank has passed barrier");

  _state = SpacetreeSetState::TraverseTreesAndExchangeData;

  std::vector<tarch::multicore::Task*> primaryTasks;
  std::vector<tarch::multicore::Task*> secondaryTasks;
  std::vector<tarch::multicore::Task*> tertiaryTasks;

  for (auto& p : _spacetrees) {
    switch (p._spacetreeState) {
      case peano4::grid::SpacetreeState::NewRoot:
      case peano4::grid::SpacetreeState::Running:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
        );
        primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      case peano4::grid::SpacetreeState::JoinTriggered:
        if (p._joining.empty()) {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
          );
          primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        } else {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree "
              << p._id << " in the third tree set as it joins in other tree: " << p.toString()
          );
          tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        }
        break;
      case peano4::grid::SpacetreeState::EmptyRun:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in secondary tree set as empty tree and in third set as new tree"
        );
        secondaryTasks.push_back(new TraverseTask(p, *this, observer, true));
        tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      default:
        assertionMsg(false, "should not happen");
        break;
    }
  }

  // I use this boolean flag from time to time to debug the code.
  [[maybe_unused]] const bool runSequentially = false;

  logTraceInWith1Argument("traverse(TraversalObserver&)-primary", primaryTasks.size());
  tarch::multicore::spawnAndWait(primaryTasks);
  logTraceOut("traverse(TraversalObserver&)-primary");

  logDebug(
    "traverse(TraversalObserver&)", "primary tasks (traversals) complete, trigger split data stream if required"
  );
  streamDataFromSplittingTreesToNewTrees(observer);
  logDebug("traverse(TraversalObserver&)", "exchange vertical data if required");
  exchangeVerticalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-secondary", secondaryTasks.size());
  tarch::multicore::spawnAndWait(secondaryTasks);
  logTraceOut("traverse(TraversalObserver&)-secondary");

  tarch::timing::Watch dataExchangeTime("peano4::parallel::SpacetreeSet", "traverse", false);

  exchangeHorizontalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-tertiary", tertiaryTasks.size());
  tarch::multicore::spawnAndWait(tertiaryTasks);
  logTraceOut("traverse(TraversalObserver&)-tertiary");

  exchangeVerticalDataBetweenTrees(observer);

  dataExchangeTime.stop();
  tarch::logging::Statistics::getInstance().log("mpi wait times", dataExchangeTime.getCPUTime());

  _state = SpacetreeSetState::Waiting;

  cleanUpTrees(observer);

  createNewTrees();

  deleteClonedObservers();

  logTraceOut("traverse(TraversalObserver&)");
}

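/*
 * Post-sweep bookkeeping: trees that have joined into their master are
 * deleted, empty (degenerate) trees are removed and their master - local or
 * remote - is told to drop the child reference. The remaining branches merely
 * log join opportunities that are not implemented yet.
 */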
void peano4::parallel::SpacetreeSet::cleanUpTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("cleanUpTrees(...)");
  for (auto p = _spacetrees.begin(); p != _spacetrees.end();) {
    /*
    else if (
      p->getGridStatistics().getCoarseningHasBeenVetoed()
      and
      p->mayJoinWithMaster()
      and
      p->_masterId >= 0
      and
      getSpacetree(p->_masterId).mayJoinWithWorker()
    ) {
      logInfo(
        "traverse(Observer)",
        "trigger join of tree " << p->_id << " with its master tree " << p->_masterId << " to enable further grid erases"
      );
      join(p->_id);
    }
    */
    if (p->_spacetreeState == peano4::grid::SpacetreeState::Joined) {
      logInfo("traverse(Observer)", "tree " << p->_id << " has successfully joined");
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);
      p = _spacetrees.erase(p);
    } else if (p->mayJoinWithMaster() and p->getGridStatistics().getNumberOfLocalUnrefinedCells() == 0) {
      logInfo("traverse(Observer)", "remove empty tree " << p->_id << " with master " << p->_masterId);
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);

      if (Node::getInstance().getRank(p->_masterId) != tarch::mpi::Rank::getInstance().getRank()) {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is not local on this rank. Remove child reference"
        );
#ifdef Parallel
        TreeManagementMessage message(
          p->_masterId, p->_id, TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty
        );
        TreeManagementMessage::send(
          message,
          Node::getInstance().getRank(p->_masterId),
          _requestMessageTag,
          tarch::mpi::Rank::getInstance().getCommunicator()
        );

        TreeManagementMessage::receive(
          message,
          Node::getInstance().getRank(p->_masterId),
          getAnswerTag(p->_id),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
        assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);
#else
        assertionMsg(false, "branch may not be entered");
#endif
      } else {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is local on this rank. Remove child reference"
        );
        getSpacetree(p->_masterId)._childrenIds.erase(p->_id);
        getSpacetree(p->_masterId)._statistics.setRemovedEmptySubtree(true);
      }
      p = _spacetrees.erase(p);
    } else if (p->mayJoinWithMaster()) {
      const int localRank  = Node::getInstance().getRank(p->_id);
      const int masterRank = Node::getInstance().getRank(p->_masterId);
      if (localRank == masterRank and getSpacetree(p->_masterId).getGridStatistics().getCoarseningHasBeenVetoed()) {
        // @todo First message should be logInfo
        logError(
          "traverse(Observer)",
          "join tree "
            << p->_id << " as it is deteriorated (encodes no hierarchical data) while master " << p->_masterId
            << " resides on same rank and can't coarsen"
        );
        logError("traverse(Observer)", "not implemented yet");
        // p->joinWithMaster();
        // getSpacetree(p->_masterId).joinWithWorker(p->_id);
      } else if (localRank == masterRank) {
        logDebug(
          "traverse(Observer)",
          "tree " << p->_id << " is deteriorated (encodes no hierarchical data) yet seems not to constrain its master"
        );
      } else {
        // @todo But only if there are still other trees on this rank
        // @todo First message should be logInfo
        logDebug(
          "cleanUpTrees(...)", "I should merge tree " << p->_id << " to reduce synchronisation: " << p->toString()
        );
        logDebug("traverse(Observer)", "not implemented yet");
      }
      p++;
    } else {
      p++;
    }
  }
  logTraceOut("cleanUpTrees(...)");
}


void peano4::parallel::SpacetreeSet::shutdown() {
  deleteClonedObservers();
}

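/*
 * The global statistics are accumulated over all local trees. Trees that have
 * just been created through a split are skipped, presumably because their
 * cells are still accounted for by the splitting tree in this sweep.
 */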
peano4::grid::GridStatistics peano4::parallel::SpacetreeSet::getGridStatistics() const {
  logTraceIn("getGridStatistics()");
  if (_spacetrees.empty()) {
    peano4::grid::GridStatistics result(
      0,     // numberOfLocalUnrefinedCells
      0,     // numberOfRemoteUnrefinedCells
      0,     // numberOfLocalRefinedCells
      0,     // numberOfRemoteRefinedCells
      0,     // stationarySweeps
      false, // coarseningHasBeenVetoed
      false, // removedEmptySubtree
      tarch::la::Vector<Dimensions, double>(std::numeric_limits<double>::max()) // minH
    );
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  } else {
    peano4::grid::GridStatistics result(_spacetrees.begin()->_statistics);
    for (auto& from : _spacetrees) {
      if (from._id != _spacetrees.begin()->_id and from._spacetreeState != peano4::grid::SpacetreeState::NewFromSplit) {
        result = result + from._statistics;
      }
    }
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  }
}


bool peano4::parallel::SpacetreeSet::isLocalSpacetree(int treeId) const {
  for (auto& p : _spacetrees) {
    if (p._id == treeId)
      return true;
  }
  return false;
}


std::set<int> peano4::parallel::SpacetreeSet::getLocalSpacetrees() const {
  std::set<int> result;

  for (auto& p : _spacetrees) {
    result.insert(p._id);
  }

  return result;
}

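/*
 * A split merely books the new tree: we reserve a tree id - locally, or via a
 * RequestNewRemoteTree handshake with the target rank - and ask the source
 * tree to mark cells for deployment. The new Spacetree object itself is built
 * later through createNewTrees()/addSpacetree() at the end of the sweep, once
 * the split data can be streamed over.
 */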
bool peano4::parallel::SpacetreeSet::split(int treeId, const peano4::SplitInstruction& instruction, int targetRank) {
  logTraceInWith3Arguments("split(int,SplitInstruction,int)", treeId, instruction, targetRank);
  peano4::grid::Spacetree& tree = getSpacetree(treeId);

  if (tree.maySplit()) {
    int newSpacetreeId = -1;

    if (tarch::mpi::Rank::getInstance().getRank() != targetRank) {
#ifdef Parallel
      logDebug("split(int,SplitInstruction,int)", "request new tree on rank " << targetRank);
      TreeManagementMessage requestMessage;
      requestMessage.setMasterSpacetreeId(treeId);
      requestMessage.setWorkerSpacetreeId(-1);
      requestMessage.setAction(TreeManagementMessage::Action::RequestNewRemoteTree);
      TreeManagementMessage::sendAndPollDanglingMessages(requestMessage, targetRank, _requestMessageTag);

      logDebug("split(int,SplitInstruction,int)", "message " << requestMessage.toString() << " sent - wait for answer");

      TreeManagementMessage answerMessage;
      TreeManagementMessage::receiveAndPollDanglingMessages(answerMessage, targetRank, getAnswerTag(treeId));
      newSpacetreeId = answerMessage.getWorkerSpacetreeId();
#else
      newSpacetreeId = -1;
      assertionMsg(false, "can't split into tree on a different rank if not compiled with mpi");
#endif
    } else {
      newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
        peano4::parallel::Node::getInstance().getRank(treeId), treeId
      );
    }

    if (newSpacetreeId >= 0) {
      tree.split(newSpacetreeId, instruction);
      if (2 * tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) > tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte)) {
        logWarning(
          "split(int,SplitInstruction,int)",
          "Peano 4 uses "
            << tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) << " MB on rank "
            << tarch::mpi::Rank::getInstance().getRank() << " and is asked to split. Total memory is "
            << tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte) << " MB, i.e. we might run out of memory"
        );
      }

      logInfo(
        "split(int,SplitInstruction,int)",
        "trigger split of tree " << treeId << " into tree " << newSpacetreeId << " with " << instruction
      );
      logTraceOutWith1Argument("split(int,SplitInstruction,int)", true);
      return true;
    }
  }

  logTraceOutWith1Argument("split(int,SplitInstruction,int)", false);
  return false;
}


peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid a warning
}


const peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) const {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid a warning
}
Definition Vector.h:134