SpacetreeSet.cpp
#include "SpacetreeSet.h"

#include "config.h"
#include "Node.h"
#include "tarch/tarch.h"
#include "peano4/grid/grid.h"
#include "peano4/grid/PeanoCurve.h"
#include "peano4/grid/TraversalObserver.h"
#include "tarch/logging/Statistics.h"
#include "tarch/mpi/Rank.h"
#include "tarch/multicore/Lock.h"
#include "tarch/multicore/multicore.h"
#include "tarch/services/ServiceRepository.h"
#include "tarch/timing/Watch.h"


tarch::logging::Log peano4::parallel::SpacetreeSet::_log("peano4::parallel::SpacetreeSet");


peano4::parallel::SpacetreeSet peano4::parallel::SpacetreeSet::_singleton;


tarch::multicore::BooleanSemaphore peano4::parallel::SpacetreeSet::_semaphore;


peano4::parallel::SpacetreeSet::SpacetreeSet() {}


void peano4::parallel::SpacetreeSet::init(
  const tarch::la::Vector<Dimensions, double>& offset,
  const tarch::la::Vector<Dimensions, double>& width,
  const std::bitset<Dimensions>& periodicBC
) {
  _requestMessageTag = tarch::mpi::Rank::reserveFreeTag("peano4::parallel::SpacetreeSet - request message");
  _answerMessageTag  = tarch::mpi::Rank::reserveFreeTag(
    "peano4::parallel::SpacetreeSet - answer message", Node::MaxSpacetreesPerRank
  );
  tarch::services::ServiceRepository::getInstance().addService(this, "peano4::parallel::SpacetreeSet");

#ifdef Parallel
  assertion4(
    (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 1
     and tarch::mpi::Rank::getInstance().getRank() == 0
    ) or (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 0 and tarch::mpi::Rank::getInstance().getRank() != 0),
    peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees(),
    offset,
    width,
    periodicBC
  );
#endif

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    logTraceInWith3Arguments("isGlobalMaster(...)", offset, width, periodicBC);
    peano4::grid::Spacetree spacetree(offset, width, periodicBC);
    _spacetrees.push_back(spacetree);
    logTraceOutWith3Arguments("isGlobalMaster(...)", offset, width, periodicBC);
  }
}


peano4::parallel::SpacetreeSet::~SpacetreeSet() { tarch::services::ServiceRepository::getInstance().removeService(this); }


peano4::parallel::SpacetreeSet& peano4::parallel::SpacetreeSet::getInstance() { return _singleton; }


int peano4::parallel::SpacetreeSet::getAnswerTag(int targetSpacetreeId) const {
  return _answerMessageTag + Node::getInstance().getLocalTreeId(targetSpacetreeId);
}

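// Worked example of the tag layout (illustrative, not part of the original
// file): init() reserves one request tag plus Node::MaxSpacetreesPerRank
// consecutive answer tags. If _answerMessageTag were 42, answers for the
// trees with local ids 0, 1, 2, ... would travel over tags 42, 43, 44, ...:
//
//   const int tag = getAnswerTag(targetSpacetreeId);
//   // == _answerMessageTag + Node::getInstance().getLocalTreeId(targetSpacetreeId)
//
// Every local tree thus owns a dedicated answer tag, while all requests share
// the single _requestMessageTag and are disambiguated by their payload.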

std::string peano4::parallel::SpacetreeSet::toString(SpacetreeSetState state) {
  switch (state) {
    case SpacetreeSetState::TraverseTreesAndExchangeData:
      return "traverse-trees-and-exchange-data";
    case SpacetreeSetState::Waiting:
      return "waiting";
  }
  return "undef";
}


void peano4::parallel::SpacetreeSet::answerQuestions() {
#ifdef Parallel
  std::vector<peano4::parallel::TreeManagementMessage> unansweredMessagesThatIanAnswerNow;

  std::vector<peano4::parallel::TreeManagementMessage>::iterator p = _unansweredMessages.begin();
  while (p != _unansweredMessages.end()) {
    switch (p->getAction()) {
      case peano4::parallel::TreeManagementMessage::Action::RequestNewRemoteTree:
        unansweredMessagesThatIanAnswerNow.push_back(*p);
        p = _unansweredMessages.erase(p);
        break;
      case peano4::parallel::TreeManagementMessage::Action::CreateNewRemoteTree:
      case peano4::parallel::TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty: {
        if (_state == SpacetreeSetState::Waiting) {
          unansweredMessagesThatIanAnswerNow.push_back(*p);
          p = _unansweredMessages.erase(p);
        } else {
          logDebug("answerMessages()", "can't answer as I'm in the wrong state");
          p++;
        }
      } break;
      case peano4::parallel::TreeManagementMessage::Action::JoinWithWorker:
      case peano4::parallel::TreeManagementMessage::Action::Acknowledgement:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
    }
  }

  for (auto p : unansweredMessagesThatIanAnswerNow) {
    switch (p.getAction()) {
      case peano4::parallel::TreeManagementMessage::Action::RequestNewRemoteTree: {
        int newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
          tarch::mpi::Rank::getInstance().getRank(), // on current node
          p.getMasterSpacetreeId()                   // this is the tree who has requested the new tree
        );

        TreeManagementMessage answerMessage;
        answerMessage.setWorkerSpacetreeId(newSpacetreeId);
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
        logInfo(
          "receiveDanglingMessages()", "reserved tree id " << newSpacetreeId << " for tree " << p.getMasterSpacetreeId()
        );
      } break;
      case peano4::parallel::TreeManagementMessage::Action::CreateNewRemoteTree: {
        assertion(_state == SpacetreeSetState::Waiting);
        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );

        peano4::grid::AutomatonState state;
        peano4::grid::AutomatonState::receive(
          state, p.getSenderRank(), _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
        );
        peano4::grid::Spacetree newTree(
          p.getWorkerSpacetreeId(), p.getMasterSpacetreeId(), state.getX(), state.getH(), state.getInverted()
        );

        _spacetrees.push_back(std::move(newTree));

        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
      case peano4::parallel::TreeManagementMessage::Action::JoinWithWorker:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
      case peano4::parallel::TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty: {
        assertion(_state == SpacetreeSetState::Waiting);
        logInfo(
          "receiveDanglingMessages(...)",
          "learned that remote child tree "
            << p.getWorkerSpacetreeId() << " of local tree " << p.getMasterSpacetreeId()
            << " is degenerated thus had been removed"
        );
        getSpacetree(p.getMasterSpacetreeId())._childrenIds.erase(p.getWorkerSpacetreeId());

        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getWorkerSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
    }
  }
#else
  assertion(_unansweredMessages.empty());
#endif
}
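// Sketch of the two-phase pattern above (illustrative): the first loop only
// decides which questions may be answered now, the second loop does the
// actual answering. RequestNewRemoteTree is always safe to answer, as it
// merely books an id; actions that alter _spacetrees wait until the set is
// in its Waiting state:
//
//   while (p != _unansweredMessages.end()) {
//     if (/* action only books an id */)             { /* answer immediately */ }
//     else if (_state == SpacetreeSetState::Waiting)  { /* answer now */ }
//     else                                            { ++p; /* retry on next poll */ }
//   }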


void peano4::parallel::SpacetreeSet::receiveDanglingMessages() {
#ifdef Parallel

  if (tarch::mpi::Rank::getInstance().isMessageInQueue(_requestMessageTag)) {
    logTraceIn("receiveDanglingMessages()");

    TreeManagementMessage message;
    TreeManagementMessage::receive(
      message, MPI_ANY_SOURCE, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    _unansweredMessages.push_back(message);

    logDebug("receiveDanglingMessages()", "received new message " << message.toString());

    logTraceOut("receiveDanglingMessages()");
  }

  answerQuestions();
#endif
}
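// Usage sketch (assumption about the calling idiom): whenever a rank blocks
// for an MPI message elsewhere, it is expected to poll
//
//   tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
//
// which dispatches to this routine. Tree-management requests are therefore
// answered even while the rank is busy inside another communication phase.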


void peano4::parallel::SpacetreeSet::addSpacetree(int masterId, int newTreeId) {
  logTraceInWith2Arguments("addSpacetree(int,int)", masterId, newTreeId);
  if (peano4::parallel::Node::getInstance().getRank(masterId) != peano4::parallel::Node::getInstance().getRank(newTreeId)) {
#ifdef Parallel
    const int targetRank = peano4::parallel::Node::getInstance().getRank(newTreeId);

    TreeManagementMessage message;
    message.setMasterSpacetreeId(masterId);
    message.setWorkerSpacetreeId(newTreeId);
    message.setAction(TreeManagementMessage::Action::CreateNewRemoteTree);
    TreeManagementMessage::sendAndPollDanglingMessages(message, targetRank, _requestMessageTag);

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);

    peano4::grid::AutomatonState state = _spacetrees.begin()->_root;
    logDebug("addSpacetree(int,int)", "send state " << state.toString() << " to rank " << targetRank);
    peano4::grid::AutomatonState::send(
      state, targetRank, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);
#else
    assertionMsg(false, "should never enter this branch without -DParallel");
#endif
  } else {
    peano4::grid::Spacetree newTree(
      newTreeId,
      masterId,
      _spacetrees.begin()->_root.getX(),
      _spacetrees.begin()->_root.getH(),
      _spacetrees.begin()->_root.getInverted()
    );
    tarch::multicore::Lock lock(_semaphore);
    _spacetrees.push_back(std::move(newTree));
  }
  logTraceOut("addSpacetree(int,int)");
}
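// Message flow of the remote branch above (illustrative summary):
//
//   master rank                            worker rank
//   -------------------------------------  --------------------------------------
//   send CreateNewRemoteTree  -----------> receiveDanglingMessages() queues it
//   wait for Acknowledgement  <----------- answerQuestions() acks once Waiting
//   send AutomatonState       -----------> constructs Spacetree from the state
//   wait for Acknowledgement  <----------- new tree sits in worker's _spacetrees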


peano4::parallel::SpacetreeSet::TraverseTask::TraverseTask(
  peano4::grid::Spacetree& tree,
  SpacetreeSet& set,
  peano4::grid::TraversalObserver& observer,
  bool invertTreeTraversalDirectionBeforeWeStart
):
  Task(Task::DontFuse, Task::DefaultPriority),
  _spacetree(tree),
  _spacetreeSet(set),
  _observer(observer),
  _invertTreeTraversalDirectionBeforeWeStart(invertTreeTraversalDirectionBeforeWeStart) {}


void peano4::parallel::SpacetreeSet::TraverseTask::run() {
  _spacetreeSet.createObserverCloneIfRequired(_observer, _spacetree._id);
  if (_invertTreeTraversalDirectionBeforeWeStart) {
    _spacetree._root.setInverted(not _spacetree._root.getInverted());
  }
  _spacetree.traverse(*_spacetreeSet._clonedObserver[_spacetree._id], true);
}


void peano4::parallel::SpacetreeSet::createObserverCloneIfRequired(
  peano4::grid::TraversalObserver& observer, int treeId
) {
  tarch::multicore::Lock lock(_semaphore);

  if (_clonedObserver.count(treeId) == 0) {
    _clonedObserver.insert(std::pair<int, peano4::grid::TraversalObserver*>(treeId, observer.clone(treeId)));
  }
  if (_clonedObserver[treeId] == nullptr) {
    _clonedObserver[treeId] = observer.clone(treeId);
  }
  assertion2(
    _clonedObserver.count(treeId) == 1 and _clonedObserver[treeId] != nullptr, treeId, _clonedObserver.count(treeId)
  );
}
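// Sketch of the observer contract this bookkeeping relies on (illustrative;
// MyObserver is a hypothetical user class): each local tree traverses with
// its own observer instance obtained via TraversalObserver::clone(), e.g.
//
//   peano4::grid::TraversalObserver* MyObserver::clone(int spacetreeId) {
//     return new MyObserver(spacetreeId);
//   }
//
// so the set caches exactly one clone per tree id within _clonedObserver.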


void peano4::parallel::SpacetreeSet::streamLocalVertexInformationToMasterThroughVerticalStacks(
  [[maybe_unused]] int spacetreeId,
  [[maybe_unused]] int parentId,
  [[maybe_unused]] const std::set<int>& joiningIds
) {
  logTraceInWith2Arguments("streamLocalVertexInformationToMasterThroughVerticalStacks(...)", spacetreeId, parentId);

  /*
  const int destinationRank  = Node::getInstance().getRank( parentId );
  const int sourceRank       = Node::getInstance().getRank( spacetreeId );
  const int destinationStack = Node::getInstance().getInputStackNumberForVerticalDataExchange( spacetreeId );
  const int sourceStack      = Node::getInstance().getOutputStackNumberForVerticalDataExchange( parentId );
  if (
    destinationRank != tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( peano4::maps::StackKey(spacetreeId,sourceStack) )->empty()
  ) {
    assertion(false);
    const int tag = Node::getInstance().getGridDataExchangeTag( sourceSpacetreeId, destinationSpacetreeId, Node::ExchangeMode::SendVerticalData );
    logInfo(
      "exchangeStacksSynchronously(...)",
      "send stack " << sourceStack << " from tree " << sourceSpacetreeId << " to rank " << destinationRank
        << ": " << stackContainer[ peano4::grid::Spacetree::StackKey(destinationSpacetreeId,destinationStack) ].toString()
    );

    tarch::mpi::IntegerMessage message( stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].size() );
    message.send(destinationRank,tag,false,tarch::mpi::IntegerMessage::ExchangeMode::NonblockingWithPollingLoopOverTests);

    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].startSend(destinationRank,tag);
    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].finishSendOrReceive();
  }

  // do I still hold the stacks, or am I currently misusing them for MPI sends and receives?
  if (
    destinationRank == tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( spacetreeId,sourceStack )->empty()
  ) {
    logDebug(
      "exchangeAllVerticalDataExchangeStacks(...)",
      "stream content of stack " << sourceStack << " on tree " << spacetreeId
        << " into stack " << destinationStack << " of tree " << parentId << " as source tree is joining"
    );
    peano4::grid::Spacetree::_vertexStack.getForPush(parentId,destinationStack)->clone(
      *peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack)
    );
    peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack)->clear();
    peano4::grid::Spacetree::_vertexStack.getForPop(parentId,destinationStack)->reverse();
  }
  */

  logTraceOut("streamLocalVertexInformationToMasterThroughVerticalStacks(...)");
}


void peano4::parallel::SpacetreeSet::deleteAllStacks(peano4::grid::TraversalObserver& observer, int spacetreeId) {
  logTraceInWith1Argument("deleteAllStacks(int)", spacetreeId);

  createObserverCloneIfRequired(observer, spacetreeId);
  _clonedObserver[spacetreeId]->deleteAllStacks();
  logTraceOut("deleteAllStacks(int)");
}


void peano4::parallel::SpacetreeSet::exchangeVerticalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeVerticalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    streamLocalVertexInformationToMasterThroughVerticalStacks(p._id, p._masterId, p._joining);

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllVerticalDataExchangeStacks(p._masterId);
  }

  logDebug(
    "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeVerticalDataBetweenTrees(...)");
}


void peano4::parallel::SpacetreeSet::streamDataFromSplittingTreesToNewTrees(peano4::grid::TraversalObserver& observer) {
  logTraceInWith1Argument("streamDataFromSplittingTreesToNewTrees()", _spacetrees.size());

  for (auto& parent : _spacetrees) {
    for (auto& worker : parent._hasSplit) {
      const int temporaryOutStackForVertices = Node::getOutputStackNumberForVerticalDataExchange(worker);
      const int sourceStackForVertices       = peano4::grid::PeanoCurve::getInputStackNumber(parent._root);
      assertion3(
        peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)->empty(),
        parent._id,
        temporaryOutStackForVertices,
        sourceStackForVertices
      );
      peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)
        ->clone(*peano4::grid::Spacetree::_vertexStack.getForPop(parent._id, sourceStackForVertices));

      createObserverCloneIfRequired(observer, parent._id);

      _clonedObserver[parent._id]->streamDataFromSplittingTreeToNewTree(worker);
    }
  }

  for (auto& p : _spacetrees) {
    if (p._spacetreeState == peano4::grid::SpacetreeState::EmptyRun) {
      createObserverCloneIfRequired(observer, p._masterId);

      _clonedObserver[p._masterId]->streamDataFromSplittingTreeToNewTree(p._id);
    }
  }

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("streamDataFromSplittingTreesToNewTrees()");
}


void peano4::parallel::SpacetreeSet::exchangeHorizontalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeHorizontalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllHorizontalDataExchangeStacks(true);
    _clonedObserver[p._id]->exchangeAllPeriodicBoundaryDataStacks();
  }

  logDebug(
    "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeHorizontalDataBetweenTrees(...)");
}


std::set<int> peano4::parallel::SpacetreeSet::getLocalTreesMergingWithWorkers() const {
  std::set<int> result;
  for (const auto& tree : _spacetrees) {
    if (not tree._joining.empty()) {
      result.insert(tree._id);
    }
  }
  return result;
}


void peano4::parallel::SpacetreeSet::createNewTrees() {
  for (const auto& tree : _spacetrees) {
    for (auto& p : tree._splitting) {
      addSpacetree(tree._id, p);
    }
  }
}


void peano4::parallel::SpacetreeSet::deleteClonedObservers() {
  for (auto& p : _clonedObserver) {
    if (p.second != nullptr) {
      delete p.second;
      p.second = nullptr;
    }
  }
}


void peano4::parallel::SpacetreeSet::traverse(peano4::grid::TraversalObserver& observer) {
  logTraceIn("traverse(TraversalObserver&)");

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    // ...
  }
  logDebug("traverse(TraversalObserver)", "start new grid sweep");

  tarch::mpi::Rank::getInstance().barrier([&]() -> void {
    tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
  });
  logDebug("traverse(TraversalObserver&)", "rank has passed barrier");

  _state = SpacetreeSetState::TraverseTreesAndExchangeData;

  std::vector<tarch::multicore::Task*> primaryTasks;
  std::vector<tarch::multicore::Task*> secondaryTasks;
  std::vector<tarch::multicore::Task*> tertiaryTasks;

  for (auto& p : _spacetrees) {
    switch (p._spacetreeState) {
      case peano4::grid::SpacetreeState::NewRoot:
      case peano4::grid::SpacetreeState::JoinTriggered:
      case peano4::grid::SpacetreeState::Joining:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
        );
        primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      case peano4::grid::SpacetreeState::Running:
        if (p._joining.empty()) {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
          );
          primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        } else {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree "
              << p._id << " in the third tree set as it joins in other tree: " << p.toString()
          );
          tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        }
        break;
      case peano4::grid::SpacetreeState::EmptyRun:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in secondary tree set as empty tree and in third set as new tree"
        );
        secondaryTasks.push_back(new TraverseTask(p, *this, observer, true));
        tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      case peano4::grid::SpacetreeState::NewFromSplit:
      case peano4::grid::SpacetreeState::Joined:
        assertionMsg("should not happen", false);
        break;
    }
  }

  // I use this boolean flag from time to time to debug the code.
  [[maybe_unused]] const bool runSequentially = false;

  logTraceInWith1Argument("traverse(TraversalObserver&)-primary", primaryTasks.size());
  tarch::multicore::spawnAndWait(primaryTasks);
  logTraceOut("traverse(TraversalObserver&)-primary");

  logDebug(
    "traverse(TraversalObserver&)", "primary tasks (traversals) complete, trigger split data stream if required"
  );
  streamDataFromSplittingTreesToNewTrees(observer);
  logDebug("traverse(TraversalObserver&)", "exchange vertical data if required");
  exchangeVerticalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-secondary", secondaryTasks.size());
  tarch::multicore::spawnAndWait(secondaryTasks);
  logTraceOut("traverse(TraversalObserver&)-secondary");

  tarch::timing::Watch dataExchangeTime("peano4::parallel::SpacetreeSet", "traverse", false);

  exchangeHorizontalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-tertiary", tertiaryTasks.size());
  tarch::multicore::spawnAndWait(tertiaryTasks);
  logTraceOut("traverse(TraversalObserver&)-tertiary");

  _state = SpacetreeSetState::Waiting;

  dataExchangeTime.stop();
  tarch::logging::Statistics::getInstance().log("mpi wait times", dataExchangeTime.getCPUTime());

  cleanUpTrees(observer);

  createNewTrees();

  deleteClonedObservers();

  logTraceOut("traverse(TraversalObserver&)");
}
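// Usage sketch (illustrative, simplified): a typical driver calls traverse()
// once per grid sweep on every rank, with MyObserver a hypothetical user type:
//
//   MyObserver observer;
//   while (simulationContinues()) {
//     peano4::parallel::SpacetreeSet::getInstance().traverse(observer);
//   }
//
// Each call runs the primary, secondary and tertiary task waves above and
// returns once all stacks and outstanding MPI transfers are consistent again.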


void peano4::parallel::SpacetreeSet::cleanUpTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("cleanUpTrees(...)");
  for (auto p = _spacetrees.begin(); p != _spacetrees.end();) {
    /*
    else if (
      p->getGridStatistics().getCoarseningHasBeenVetoed()
      and
      p->mayJoinWithMaster()
      and
      p->_masterId >= 0
      and
      getSpacetree(p->_masterId).mayJoinWithWorker()
    ) {
      logInfo(
        "traverse(Observer)",
        "trigger join of tree " << p->_id << " with its master tree " << p->_masterId
          << " to enable further grid erases"
      );
      join(p->_id);
    }
    */
    if (p->_spacetreeState == peano4::grid::SpacetreeState::Joined) {
      logInfo("traverse(Observer)", "tree " << p->_id << " has successfully joined");
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);
      p = _spacetrees.erase(p);
      continue;
    } else if (p->mayJoinWithMaster() and p->getGridStatistics().getNumberOfLocalUnrefinedCells() == 0) {
      logInfo("traverse(Observer)", "remove empty tree " << p->_id << " with master " << p->_masterId);
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);

      if (Node::getInstance().getRank(p->_masterId) != tarch::mpi::Rank::getInstance().getRank()) {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is not local on this rank. Remove child reference"
        );
#ifdef Parallel
        TreeManagementMessage message(
          p->_masterId, p->_id, TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty
        );
        TreeManagementMessage::send(
          message,
          Node::getInstance().getRank(p->_masterId),
          _requestMessageTag,
          tarch::mpi::Rank::getInstance().getCommunicator()
        );

        TreeManagementMessage::receive(
          message,
          Node::getInstance().getRank(p->_masterId),
          getAnswerTag(p->_id),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
        assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);
#else
        assertionMsg(false, "branch may not be entered");
#endif
      } else {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is local on this rank. Remove child reference"
        );
        getSpacetree(p->_masterId)._childrenIds.erase(p->_id);
      }
      p = _spacetrees.erase(p);
      continue;
    } else if (p->mayJoinWithMaster()) {
      const int localRank  = Node::getInstance().getRank(p->_id);
      const int masterRank = Node::getInstance().getRank(p->_masterId);
      if (localRank == masterRank and getSpacetree(p->_masterId).getGridStatistics().getCoarseningHasBeenVetoed()) {
        // @todo first message should be logInfo
        logError(
          "traverse(Observer)",
          "join tree "
            << p->_id << " as it is deteriorated (encodes no hierarchical data) while master " << p->_masterId
            << " resides on same rank and can't coarsen"
        );
        logError("traverse(Observer)", "not implemented yet");
        // p->joinWithMaster();
        // getSpacetree(p->_masterId).joinWithWorker(p->_id);
      } else if (localRank == masterRank) {
        logDebug(
          "traverse(Observer)",
          "tree " << p->_id << " is deteriorated (encodes no hierarchical data) yet seems not to constrain its master"
        );
      } else {
        // @todo: but only if there are still other trees left on this rank
        // @todo first message should be logInfo
        logDebug(
          "cleanUpTrees(...)", "I should merge tree " << p->_id << " to reduce synchronisation: " << p->toString()
        );
        logDebug("traverse(Observer)", "not implemented yet");
      }
    }
    p++;
  }
  logTraceOut("cleanUpTrees(...)");
}


void peano4::parallel::SpacetreeSet::shutdown() {}


peano4::grid::GridStatistics peano4::parallel::SpacetreeSet::getGridStatistics() const {
  logTraceIn("getGridStatistics()");
  if (_spacetrees.empty()) {
    peano4::grid::GridStatistics result(
      0,     // numberOfLocalUnrefinedCells
      0,     // numberOfRemoteUnrefinedCells
      0,     // numberOfLocalRefinedCells
      0,     // numberOfRemoteRefinedCells
      0,     // stationarySweeps
      false, // coarseningHasBeenVetoed
      false, // removedEmptySubtree
      tarch::la::Vector<Dimensions, double>(std::numeric_limits<double>::max()) // minH
    );
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  } else {
    peano4::grid::GridStatistics result(_spacetrees.begin()->_statistics);
    for (auto& from : _spacetrees) {
      if (from._id != _spacetrees.begin()->_id and from._spacetreeState != peano4::grid::SpacetreeState::NewFromSplit) {
        result = result + from._statistics;
      }
    }
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  }
}
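// Note on the reduction above: trees in state NewFromSplit still mirror cells
// that their master also counts, so they are skipped to avoid double counting.
// Illustrative use:
//
//   auto stats = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics();
//   if (stats.getNumberOfLocalUnrefinedCells() == 0) {
//     // this rank currently holds no fine-grid work
//   }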


bool peano4::parallel::SpacetreeSet::isLocalSpacetree(int treeId) const {
  for (auto& p : _spacetrees) {
    if (p._id == treeId)
      return true;
  }
  return false;
}


std::set<int> peano4::parallel::SpacetreeSet::getLocalSpacetrees() const {
  std::set<int> result;

  for (auto& p : _spacetrees) {
    result.insert(p._id);
  }

  return result;
}


bool peano4::parallel::SpacetreeSet::split(int treeId, const peano4::SplitInstruction& instruction, int targetRank) {
  logTraceInWith3Arguments("split(int,SplitInstruction,int)", treeId, instruction, targetRank);
  peano4::grid::Spacetree& tree = getSpacetree(treeId);

  if (tree.maySplit()) {
    int newSpacetreeId = -1;

    if (tarch::mpi::Rank::getInstance().getRank() != targetRank) {
#ifdef Parallel
      logDebug("split(int,SplitInstruction,int)", "request new tree on rank " << targetRank);
      TreeManagementMessage requestMessage;
      requestMessage.setMasterSpacetreeId(treeId);
      requestMessage.setWorkerSpacetreeId(-1);
      requestMessage.setAction(TreeManagementMessage::Action::RequestNewRemoteTree);
      TreeManagementMessage::sendAndPollDanglingMessages(requestMessage, targetRank, _requestMessageTag);

      logDebug("split(int,SplitInstruction,int)", "message " << requestMessage.toString() << " sent - wait for answer");

      TreeManagementMessage answerMessage;
      TreeManagementMessage::receiveAndPollDanglingMessages(
        answerMessage, targetRank, getAnswerTag(treeId)
      );
      newSpacetreeId = answerMessage.getWorkerSpacetreeId();
#else
      newSpacetreeId = -1;
      assertionMsg(false, "can't split into tree on a different rank if not compiled with mpi");
#endif
    } else {
      newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
        peano4::parallel::Node::getInstance().getRank(treeId), treeId
      );
    }

    if (newSpacetreeId >= 0) {
      tree.split(newSpacetreeId, instruction);
      if (2 * tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) > tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte)) {
        logWarning(
          "split(int,SplitInstruction,int)",
          "Peano 4 uses "
            << tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) << " MB of memory on rank "
            << tarch::mpi::Rank::getInstance().getRank() << " and is asked to split. Total memory is "
            << tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte) << " MB, i.e. we might run out of memory"
        );
      }

      logInfo(
        "split(int,SplitInstruction,int)",
        "trigger split of tree " << treeId << " into tree " << newSpacetreeId << " with " << instruction
      );
      logTraceOutWith1Argument("split(int,SplitInstruction,int)", true);
      return true;
    }
  }

  logTraceOutWith1Argument("split(int,SplitInstruction,int)", false);
  return false;
}
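// Usage sketch (illustrative): ask tree 0 to hand cells over to a new worker
// on the local rank. The assumption here is that SplitInstruction can be
// brace-initialised with the number of fine-grid cells to deploy:
//
//   peano4::SplitInstruction instruction{20};
//   bool success = peano4::parallel::SpacetreeSet::getInstance().split(
//     0, instruction, tarch::mpi::Rank::getInstance().getRank()
//   );
//
// The call returns false if tree 0 may not split at the moment.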


peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid warning
}


const peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) const {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid warning
}