SpacetreeSet.cpp
#include "SpacetreeSet.h"

#include "config.h"
#include "Node.h"
#include "TreeManagementMessage.h"
#include "tarch/tarch.h"
#include "peano4/grid/grid.h"
#include "peano4/grid/PeanoCurve.h"
#include "peano4/grid/Spacetree.h"
#include "peano4/grid/TraversalObserver.h"
#include "tarch/logging/Statistics.h"
#include "tarch/mpi/Rank.h"
#include "tarch/multicore/Lock.h"
#include "tarch/multicore/Tasks.h"
#include "tarch/services/ServiceRepository.h"
#include "tarch/timing/Watch.h"


tarch::logging::Log peano4::parallel::SpacetreeSet::_log("peano4::parallel::SpacetreeSet");


tarch::multicore::BooleanSemaphore peano4::parallel::SpacetreeSet::_semaphore;


peano4::parallel::SpacetreeSet peano4::parallel::SpacetreeSet::_singleton;


void peano4::parallel::SpacetreeSet::init(
  const tarch::la::Vector<Dimensions, double>& offset,
  const tarch::la::Vector<Dimensions, double>& width,
  const std::bitset<Dimensions>&               periodicBC
) {
  _requestMessageTag = tarch::mpi::Rank::reserveFreeTag("peano4::parallel::SpacetreeSet - request message");
  _answerMessageTag  = tarch::mpi::Rank::reserveFreeTag(
    "peano4::parallel::SpacetreeSet - answer message", Node::MaxSpacetreesPerRank
  );
  tarch::services::ServiceRepository::getInstance().addService(this, "peano4::parallel::SpacetreeSet");

#ifdef Parallel
  assertion4(
    (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 1
     and tarch::mpi::Rank::getInstance().getRank() == 0)
      or (peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees() == 0 and tarch::mpi::Rank::getInstance().getRank() != 0),
    peano4::parallel::Node::getInstance().getNumberOfRegisteredTrees(),
    offset,
    width,
    periodicBC
  );
#endif

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    logTraceInWith3Arguments("init(...)", offset, width, periodicBC);
    peano4::grid::Spacetree spacetree(offset, width, periodicBC);
    _spacetrees.push_back(spacetree);
    logTraceOutWith3Arguments("init(...)", offset, width, periodicBC);
  }
}
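
// Usage sketch (not from this file; signatures taken from the class
// interface): a typical main code initialises the rank-local singleton once
// and then triggers one grid sweep per step:
//
//   peano4::parallel::SpacetreeSet::getInstance().init(offset, width, periodicBC);
//   while (simulationContinues) {   // hypothetical loop condition
//     peano4::parallel::SpacetreeSet::getInstance().traverse(myObserver);
//   }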


peano4::parallel::SpacetreeSet::~SpacetreeSet() { tarch::services::ServiceRepository::getInstance().removeService(this); }


peano4::parallel::SpacetreeSet& peano4::parallel::SpacetreeSet::getInstance() { return _singleton; }


int peano4::parallel::SpacetreeSet::getAnswerTag(int targetSpacetreeId) const {
  return _answerMessageTag + Node::getInstance().getLocalTreeId(targetSpacetreeId);
}


std::string peano4::parallel::SpacetreeSet::toString(SpacetreeSetState state) {
  switch (state) {
    case SpacetreeSetState::TraverseTreesAndExchangeData:
      return "traverse-trees-and-exchange-data";
    case SpacetreeSetState::Waiting:
      return "waiting";
  }
  return "undef";
}

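// Run through the set of unanswered questions and, well, answer them: requests
// for a new tree id are served straight away, while tree creation and
// bookkeeping requests may only be answered while the set is waiting, i.e. not
// in the middle of a traversal.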
void peano4::parallel::SpacetreeSet::answerQuestions() {
#ifdef Parallel
  std::vector<peano4::parallel::TreeManagementMessage> unansweredMessagesThatICanAnswerNow;

  std::vector<peano4::parallel::TreeManagementMessage>::iterator p = _unansweredMessages.begin();
  while (p != _unansweredMessages.end()) {
    switch (p->getAction()) {
      case TreeManagementMessage::Action::RequestNewRemoteTree:
        unansweredMessagesThatICanAnswerNow.push_back(*p);
        p = _unansweredMessages.erase(p);
        break;
      case TreeManagementMessage::Action::CreateNewRemoteTree:
      case TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty: {
        if (_state == SpacetreeSetState::Waiting) {
          unansweredMessagesThatICanAnswerNow.push_back(*p);
          p = _unansweredMessages.erase(p);
        } else {
          logDebug("answerMessages()", "can't answer as I'm in the wrong state");
          p++;
        }
      } break;
      case TreeManagementMessage::Action::JoinWithWorker:
      case TreeManagementMessage::Action::Acknowledgement:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
    }
  }

  for (auto p : unansweredMessagesThatICanAnswerNow) {
    switch (p.getAction()) {
      case TreeManagementMessage::Action::RequestNewRemoteTree: {
        int newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
          tarch::mpi::Rank::getInstance().getRank(), // on current node
          p.getMasterSpacetreeId()                   // this is the tree who has requested the new tree
        );

        TreeManagementMessage answerMessage;
        answerMessage.setWorkerSpacetreeId(newSpacetreeId);
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
        logInfo(
          "receiveDanglingMessages()", "reserved tree id " << newSpacetreeId << " for tree " << p.getMasterSpacetreeId()
        );
      } break;
      case TreeManagementMessage::Action::CreateNewRemoteTree: {
        assertion(_state == SpacetreeSetState::Waiting);
        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );

        peano4::grid::AutomatonState state;
        peano4::grid::AutomatonState::receive(
          state, p.getSenderRank(), _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
        );
        peano4::grid::Spacetree newTree(
          p.getWorkerSpacetreeId(), p.getMasterSpacetreeId(), state.getX(), state.getH(), state.getInverted()
        );

        _spacetrees.push_back(std::move(newTree));

        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getMasterSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
      case TreeManagementMessage::Action::JoinWithWorker:
      case TreeManagementMessage::Action::Acknowledgement:
        assertionMsg(false, "should only be passed synchronously and never run through this tag");
        break;
      case TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty: {
        assertion(_state == SpacetreeSetState::Waiting);
        logInfo(
          "receiveDanglingMessages(...)",
          "learned that remote child tree "
            << p.getWorkerSpacetreeId() << " of local tree " << p.getMasterSpacetreeId()
            << " is degenerated thus had been removed"
        );
        getSpacetree(p.getMasterSpacetreeId())._childrenIds.erase(p.getWorkerSpacetreeId());

        TreeManagementMessage answerMessage;
        answerMessage.setAction(TreeManagementMessage::Action::Acknowledgement);
        TreeManagementMessage::send(
          answerMessage,
          p.getSenderRank(),
          getAnswerTag(p.getWorkerSpacetreeId()),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
      } break;
    }
  }
#else
  assertion(_unansweredMessages.empty());
#endif
}

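// We poll the tree management messages and queue them up; answerQuestions()
// then serves whatever can be answered in the current state.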
void peano4::parallel::SpacetreeSet::receiveDanglingMessages() {
#ifdef Parallel

  if (tarch::mpi::Rank::getInstance().isMessageInQueue(_requestMessageTag)) {
    logTraceIn("receiveDanglingMessages()");

    peano4::parallel::TreeManagementMessage message;
    peano4::parallel::TreeManagementMessage::receive(
      message, MPI_ANY_SOURCE, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    _unansweredMessages.push_back(message);

    logDebug("receiveDanglingMessages()", "received new message " << message.toString());

    logTraceOut("receiveDanglingMessages()");
  }

  answerQuestions();
#endif
}


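// Adds a new spacetree to the set. If the new tree is to live on a remote
// rank, we ask that rank to build the tree and stream the root's automaton
// state over; otherwise we clone the root state locally.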
void peano4::parallel::SpacetreeSet::addSpacetree(int masterId, int newTreeId) {
  logTraceInWith2Arguments("addSpacetree(int,int)", masterId, newTreeId);
  if (peano4::parallel::Node::getInstance().getRank(masterId) != peano4::parallel::Node::getInstance().getRank(newTreeId)) {
#ifdef Parallel
    const int targetRank = peano4::parallel::Node::getInstance().getRank(newTreeId);

    TreeManagementMessage message;
    message.setMasterSpacetreeId(masterId);
    message.setWorkerSpacetreeId(newTreeId);
    message.setAction(TreeManagementMessage::Action::CreateNewRemoteTree);
    TreeManagementMessage::sendAndPollDanglingMessages(message, targetRank, _requestMessageTag);

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);

    peano4::grid::AutomatonState state = _spacetrees.begin()->_root;
    logDebug("addSpacetree(int,int)", "send state " << state.toString() << " to rank " << targetRank);
    peano4::grid::AutomatonState::send(
      state, targetRank, _requestMessageTag, tarch::mpi::Rank::getInstance().getCommunicator()
    );

    TreeManagementMessage::receiveAndPollDanglingMessages(
      message, targetRank, getAnswerTag(masterId), tarch::mpi::Rank::getInstance().getCommunicator()
    );
    assertion(message.getAction() == TreeManagementMessage::Action::Acknowledgement);
#else
    assertionMsg(false, "should never enter this branch without -DParallel");
#endif
  } else {
    peano4::grid::Spacetree newTree(
      newTreeId,
      masterId,
      _spacetrees.begin()->_root.getX(),
      _spacetrees.begin()->_root.getH(),
      _spacetrees.begin()->_root.getInverted()
    );
    tarch::multicore::Lock lock(_semaphore);
    _spacetrees.push_back(std::move(newTree));
  }
  logTraceOut("addSpacetree(int,int)");
}

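// Each task triggers the traversal of one specific spacetree.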
peano4::parallel::SpacetreeSet::TraverseTask::TraverseTask(
  peano4::grid::Spacetree&          tree,
  SpacetreeSet&                     set,
  peano4::grid::TraversalObserver&  observer,
  bool                              invertTreeTraversalDirectionBeforeWeStart
):
  Task(Task::DontFuse, Task::DefaultPriority),
  _spacetree(tree),
  _spacetreeSet(set),
  _observer(observer),
  _invertTreeTraversalDirectionBeforeWeStart(invertTreeTraversalDirectionBeforeWeStart) {}

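// Create/look up the observer clone for the local tree, optionally invert the
// traversal direction, and then run the actual traversal. The task never
// respawns, so run() returns false.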
bool peano4::parallel::SpacetreeSet::TraverseTask::run() {
  OTTER_DEFINE_TASK(local_task, OTTER_NULL_TASK, otter_add_to_pool, otter::label::unknown);
  OTTER_TASK_START(local_task);

  _spacetreeSet.createObserverCloneIfRequired(_observer, _spacetree._id);
  if (_invertTreeTraversalDirectionBeforeWeStart) {
    _spacetree._root.setInverted(not _spacetree._root.getInverted());
  }
  _spacetree.traverse(*_spacetreeSet._clonedObserver[_spacetree._id], true);

  OTTER_TASK_END(local_task);
  return false;
}

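// Quick lookup whether an observer clone for this tree id does already exist;
// if not, create one. Traversal tasks for several trees may call this routine
// concurrently, hence the lock.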
void peano4::parallel::SpacetreeSet::createObserverCloneIfRequired(
  peano4::grid::TraversalObserver& observer, int treeId
) {
  tarch::multicore::Lock lock(_semaphore);

  if (_clonedObserver.count(treeId) == 0) {
    _clonedObserver.insert(std::pair<int, peano4::grid::TraversalObserver*>(treeId, observer.clone(treeId)));
  }
  if (_clonedObserver[treeId] == nullptr) {
    _clonedObserver[treeId] = observer.clone(treeId);
  }
  assertion2(
    _clonedObserver.count(treeId) == 1 and _clonedObserver[treeId] != nullptr, treeId, _clonedObserver.count(treeId)
  );
}

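// Whenever we join two partitions, we have to stream data from the worker to
// the master. The actual implementation below is currently disabled.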
void peano4::parallel::SpacetreeSet::streamLocalVertexInformationToMasterThroughVerticalStacks(
  [[maybe_unused]] int                  spacetreeId,
  [[maybe_unused]] int                  parentId,
  [[maybe_unused]] const std::set<int>& joiningIds
) {
  logTraceInWith2Arguments("streamLocalVertexInformationToMasterThroughVerticalStacks(...)", spacetreeId, parentId);

  /*
  const int destinationRank  = Node::getInstance().getRank( parentId );
  const int sourceRank       = Node::getInstance().getRank( spacetreeId );
  const int destinationStack = Node::getInstance().getInputStackNumberForVerticalDataExchange( spacetreeId );
  const int sourceStack      = Node::getInstance().getOutputStackNumberForVerticalDataExchange( parentId );
  if (
    destinationRank != tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( peano4::maps::StackKey(spacetreeId,sourceStack) )->empty()
  ) {
    assertion(false);
    const int tag = Node::getInstance().getGridDataExchangeTag( sourceSpacetreeId, destinationSpacetreeId,
      Node::ExchangeMode::SendVerticalData );
    logInfo( "exchangeStacksSynchronously(...)", "send stack " << sourceStack
      << " from tree " << sourceSpacetreeId << " to rank " << destinationRank
      << ": " << stackContainer[ peano4::grid::Spacetree::StackKey(destinationSpacetreeId,destinationStack) ].toString()
    );

    tarch::mpi::IntegerMessage message( stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].size() );
    message.send(destinationRank,tag,false,tarch::mpi::IntegerMessage::ExchangeMode::NonblockingWithPollingLoopOverTests);

    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].startSend(destinationRank,tag);
    stackContainer[ peano4::grid::Spacetree::StackKey(sourceSpacetreeId,sourceStack) ].finishSendOrReceive();
  }

  // do I still have the stacks, or am I currently misusing them for MPI sends and receives?
  if (
    destinationRank == tarch::mpi::Rank::getInstance().getRank()
    and
    sourceRank == tarch::mpi::Rank::getInstance().getRank()
    and
    not peano4::grid::Spacetree::_vertexStack.getForPush( spacetreeId,sourceStack )->empty()
  ) {
    logDebug( "exchangeAllVerticalDataExchangeStacks(...)", "stream content of stack " << sourceStack << " on tree " <<
      spacetreeId << " into stack " << destinationStack << " of tree " << parentId << " as source tree is joining" );
    peano4::grid::Spacetree::_vertexStack.getForPush(parentId,destinationStack)->clone(
      *peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack) );
    peano4::grid::Spacetree::_vertexStack.getForPop(spacetreeId,sourceStack)->clear();
    peano4::grid::Spacetree::_vertexStack.getForPop(parentId,destinationStack)->reverse();
  }
  */

  logTraceOut("streamLocalVertexInformationToMasterThroughVerticalStacks(...)");
}

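// Delete all stacks (grid data) associated with one spacetree via the tree's
// observer clone.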
void peano4::parallel::SpacetreeSet::deleteAllStacks(peano4::grid::TraversalObserver& observer, int spacetreeId) {
  logTraceInWith1Argument("deleteAllStacks(int)", spacetreeId);
  createObserverCloneIfRequired(observer, spacetreeId);
  _clonedObserver[spacetreeId]->deleteAllStacks();
  logTraceOut("deleteAllStacks(int)");
}


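// Trigger all vertical (master-worker) data exchange and then wait for all
// outstanding MPI sends and receives to terminate.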
void peano4::parallel::SpacetreeSet::exchangeVerticalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeVerticalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    streamLocalVertexInformationToMasterThroughVerticalStacks(p._id, p._masterId, p._joining);

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllVerticalDataExchangeStacks(p._masterId);
  }

  logDebug(
    "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeVerticalDataBetweenTrees(...)");
}


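// Copy (stream) the data from a splitting tree onto its new workers: the
// splitter replicates its input vertex stack into a temporary output stack per
// new worker, and each new tree then receives this stream through its master.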
void peano4::parallel::SpacetreeSet::streamDataFromSplittingTreesToNewTrees(peano4::grid::TraversalObserver& observer) {
  logTraceInWith1Argument("streamDataFromSplittingTreesToNewTrees()", _spacetrees.size());

  for (auto& parent : _spacetrees) {
    for (auto& worker : parent._hasSplit) {
      const int temporaryOutStackForVertices = Node::getOutputStackNumberForVerticalDataExchange(worker);
      const int sourceStackForVertices       = peano4::grid::PeanoCurve::getInputStackNumber(parent._root);
      assertion3(
        peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)->empty(),
        parent._id,
        temporaryOutStackForVertices,
        sourceStackForVertices
      );
      peano4::grid::Spacetree::_vertexStack.getForPush(parent._id, temporaryOutStackForVertices)
        ->clone(*peano4::grid::Spacetree::_vertexStack.getForPop(parent._id, sourceStackForVertices));

      createObserverCloneIfRequired(observer, parent._id);

      _clonedObserver[parent._id]->streamDataFromSplittingTreeToNewTree(worker);
    }
  }

  for (auto& p : _spacetrees) {
    if (p._spacetreeState == peano4::grid::SpacetreeState::EmptyRun) {
      createObserverCloneIfRequired(observer, p._masterId);

      _clonedObserver[p._masterId]->streamDataFromSplittingTreeToNewTree(p._id);
    }
  }

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("streamDataFromSplittingTreesToNewTrees()");
}


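// Realise the horizontal data exchange, i.e. domain boundary data and periodic
// boundary data between neighbouring trees, and wait for MPI to finish.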
void peano4::parallel::SpacetreeSet::exchangeHorizontalDataBetweenTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("exchangeHorizontalDataBetweenTrees(...)");

  for (auto& p : _spacetrees) {
    logDebug(
      "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
      "manage data transfer of tree " << p._id << " in state " << peano4::grid::toString(p._spacetreeState)
    );

    createObserverCloneIfRequired(observer, p._id);

    _clonedObserver[p._id]->exchangeAllHorizontalDataExchangeStacks(true);
    _clonedObserver[p._id]->exchangeAllPeriodicBoundaryDataStacks();
  }

  logDebug(
    "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
    "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
  );

  for (auto& p : _spacetrees) {
    createObserverCloneIfRequired(observer, p._id);
    _clonedObserver[p._id]->finishAllOutstandingSendsAndReceives();
  }

  logTraceOut("exchangeHorizontalDataBetweenTrees(...)");
}


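// I need this routine for technical reasons: prior to the sweep of trees, we
// have to identify all of those local trees which are about to merge with one
// of their workers, i.e. whose _joining set is not empty.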
std::set<int> peano4::parallel::SpacetreeSet::getLocalTreesMergingWithWorkers() const {
  std::set<int> result;
  for (const auto& tree : _spacetrees) {
    if (not tree._joining.empty()) {
      result.insert(tree._id);
    }
  }
  return result;
}


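// This operation should be called pretty close towards the end of a traversal:
// it turns every split that has been triggered into an actual new tree.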
void peano4::parallel::SpacetreeSet::createNewTrees() {
  for (const auto& tree : _spacetrees) {
    for (auto& p : tree._splitting) {
      addSpacetree(tree._id, p);
    }
  }
}


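// Free all observer clones. After that, the map holds only nullptr entries.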
void peano4::parallel::SpacetreeSet::deleteClonedObservers() {
  for (auto& p : _clonedObserver) {
    if (p.second != nullptr) {
      delete p.second;
      p.second = nullptr;
    }
  }
}


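// Invoke traverse on all spacetrees in parallel. The sweep is split into three
// task sets: primary traversals, secondary traversals over freshly split-off
// (empty) trees which run in the inverted direction, and tertiary traversals
// covering joining and brand-new trees. Data is exchanged in-between.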
void peano4::parallel::SpacetreeSet::traverse(peano4::grid::TraversalObserver& observer) {
  logTraceIn("traverse(TraversalObserver&)");

  if (tarch::mpi::Rank::getInstance().isGlobalMaster()) {
    peano4::parallel::Node::getInstance().continueToRun();
  }
  logDebug("traverse(TraversalObserver)", "start new grid sweep");

  tarch::mpi::Rank::getInstance().barrier([&]() -> void {
    tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
  });
  logDebug("traverse(TraversalObserver&)", "rank has passed barrier");

  _state = SpacetreeSetState::TraverseTreesAndExchangeData;

  std::vector<tarch::multicore::Task*> primaryTasks;
  std::vector<tarch::multicore::Task*> secondaryTasks;
  std::vector<tarch::multicore::Task*> tertiaryTasks;

  for (auto& p : _spacetrees) {
    switch (p._spacetreeState) {
      case peano4::grid::SpacetreeState::NewRoot:
      case peano4::grid::SpacetreeState::NewFromSplit:
      case peano4::grid::SpacetreeState::Running:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
        );
        primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      case peano4::grid::SpacetreeState::JoinTriggered:
        if (p._joining.empty()) {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
          );
          primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        } else {
          logDebug(
            "traverse(TraversalObserver&)",
            "issue task to traverse tree "
              << p._id << " in the third tree set as it joins in other tree: " << p.toString()
          );
          tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        }
        break;
      case peano4::grid::SpacetreeState::EmptyRun:
        logDebug(
          "traverse(TraversalObserver&)",
          "issue task to traverse tree " << p._id << " in secondary tree set as empty tree and in third set as new tree"
        );
        secondaryTasks.push_back(new TraverseTask(p, *this, observer, true));
        tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
        break;
      case peano4::grid::SpacetreeState::Joining:
      case peano4::grid::SpacetreeState::Joined:
        assertionMsg(false, "should not happen");
        break;
    }
  }

  // I use this boolean flag from time to time to debug the code.
  [[maybe_unused]] const bool runSequentially = false;

  logTraceInWith1Argument("traverse(TraversalObserver&)-primary", primaryTasks.size());
  tarch::multicore::spawnAndWait(primaryTasks);
  logTraceOut("traverse(TraversalObserver&)-primary");

  logDebug(
    "traverse(TraversalObserver&)", "primary tasks (traversals) complete, trigger split data stream if required"
  );
  streamDataFromSplittingTreesToNewTrees(observer);
  logDebug("traverse(TraversalObserver&)", "exchange vertical data if required");
  exchangeVerticalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-secondary", secondaryTasks.size());
  tarch::multicore::spawnAndWait(secondaryTasks);
  logTraceOut("traverse(TraversalObserver&)-secondary");

  tarch::timing::Watch dataExchangeTime("peano4::parallel::SpacetreeSet", "traverse", false);

  exchangeVerticalDataBetweenTrees(observer);

  logTraceInWith1Argument("traverse(TraversalObserver&)-tertiary", tertiaryTasks.size());
  tarch::multicore::spawnAndWait(tertiaryTasks);
  logTraceOut("traverse(TraversalObserver&)-tertiary");

  exchangeHorizontalDataBetweenTrees(observer);

  dataExchangeTime.stop();
  tarch::logging::Statistics::getInstance().log("mpi wait times", dataExchangeTime.getCPUTime());

  createNewTrees();

  cleanUpTrees(observer);

  deleteClonedObservers();

  _state = SpacetreeSetState::Waiting;

  logTraceOut("traverse(TraversalObserver&)");
}


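// Traversals have finished, so clean up: erase trees that have joined into
// their master, remove degenerated (empty) trees, and inform remote masters
// about removed children where necessary.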
void peano4::parallel::SpacetreeSet::cleanUpTrees(peano4::grid::TraversalObserver& observer) {
  logTraceIn("cleanUpTrees(...)");
  for (auto p = _spacetrees.begin(); p != _spacetrees.end();) {
    /*
    else if (
      p->getGridStatistics().getCoarseningHasBeenVetoed()
      and
      p->mayJoinWithMaster()
      and
    ) {
      logInfo( "traverse(Observer)", "trigger join of tree " << p->_id << " with its master tree " << p->_masterId
        << " to enable further grid erases"); join(p->_id);
      //
      p->_masterId>=0
      and
      getSpacetree(p->_masterId).mayJoinWithWorker()
    }
    */
    if (p->_spacetreeState == peano4::grid::SpacetreeState::Joined) {
      logInfo("traverse(Observer)", "tree " << p->_id << " has successfully joined");
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);
      p = _spacetrees.erase(p);
      continue;
    } else if (p->mayJoinWithMaster() and p->getGridStatistics().getNumberOfLocalUnrefinedCells() == 0) {
      logInfo("traverse(Observer)", "remove empty tree " << p->_id << " with master " << p->_masterId);
      deleteAllStacks(observer, p->_id);
      Node::getInstance().deregisterId(p->_id);

      if (Node::getInstance().getRank(p->_masterId) != tarch::mpi::Rank::getInstance().getRank()) {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is not local on this rank. Remove child reference"
        );
#ifdef Parallel
        TreeManagementMessage message(
          p->_masterId, p->_id, TreeManagementMessage::Action::RemoveChildTreeFromBooksAsChildBecameEmpty
        );
        TreeManagementMessage::sendAndPollDanglingMessages(
          message,
          Node::getInstance().getRank(p->_masterId),
          _requestMessageTag
        );

        TreeManagementMessage::receiveAndPollDanglingMessages(
          message,
          Node::getInstance().getRank(p->_masterId),
          getAnswerTag(p->_id),
          tarch::mpi::Rank::getInstance().getCommunicator()
        );
#else
        assertionMsg(false, "branch may not be entered");
#endif
      } else {
        logDebug(
          "traverse(Observer)", "parent tree " << p->_masterId << " is local on this rank. Remove child reference"
        );
        getSpacetree(p->_masterId)._childrenIds.erase(p->_id);
        getSpacetree(p->_masterId)._statistics.setRemovedEmptySubtree(true);
      }
      p = _spacetrees.erase(p);
      continue;
    } else if (p->mayJoinWithMaster()) {
      const int localRank  = Node::getInstance().getRank(p->_id);
      const int masterRank = Node::getInstance().getRank(p->_masterId);
      if (localRank == masterRank and getSpacetree(p->_masterId).getGridStatistics().getCoarseningHasBeenVetoed()) {
        // @todo first message should be an info
        logError(
          "traverse(Observer)",
          "join tree "
            << p->_id << " as it is deteriorated (encodes no hierarchical data) while master " << p->_masterId
            << " resides on same rank and can't coarsen"
        );
        logError("traverse(Observer)", "not implemented yet");
        // p->joinWithMaster();
        // getSpacetree(p->_masterId).joinWithWorker(p->_id);
      } else if (localRank == masterRank) {
        logDebug(
          "traverse(Observer)",
          "tree " << p->_id << " is deteriorated (encodes no hierarchical data) yet seems not to constrain its master"
        );
      } else {
        // @todo: but only if there are still other trees on this rank
        // @todo first message should be an info
        logDebug(
          "cleanUpTrees(...)", "I should merge tree " << p->_id << " to reduce synchronisation: " << p->toString()
        );
        logDebug("traverse(Observer)", "not implemented yet");
      }
    }
    p++;
  }
  logTraceOut("cleanUpTrees(...)");
}


peano4::grid::GridStatistics peano4::parallel::SpacetreeSet::getGridStatistics(int treeId) const {
  return getSpacetree(treeId).getGridStatistics();
}

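// Return the statistics over all local trees. Brand-new trees resulting from
// splits are excluded, as they do not carry valid data yet.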
peano4::grid::GridStatistics peano4::parallel::SpacetreeSet::getGridStatistics() const {
  logTraceIn("getGridStatistics()");
  if (_spacetrees.empty()) {
    peano4::grid::GridStatistics result(
      0,     // __numberOfLocalUnrefinedCells,
      0,     // __numberOfRemoteUnrefinedCells,
      0,     // __numberOfLocalRefinedCells
      0,     // __numberOfRemoteRefinedCells,
      0,     // __stationarySweeps,
      false,
      false,
      tarch::la::Vector<Dimensions, double>(std::numeric_limits<double>::max()) // minH
    );
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  } else {
    peano4::grid::GridStatistics result(_spacetrees.begin()->_statistics);
    for (auto& from : _spacetrees) {
      if (from._id != _spacetrees.begin()->_id and from._spacetreeState != peano4::grid::SpacetreeState::NewFromSplit) {
        result = result + from._statistics;
      }
    }
    logTraceOutWith1Argument("getGridStatistics()", result.toString());
    return result;
  }
}


bool peano4::parallel::SpacetreeSet::isLocalSpacetree(int treeId) const {
  for (auto& p : _spacetrees) {
    if (p._id == treeId)
      return true;
  }
  return false;
}


std::set<int> peano4::parallel::SpacetreeSet::getLocalSpacetrees() const {
  std::set<int> result;

  for (auto& p : _spacetrees) {
    result.insert(p._id);
  }

  return result;
}

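// Split a local tree. We first reserve a new tree id (locally, or via the tree
// management message protocol if targetRank is a different rank) and then ask
// the source tree to trigger the split. Returns false if the tree may not
// split at the moment.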
bool peano4::parallel::SpacetreeSet::split(int treeId, const peano4::SplitInstruction& instruction, int targetRank) {
  logTraceInWith3Arguments("split(int,SplitInstruction,int)", treeId, instruction, targetRank);
  peano4::grid::Spacetree& tree = getSpacetree(treeId);

  if (tree.maySplit()) {
    int newSpacetreeId = -1;

    if (tarch::mpi::Rank::getInstance().getRank() != targetRank) {
#ifdef Parallel
      logDebug("split(int,SplitInstruction,int)", "request new tree on rank " << targetRank);
      TreeManagementMessage requestMessage;
      requestMessage.setMasterSpacetreeId(treeId);
      requestMessage.setWorkerSpacetreeId(-1);
      requestMessage.setAction(TreeManagementMessage::Action::RequestNewRemoteTree);
      TreeManagementMessage::sendAndPollDanglingMessages(requestMessage, targetRank, _requestMessageTag);

      logDebug("split(int,SplitInstruction,int)", "message " << requestMessage.toString() << " sent - wait for answer");

      TreeManagementMessage answerMessage;
      TreeManagementMessage::receiveAndPollDanglingMessages(answerMessage, targetRank, getAnswerTag(treeId));
      newSpacetreeId = answerMessage.getWorkerSpacetreeId();
#else
      newSpacetreeId = -1;
      assertionMsg(false, "can't split into tree on a different rank if not compiled with mpi");
#endif
    } else {
      newSpacetreeId = peano4::parallel::Node::getInstance().reserveId(
        peano4::parallel::Node::getInstance().getRank(treeId), treeId
      );
    }

    if (newSpacetreeId >= 0) {
      tree.split(newSpacetreeId, instruction);
      if (tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) * 2 > tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte)) {
        logWarning(
          "split(int,SplitInstruction,int)",
          "Peano 4 uses "
            << tarch::getMemoryUsage(tarch::MemoryUsageFormat::MByte) << " MB on rank "
            << tarch::mpi::Rank::getInstance().getRank() << " and is asked to split. Total memory is "
            << tarch::getTotalMemory(tarch::MemoryUsageFormat::MByte) << " MB, i.e. we might run out of memory"
        );
      }

      logInfo(
        "split(int,SplitInstruction,int)",
        "trigger split of tree " << treeId << " into tree " << newSpacetreeId << " with " << instruction
      );
      logTraceOutWith1Argument("split(int,SplitInstruction,int)", true);
      return true;
    }
  }

  logTraceOutWith1Argument("split(int,SplitInstruction,int)", false);
  return false;
}
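
// Usage sketch (hypothetical values, not from this file): deploy cells of
// tree 0 into a new worker on the local rank. someInstruction would be a
// peano4::SplitInstruction describing how many cells the new tree receives.
//
//   bool success = peano4::parallel::SpacetreeSet::getInstance().split(
//     0, someInstruction, tarch::mpi::Rank::getInstance().getRank()
//   );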


peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid warning
}


const peano4::grid::Spacetree& peano4::parallel::SpacetreeSet::getSpacetree(int id) const {
  for (auto& p : _spacetrees) {
    if (p._id == id)
      return p;
  }
  assertion3(false, "no spacetree found", id, tarch::mpi::Rank::getInstance().getRank());
  return *_spacetrees.begin(); // just here to avoid warning
}