_spacetrees.push_back(spacetree);
case SpacetreeSetState::TraverseTreesAndExchangeData:
  return "traverse-trees-and-exchange-data";
case SpacetreeSetState::Waiting:
  return "waiting";
std::vector<peano4::parallel::TreeManagementMessage> unansweredMessagesThatIanAnswerNow;

std::vector<peano4::parallel::TreeManagementMessage>::iterator p = _unansweredMessages.begin();
while (p != _unansweredMessages.end()) {
  switch (p->getAction()) {
    // ...
      unansweredMessagesThatIanAnswerNow.push_back(*p);
      p = _unansweredMessages.erase(p);
    // ...
      if (_state == SpacetreeSetState::Waiting) {
        unansweredMessagesThatIanAnswerNow.push_back(*p);
        p = _unansweredMessages.erase(p);
      }
      else {
        logDebug("answerMessages()", "can't answer as I'm in the wrong state");
      }
    // ...
      assertionMsg(false, "should only be passed synchronously and never run through this tag");
for (auto p : unansweredMessagesThatIanAnswerNow) {
  switch (p.getAction()) {
    // ...
        p.getMasterSpacetreeId()
    // ...
        getAnswerTag(p.getMasterSpacetreeId()),
    // ...
        "receiveDanglingMessages()",
        "reserved tree id " << newSpacetreeId << " for tree " << p.getMasterSpacetreeId()
    // ...
        getAnswerTag(p.getMasterSpacetreeId()),
    // ...
        p.getWorkerSpacetreeId(), p.getMasterSpacetreeId(),
        state.getX(),
        state.getH(),
        state.getInverted()
    // ...
      _spacetrees.push_back(std::move(newTree));
    // ...
        getAnswerTag(p.getMasterSpacetreeId()),
    // ...
      assertionMsg(false, "should only be passed synchronously and never run through this tag");
    // ...
        "receiveDanglingMessages(...)",
        "learned that remote child tree " << p.getWorkerSpacetreeId() << " of local tree "
        << p.getMasterSpacetreeId() << " is degenerated and thus has been removed"
    // ...
      getSpacetree(p.getMasterSpacetreeId())._childrenIds.erase(p.getWorkerSpacetreeId());
    // ...
        getAnswerTag(p.getWorkerSpacetreeId()),
_unansweredMessages.push_back(message);

logDebug("receiveDanglingMessages()", "received new message " << message.toString());
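
The listing only shows the answering side of this handshake. As a rough orientation, the requesting side might look like the sketch below. It is an assumption pieced together from the TreeManagementMessage interface (setAction, setMasterSpacetreeId, sendAndPollDanglingMessages, receiveAndPollDanglingMessages); the action value RequestNewRemoteTree and the variables requestMessageTag, answerTag and myTreeId are placeholders rather than verbatim Peano identifiers.

// Hedged sketch, not verbatim Peano source: ask the spacetree set hosted on targetRank
// for a new tree id. Action::RequestNewRemoteTree, requestMessageTag and answerTag are
// assumed names; the real tags are managed by SpacetreeSet (_requestMessageTag, getAnswerTag()).
peano4::parallel::TreeManagementMessage requestMessage;
requestMessage.setMasterSpacetreeId(myTreeId);   // local tree that wants to fork off a child
requestMessage.setWorkerSpacetreeId(-1);         // unknown until the remote set answers
requestMessage.setAction(peano4::parallel::TreeManagementMessage::Action::RequestNewRemoteTree);

peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(requestMessage, targetRank, requestMessageTag);

peano4::parallel::TreeManagementMessage answer;
peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages(answer, targetRank, answerTag);
const int newSpacetreeId = answer.getWorkerSpacetreeId();   // assumption: the answer carries the reserved id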
logDebug(
  "addSpacetree(int,int)",
  "send state " << state.toString() << " to rank " << targetRank
);
// ...
assertionMsg(false, "should never enter this branch without -DParallel");
_spacetrees.begin()->_root.getX(),
_spacetrees.begin()->_root.getH(),
_spacetrees.begin()->_root.getInverted()
// ...
_spacetrees.push_back(std::move(newTree));
  bool invertTreeTraversalDirectionBeforeWeStart
):
  Task(Task::DontFuse, Task::DefaultPriority),
  // ...
  _invertTreeTraversalDirectionBeforeWeStart(invertTreeTraversalDirectionBeforeWeStart) {}

// ...
_spacetreeSet.createObserverCloneIfRequired(_observer, _spacetree._id);
if (_invertTreeTraversalDirectionBeforeWeStart) {
  _spacetree._root.setInverted(not _spacetree._root.getInverted());
}
_spacetree.traverse(*_spacetreeSet._clonedObserver[_spacetree._id], true);

// ...
_clonedObserver.insert(std::pair<int, peano4::grid::TraversalObserver*>(treeId, observer.clone(treeId)));
  [[maybe_unused]] int spacetreeId,
  [[maybe_unused]] int parentId,
  [[maybe_unused]] const std::set<int>& joiningIds
// ...
logTraceOut("streamLocalVertexInformationToMasterThroughVerticalStacks(...)");
logTraceIn("exchangeVerticalDataBetweenTrees(...)");
// ...
  "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
// ...
_clonedObserver[p._id]->exchangeAllVerticalDataExchangeStacks(p._masterId);
// ...
  "exchangeVerticalDataBetweenTrees(TraversalObserver&)",
  "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
// ...
logTraceOut("exchangeVerticalDataBetweenTrees(...)");
for (auto& worker : parent._hasSplit) {
  // ...
    temporaryOutStackForVertices,
    sourceStackForVertices
  // ...
  _clonedObserver[parent._id]->streamDataFromSplittingTreeToNewTree(worker);
// ...
_clonedObserver[p._masterId]->streamDataFromSplittingTreeToNewTree(p._id);
// ...
logTraceOut("streamDataFromSplittingTreesToNewTrees()");
logTraceIn("exchangeHorizontalDataBetweenTrees(...)");
// ...
  "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
// ...
  "exchangeHorizontalDataBetweenTrees(TraversalObserver&)",
  "all local data exchange realised, all MPI message exchange triggered. Wait for MPI to terminate"
// ...
logTraceOut("exchangeHorizontalDataBetweenTrees(...)");
std::set<int> result;
// ...
if (not tree._joining.empty()) {
  result.insert(tree._id);
// ...
for (auto& p : tree._splitting) {
// ...
if (p.second != nullptr) {
logDebug("traverse(TraversalObserver)", "start new grid sweep");
// ...
logDebug("traverse(TraversalObserver&)", "rank has passed barrier");
// ...
std::vector<tarch::multicore::Task*> primaryTasks;
std::vector<tarch::multicore::Task*> secondaryTasks;
std::vector<tarch::multicore::Task*> tertiaryTasks;
switch (p._spacetreeState) {
  // ...
      "traverse(TraversalObserver&)",
      "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
    // ...
    primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
  // ...
    if (p._joining.empty()) {
      // ...
        "traverse(TraversalObserver&)",
        "issue task to traverse tree " << p._id << " in the primary tree set " << p.toString()
      // ...
      primaryTasks.push_back(new TraverseTask(p, *this, observer, false));
    }
    else {
      // ...
        "traverse(TraversalObserver&)",
        "issue task to traverse tree " << p._id << " in the third tree set as it joins in other tree: " << p.toString()
      // ...
      tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));
    }
  // ...
      "traverse(TraversalObserver&)",
      "issue task to traverse tree " << p._id << " in secondary tree set as empty tree and in third set as new tree"
    // ...
    secondaryTasks.push_back(new TraverseTask(p, *this, observer, true));
    tertiaryTasks.push_back(new TraverseTask(p, *this, observer, false));

// ...
[[maybe_unused]] const bool runSequentially = false;
logTraceOut("traverse(TraversalObserver&)-primary");
// ...
  "traverse(TraversalObserver&)",
  "primary tasks (traversals) complete, trigger split data stream if required"
// ...
logDebug("traverse(TraversalObserver&)", "exchange vertical data if required");
// ...
logTraceOut("traverse(TraversalObserver&)-secondary");
// ...
logTraceOut("traverse(TraversalObserver&)-tertiary");
// ...
dataExchangeTime.stop();
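
The primary/secondary/tertiary vectors above feed Peano's fork-join task pattern. The following sketch condenses the assumed phase ordering that the fragments suggest; it is a reading of those fragments, not the literal code.

// Hedged sketch of the three-phase fork-join traversal:
tarch::multicore::spawnAndWait(primaryTasks);       // genuine sweeps over the local trees

streamDataFromSplittingTreesToNewTrees(observer);   // trigger split data stream if required
exchangeVerticalDataBetweenTrees(observer);         // exchange vertical data if required

tarch::multicore::spawnAndWait(secondaryTasks);     // empty runs on trees that result from a split
tarch::multicore::spawnAndWait(tertiaryTasks);      // sweeps over new or joining trees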
      logInfo("traverse(Observer)", "tree " << p->_id << " has successfully joined");
    // ...
    }
    else if (p->mayJoinWithMaster() and p->getGridStatistics().getNumberOfLocalUnrefinedCells() == 0) {
      logInfo("traverse(Observer)", "remove empty tree " << p->_id << " with master " << p->_masterId);
      // ...
        "traverse(Observer)",
        "parent tree " << p->_masterId << " is not local on this rank. Remove child reference"
      // ...
        "traverse(Observer)",
        "parent tree " << p->_masterId << " is local on this rank. Remove child reference"
      // ...
    }
    else if (p->mayJoinWithMaster()) {
      // ...
        "traverse(Observer)",
        // ...
          << p->_id << " as it is deteriorated (encodes no hierarchical data) while master " << p->_masterId
          << " resides on same rank and can't coarsen"
      // ...
      logError("traverse(Observer)", "not implemented yet");
    }
    else if (localRank == masterRank) {
      // ...
        "traverse(Observer)",
        "tree " << p->_id << " is deteriorated (encodes no hierarchical data) yet seems not to constrain its master"
      // ...
        "cleanUpTrees(...)",
        "I should merge tree " << p->_id << " to reduce synchronisation: " << p->toString()
      // ...
      logDebug("traverse(Observer)", "not implemented yet");
result = result + from._statistics;

std::set<int> result;
// ...
result.insert(p._id);
if (tree.maySplit()) {
  int newSpacetreeId = -1;
  // ...
  logDebug("split(int,SplitInstruction,int)", "request new tree on rank " << targetRank);
  // ...
  logDebug(
    "split(int,SplitInstruction,int)",
    "message " << requestMessage.toString() << " sent - wait for answer"
  );
  // ...
  assertionMsg(false, "can't split into tree on a different rank if not compiled with mpi");
  // ...
  if (newSpacetreeId >= 0) {
    tree.split(newSpacetreeId, instruction);
    // ...
      "split(int,SplitInstruction,int)",
    // ...
      "split(int,SplitInstruction,int)",
      "trigger split of tree " << treeId << " into tree " << newSpacetreeId << " with " << instruction