template <class Container>
void peano4::parallel::SpacetreeSet::streamDataFromJoiningTreeToMasterTree(
  [[maybe_unused]] Container& stackContainer,
  [[maybe_unused]] int        master,
  [[maybe_unused]] int        worker
) {
  // ... body elided in this listing; all parameters are marked [[maybe_unused]] ...
}

template <class Container>
void peano4::parallel::SpacetreeSet::deleteAllStacks(
  Container& stackContainer,
  int        spacetreeId
) {
  logDebug( "deleteAllStacks()", "delete all stacks of " << spacetreeId );
  stackContainer.clear(spacetreeId);
}

// Copies (streams) data from the master to the worker.
template <class Container>
void peano4::parallel::SpacetreeSet::streamDataFromSplittingTreeToNewTree(
  Container& stackContainer,
  int        master,
  int        worker
) {
  // ... set-up of sourceStack, destinationStack, the participating ranks and
  //     the message meta information is elided in this listing ...

  if ( not stackContainer.holdsStack( master, sourceStack ) ) {
    logDebug(
      "streamDataFromSplittingTreeToNewTree()",
      "source stack for target stack " << destinationStack << " is empty, so skip copying"
    );
  }
  else if ( not stackContainer.getForPush(worker,destinationStack)->empty() ) {
    logDebug(
      "streamDataFromSplittingTreeToNewTree()",
      "target stack " << destinationStack << " of tree " << worker <<
      " contains already " << stackContainer.getForPush(worker,destinationStack)->size() <<
      " entries, so skip copying"
    );
  }
  else {
    logDebug(
      "streamDataFromSplittingTreeToNewTree()",
      "copy stack " << sourceStack << " from tree " << master <<
      " into stack " << destinationStack << " from tree " << worker <<
      " with a stack size of " << stackContainer.getForPush(master,sourceStack)->size() <<
      ". Can be done directly as both stacks reside on same machine"
    );
    assertion4( stackContainer.getForPush(worker,destinationStack)->empty(), master, worker, sourceStack, destinationStack );
    stackContainer.getForPush(worker,destinationStack)->clone( *stackContainer.getForPop(master,sourceStack) );
  }

  if ( stackContainer.holdsStack( master, sourceStack ) ) {
    logDebug(
      "streamDataFromSplittingTreeToNewTree()",
      "clear stack " << sourceStack << " on tree " << master
    );
    stackContainer.getForPush(master,sourceStack)->clear();
  }

  // ... elided control flow; the lines below stem from the branch that
  //     streams the data via non-blocking MPI messages ...
  const int messageSize =
    stackContainer.holdsStack( master, sourceStack ) ?
    stackContainer.getForPop(master,sourceStack)->size() : 0;
  // ...
  logDebug(
    "streamDataFromSplittingTreeToNewTree()",
    "send stack " << sourceStack << " from tree " << master <<
    " on rank " << sourceRank << " through tag " << meta.first <<
    " to tree " << worker << " on rank " << destinationRank <<
    ". size=" << messageSize
  );
  // ...
  stackContainer.getForPush(master,sourceStack)->startSend(
    // ...
    destinationRank, meta.first, meta.second
  );
  // ...
  logDebug(
    "streamDataFromSplittingTreeToNewTree()",
    "receive " << message.getValue() << " entries from tree " << master <<
    " on rank " << sourceRank << " (used tag " << metaInfo.first << ")"
  );
  stackContainer.getForPush(worker,destinationStack)->startReceive(
    // ...
    sourceRank, metaInfo.first, metaInfo.second, message.getValue()
  );
  // ...
}

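The startSend()/startReceive() calls above are non-blocking, so the streamed data only becomes usable once the outstanding transfers have completed. Below is a minimal sketch of the assumed calling order; the identifiers myStackContainer, masterTreeId and newWorkerTreeId are placeholders, and the real orchestration inside SpacetreeSet may differ:

  // Hypothetical usage sketch; variable names are assumptions, not Peano code.
  peano4::parallel::SpacetreeSet::streamDataFromSplittingTreeToNewTree(
    myStackContainer, masterTreeId, newWorkerTreeId );
  // Complete the non-blocking transfers before the new tree uses its data:
  peano4::parallel::SpacetreeSet::finishAllOutstandingSendsAndReceives(
    myStackContainer, masterTreeId );
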
template <class Container>
void peano4::parallel::SpacetreeSet::exchangeAllVerticalDataExchangeStacks(
  [[maybe_unused]] Container& stackContainer,
  [[maybe_unused]] int        spacetreeId,
  [[maybe_unused]] int        parentId
) {
  // ... body elided in this listing ...
}

// Realise domain boundary exchange (over multiple scales).
template <class Container>
void peano4::parallel::SpacetreeSet::exchangeAllHorizontalDataExchangeStacks(
  [[maybe_unused]] Container& stackContainer,
  [[maybe_unused]] int        spacetreeId,
  [[maybe_unused]] bool       symmetricDataCardinality
) {
  // ...
  assertionMsg(
    symmetricDataCardinality,
    "haven't implemented the asymmetric case yet, but would be simple: Just need the integer messages as I do for the vertical data flow"
  );
  // ...
  std::set< peano4::maps::StackKey > keys = stackContainer.getKeys();

  // Trigger the non-blocking sends and the matching receives.
  for (auto& sourceStackKey: keys) {
    if (
      sourceStackKey.first==spacetreeId
      // ...
      and
      not stackContainer.getForPop(sourceStackKey)->empty()
    ) {
      // ... determination of rank, inStack and the meta information is elided ...
      int count = stackContainer.getForPush(sourceStackKey)->size();
      // ...
      logDebug(
        "exchangeAllHorizontalDataExchangeStacks(...)",
        "send stack " << sourceStackKey.second << " of tree " << sourceStackKey.first <<
        " to rank " << rank << " with tag " << sendMetaInfo.first <<
        ": " << count << " element(s)"
      );
      stackContainer.getForPush(sourceStackKey)->startSend(
        // ...
        rank, sendMetaInfo.first, sendMetaInfo.second
      );
      // ...
      logDebug(
        "exchangeAllHorizontalDataExchangeStacks(...)",
        "in return, receive " << count << " element(s) from rank " << rank <<
        " with tag " << receiveMetaInfo.first << " into stack " << inStack
      );
      stackContainer.getForPush(spacetreeId,inStack)->startReceive(
        // ...
        receiveMetaInfo.first,
        receiveMetaInfo.second, count
      );
    }
  }

  // Map output streams directly onto the input streams of target trees held
  // within the same container (plain copy, no MPI involved).
  for (auto sourceStackKey: keys) {
    if (
      sourceStackKey.first==spacetreeId
      // ...
      and
      not stackContainer.getForPop(sourceStackKey)->empty()
    ) {
      // ... determination of targetId, targetStack and comparisonStackForTarget is elided ...
      logDebug(
        "exchangeAllHorizontalDataExchangeStacks(...)",
        "map output stream " << sourceStackKey.second << " of tree " << spacetreeId <<
        " onto input stream " << targetStack << " of tree " << targetId <<
        ". Copy " << stackContainer.getForPush(sourceStackKey)->size() << " entries"
      );
      // ...
      assertion4( stackContainer.getForPush(targetId,targetStack)->empty(), spacetreeId, targetId, sourceStackKey.second, targetStack );
      stackContainer.getForPush(targetId,targetStack)->clone( *stackContainer.getForPop(sourceStackKey) );
      // ...
      assertion8(
        stackContainer.getForPush(targetId,targetStack)->size() == stackContainer.getForPush(spacetreeId,comparisonStackForTarget)->size()
        or
        stackContainer.getForPush(spacetreeId,comparisonStackForTarget)->empty(),
        stackContainer.getForPush(targetId,targetStack)->size(),
        stackContainer.getForPush(spacetreeId,comparisonStackForTarget)->size(),
        stackContainer.getForPush(targetId,targetStack)->toString(),
        stackContainer.getForPush(spacetreeId,comparisonStackForTarget)->toString(),
        targetStack, comparisonStackForTarget, spacetreeId,
        "target stack is what I have already sent over"
      );
      // ...
      stackContainer.getForPush(sourceStackKey)->clear();
    }
  }

  // ...
  logTraceOut( "exchangeAllHorizontalDataExchangeStacks(...)" );
}

// Finishes all the sends and receives that are still active.
template <class Container>
void peano4::parallel::SpacetreeSet::finishAllOutstandingSendsAndReceives(
  Container& stackContainer,
  int        spacetreeId
) {
  // ...
  std::set< peano4::maps::StackKey > keys = stackContainer.getKeys();
  // ...
  bool allSendReceivesFinished = false;
  // ...
  while (not allSendReceivesFinished) {
    allSendReceivesFinished = true;
    for (auto& sourceStackKey: keys) {
      if ( sourceStackKey.first==spacetreeId ) {
        logDebug(
          "finishAllOutstandingSendsAndReceives(...)",
          "check stack no " << sourceStackKey.first << " x " << sourceStackKey.second
        );
        allSendReceivesFinished &= stackContainer.getForPush(sourceStackKey)->tryToFinishSendOrReceive();
        // ...
        const int rank = stackContainer.getForPush(sourceStackKey)->sendingOrReceiving();
        // ...
        dataExchangeTime.stop();
        smartmpi::reportWaitTime( dataExchangeTime.getCPUTime(), rank );
        // ...
      }
    }

    // ... deadlock book-keeping (partially elided in this listing) ...
    tarch::mpi::Rank::getInstance().writeTimeOutWarning(
      "peano4::parallel::SpacetreeSet",
      "finishAllOutstandingSendsAndReceives(...)", spacetreeId, -1, -1
    );
    // ...
    tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut(
      "peano4::parallel::SpacetreeSet",
      "finishAllOutstandingSendsAndReceives(...)", spacetreeId, -1, -1
    );
    // ...
  }

  logDebug(
    "finishAllOutstandingSendsAndReceives(...)",
    "all data transfer is done, trigger garbage collection"
  );
  stackContainer.garbageCollection(spacetreeId);
}

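finishAllOutstandingSendsAndReceives() polls the pending transfers with tryToFinishSendOrReceive() instead of blocking on a single wait, which lets it report wait times and run deadlock diagnostics while it spins. A generic sketch of that polling pattern over plain MPI requests, provided only as an illustration and not taken from Peano, is:

  // Generic polling sketch (not Peano code): test outstanding requests
  // repeatedly instead of blocking, so diagnostics can be interleaved.
  #include <mpi.h>
  #include <vector>

  void pollUntilComplete( std::vector<MPI_Request>& requests ) {
    int allFinished = 0;
    while (not allFinished) {
      MPI_Testall(
        static_cast<int>(requests.size()), requests.data(),
        &allFinished, MPI_STATUSES_IGNORE
      );
      // ... a real code would interleave time-out checks and other message
      //     handling here while it waits ...
    }
  }
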
// Exchange periodic BC data.
template <class Container>
void peano4::parallel::SpacetreeSet::exchangeAllPeriodicBoundaryDataStacks(
  Container& stackContainer,
  int        spacetreeId
) {
  // ...
  std::set< peano4::maps::StackKey > keys = stackContainer.getKeys();
  // ...
  for (auto& sourceStackKey: keys) {
    if (
      sourceStackKey.first==spacetreeId
      // ...
      and
      not stackContainer.getForPush(sourceStackKey)->empty()
    ) {
      // ... determination of targetStack is elided ...
      logDebug(
        "exchangeAllPeriodicBoundaryDataStacks(...)",
        "map output stream " << sourceStackKey.second << " onto input stream " << targetStack <<
        " to realise periodic boundary conditions. Copy/clone " << stackContainer.getForPush(sourceStackKey)->size() <<
        " entries"
      );
      // ...
      assertion4( stackContainer.getForPush(sourceStackKey.first,targetStack)->empty(), sourceStackKey.first, sourceStackKey.second, targetStack, stackContainer.getForPush(sourceStackKey.first,targetStack)->size() );
      stackContainer.getForPush(sourceStackKey.first,targetStack)->clone( *stackContainer.getForPush(sourceStackKey) );
      stackContainer.getForPush(sourceStackKey)->clear();
    }
  }
  // ...
  logTraceOut( "exchangeAllPeriodicBoundaryDataStacks(...)" );
}