Peano 4
StartTraversalMessage.cpp
#include "StartTraversalMessage.h"

#include <sstream>
#include <algorithm>
#include <iostream>


peano4::parallel::StartTraversalMessage::StartTraversalMessage(int stepIdentifier):
  _stepIdentifier(stepIdentifier) {
}


std::string peano4::parallel::StartTraversalMessage::toString() const {
  std::ostringstream out;
  out << "(";
  out << "stepIdentifier=" << _stepIdentifier;
  out << ")";
  return out.str();
}


int peano4::parallel::StartTraversalMessage::getStepIdentifier() const {
  return _stepIdentifier;
}


void peano4::parallel::StartTraversalMessage::setStepIdentifier(int value) {
  _stepIdentifier = value;
}


#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::StartTraversalMessage::Datatype = MPI_DATATYPE_NULL;
#endif


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::StartTraversalMessage::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::StartTraversalMessage::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::StartTraversalMessage::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::StartTraversalMessage::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::StartTraversalMessage::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


int peano4::parallel::StartTraversalMessage::getSenderRank() const {
  return _senderDestinationRank;
}
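
// Without the LLVM MPI attributes extension, all five datatype getters above
// alias the one static Datatype instance; with the extension, each routine
// tagged [[clang::map_mpi_datatype]] is mapped onto its own lazily
// initialised datatype.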


void peano4::parallel::StartTraversalMessage::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::StartTraversalMessage  instances[2];

  // StartTraversalMessage carries a single attribute, _stepIdentifier.
  int NumberOfAttributes = 0;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int*          blocklen = new int[NumberOfAttributes];
  MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._stepIdentifier), &disp[counter] );
  counter++;

  // Make all displacements relative to the first attribute, then resize the
  // struct type so its extent equals the stride between two instances.
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;

  #else
  // invoke each routine once to trigger its lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
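
// Illustrative sketch, not part of the generated file: for this single-int
// payload, the struct construction above boils down to an MPI_INT whose
// extent is resized to the stride between two consecutive message instances.
// Assuming _stepIdentifier sits at offset 0 within the object, a hand-written
// roughly equivalent type would be
//
//   MPI_Datatype equivalentType;
//   MPI_Type_create_resized(
//     MPI_INT,
//     0,                                                // lower bound
//     sizeof(peano4::parallel::StartTraversalMessage),  // stride in an array of messages
//     &equivalentType );
//   MPI_Type_commit( &equivalentType );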


void peano4::parallel::StartTraversalMessage::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  MPI_Datatype type;
  type = getForkDatatype();
  MPI_Type_free( &type );
  type = getJoinDatatype();
  MPI_Type_free( &type );
  type = getBoundaryExchangeDatatype();
  MPI_Type_free( &type );
  type = getMultiscaleDataExchangeDatatype();
  MPI_Type_free( &type );
  type = getGlobalCommunciationDatatype();
  MPI_Type_free( &type );
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}


void peano4::parallel::StartTraversalMessage::send(const peano4::parallel::StartTraversalMessage& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::parallel::StartTraversalMessage::receive(peano4::parallel::StartTraversalMessage& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}


void peano4::parallel::StartTraversalMessage::send(
  const peano4::parallel::StartTraversalMessage& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::parallel::StartTraversalMessage::receive(
  peano4::parallel::StartTraversalMessage& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status  status;
  MPI_Request receiveRequestHandle;
  int         flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
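
// The two functor-based overloads above implement non-blocking communication:
// they issue MPI_Isend/MPI_Irecv, invoke startCommunicationFunctor() once, and
// then alternate between waitFunctor() and MPI_Test() until the request
// completes. Callers can hook progress or deadlock diagnostics into the wait
// loop, which is exactly what the *AndPollDanglingMessages() wrappers below do.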
#endif


#ifdef Parallel
void peano4::parallel::StartTraversalMessage::sendAndPollDanglingMessages(const peano4::parallel::StartTraversalMessage& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::StartTraversalMessage::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::StartTraversalMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::StartTraversalMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::parallel::StartTraversalMessage::receiveAndPollDanglingMessages(peano4::parallel::StartTraversalMessage& message, int source, int tag, MPI_Comm communicator ) {
  peano4::parallel::StartTraversalMessage::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::StartTraversalMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::StartTraversalMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
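
// Usage sketch (illustrative only; the rank numbers, tag value 0 and payload
// 23 are made up for the example and not prescribed by this file):
//
//   peano4::parallel::StartTraversalMessage::initDatatype();
//
//   if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
//     peano4::parallel::StartTraversalMessage message( 23 );
//     peano4::parallel::StartTraversalMessage::sendAndPollDanglingMessages( message, 1, 0 );
//   }
//   else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
//     peano4::parallel::StartTraversalMessage message( 0 );
//     peano4::parallel::StartTraversalMessage::receiveAndPollDanglingMessages( message, 0, 0 );
//     // message.getStepIdentifier() now yields 23
//   }
//
//   peano4::parallel::StartTraversalMessage::shutdownDatatype();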