Peano
TreeManagementMessage.cpp
Go to the documentation of this file.
#include "TreeManagementMessage.h"


#include <sstream>
#include <algorithm>


peano4::parallel::TreeManagementMessage::TreeManagementMessage(int __masterSpacetreeId, int __workerSpacetreeId, Action __action){
  setMasterSpacetreeId( __masterSpacetreeId);
  setWorkerSpacetreeId( __workerSpacetreeId);
  setAction( __action);
}


std::string peano4::parallel::TreeManagementMessage::toString() const {
  std::ostringstream out;
  out << "(";
  out << "masterSpacetreeId=" << _masterSpacetreeId;
  out << ",";
  out << "workerSpacetreeId=" << _workerSpacetreeId;
  out << ",";
  out << "action=" << (_action==Action::RequestNewRemoteTree? "RequestNewRemoteTree" : "")
      << (_action==Action::CreateNewRemoteTree? "CreateNewRemoteTree" : "")
      << (_action==Action::RemoveChildTreeFromBooksAsChildBecameEmpty? "RemoveChildTreeFromBooksAsChildBecameEmpty" : "")
      << (_action==Action::JoinWithWorker? "JoinWithWorker" : "")
      << (_action==Action::Acknowledgement? "Acknowledgement" : "");
  out << ")";
  return out.str();
}


int peano4::parallel::TreeManagementMessage::getMasterSpacetreeId() const {
  return _masterSpacetreeId;
}


void peano4::parallel::TreeManagementMessage::setMasterSpacetreeId(int value) {
  _masterSpacetreeId = value;
}


int peano4::parallel::TreeManagementMessage::getWorkerSpacetreeId() const {
  return _workerSpacetreeId;
}


void peano4::parallel::TreeManagementMessage::setWorkerSpacetreeId(int value) {
  _workerSpacetreeId = value;
}


peano4::parallel::TreeManagementMessage::Action peano4::parallel::TreeManagementMessage::getAction() const {
  return _action;
}


void peano4::parallel::TreeManagementMessage::setAction(Action value) {
  _action = value;
}


#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::TreeManagementMessage::Datatype = MPI_DATATYPE_NULL;
#endif


// All five datatype accessors hand out the same shared MPI datatype.
[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


int peano4::parallel::TreeManagementMessage::getSenderRank() const {
  return _senderDestinationRank;
}


void peano4::parallel::TreeManagementMessage::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::TreeManagementMessage instances[2];

  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._masterSpacetreeId), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._workerSpacetreeId), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._action), &disp[counter] );
  counter++;

  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;
  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
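// Note on the construction above: attribute displacements are taken relative to
// the first attribute of instances[0], the true extent of one message is
// measured as the distance between instances[0] and instances[1], and
// MPI_Type_create_resized stamps that offset and extent onto the struct type so
// that arrays of messages (count > 1) are strided correctly.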


void peano4::parallel::TreeManagementMessage::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeForkDatatype();
  freeJoinDatatype();
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}


void peano4::parallel::TreeManagementMessage::send(const peano4::parallel::TreeManagementMessage& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::parallel::TreeManagementMessage::receive(peano4::parallel::TreeManagementMessage& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}


void peano4::parallel::TreeManagementMessage::send(
  const peano4::parallel::TreeManagementMessage& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::parallel::TreeManagementMessage::receive(
  peano4::parallel::TreeManagementMessage& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif
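// Note on the two polling variants above: the message is issued as a
// non-blocking MPI call, startCommunicationFunctor() runs once straight away,
// and waitFunctor() is invoked repeatedly until MPI_Test signals completion.
// The wrappers below use these hooks to emit time-out warnings and to keep
// servicing dangling messages while they wait.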

#ifdef Parallel
void peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(const peano4::parallel::TreeManagementMessage& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeManagementMessage::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages(peano4::parallel::TreeManagementMessage& message, int source, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeManagementMessage::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeManagementMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeManagementMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif

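For illustration only, and not part of the file above: a minimal sketch of how this generated message type might be exchanged between two ranks. It assumes an MPI-enabled build (Parallel defined), that initDatatype() has already been called, and uses a hypothetical helper name, tag value and include paths.

#include "peano4/parallel/TreeManagementMessage.h"
#include "tarch/mpi/Rank.h"

// Hypothetical helper: rank 0 asks rank 1 to create a new remote tree.
void requestRemoteTreeExample() {
  const int tag = 42;  // illustrative tag value only
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    peano4::parallel::TreeManagementMessage request(
      0, 1, peano4::parallel::TreeManagementMessage::Action::RequestNewRemoteTree
    );
    peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages( request, 1, tag );
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    peano4::parallel::TreeManagementMessage message(
      0, 1, peano4::parallel::TreeManagementMessage::Action::Acknowledgement  // placeholder values; overwritten by receive
    );
    peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages( message, 0, tag );
    // message.getAction() now tells rank 1 what rank 0 asked for
  }
}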
Referenced symbols (brief documentation):

static Rank& getInstance()
  This operation returns the singleton instance. Definition: Rank.cpp:539
void writeTimeOutWarning(const std::string& className, const std::string& methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
  Writes a warning if relevant. Definition: Rank.cpp:148
void triggerDeadlockTimeOut(const std::string& className, const std::string& methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string& comment="")
  Triggers a time out and shuts down the cluster if a timeout is violated. Definition: Rank.cpp:124
void setDeadlockWarningTimeStamp()
  Memorise the global timeout. Definition: Rank.cpp:193
void setDeadlockTimeOutTimeStamp()
  Definition: Rank.cpp:198
static ServiceRepository& getInstance()
virtual void receiveDanglingMessages() override
  Answer to MPI messages.
peano4::parallel::TreeManagementMessage::Action getAction() const
static MPI_Datatype Datatype
  Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy initialisation of the datatype.
static MPI_Datatype getForkDatatype()
  Hands out the MPI datatype if we work without the LLVM MPI extension.
static void initDatatype()
  Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
static void shutdownDatatype()
  Free the underlying MPI datatype.
static void send(const peano4::parallel::TreeManagementMessage& buffer, int destination, int tag, MPI_Comm communicator)
  In DaStGen (the first version), I had a non-static version of the send as well as the receive.
static void receive(peano4::parallel::TreeManagementMessage& buffer, int source, int tag, MPI_Comm communicator)
static void sendAndPollDanglingMessages(const peano4::parallel::TreeManagementMessage& message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static void receiveAndPollDanglingMessages(peano4::parallel::TreeManagementMessage& message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())