Peano
TreeEntry.cpp
#include "TreeEntry.h"

#include <sstream>
#include <algorithm>


peano4::parallel::TreeEntry::TreeEntry(int __id, int __master) {
  setId( __id);
  setMaster( __master);
}


std::string peano4::parallel::TreeEntry::toString() const {
  std::ostringstream out;
  out << "(";
  out << "id=" << _id;
  out << ",";
  out << "master=" << _master;
  out << ")";
  return out.str();
}


int peano4::parallel::TreeEntry::getId() const {
  return _id;
}


void peano4::parallel::TreeEntry::setId(int value) {
  _id = value;
}


int peano4::parallel::TreeEntry::getMaster() const {
  return _master;
}


void peano4::parallel::TreeEntry::setMaster(int value) {
  _master = value;
}


#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::TreeEntry::Datatype = MPI_DATATYPE_NULL;
#endif


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


// All communication channels share the one static Datatype, so the free
// routines below are identical.
[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

int peano4::parallel::TreeEntry::getSenderRank() const {
  return _senderDestinationRank;
}


void peano4::parallel::TreeEntry::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::TreeEntry instances[2];

  // two attributes per entry: _id and _master
  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._id), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._master), &disp[counter] );
  counter++;

  // displacements are made relative to the first attribute; the resized type
  // then spans exactly the distance between two consecutive instances
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;
  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}


void peano4::parallel::TreeEntry::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeForkDatatype();
  freeJoinDatatype();
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}


void peano4::parallel::TreeEntry::send(const peano4::parallel::TreeEntry& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::parallel::TreeEntry::receive(peano4::parallel::TreeEntry& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}


void peano4::parallel::TreeEntry::send(
  const peano4::parallel::TreeEntry& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  // non-blocking send that keeps polling; the wait functor is invoked while the
  // message is still in flight
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::parallel::TreeEntry::receive(
  peano4::parallel::TreeEntry& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif

#ifdef Parallel
void peano4::parallel::TreeEntry::sendAndPollDanglingMessages(const peano4::parallel::TreeEntry& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeEntry::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeEntry", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeEntry", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::parallel::TreeEntry::receiveAndPollDanglingMessages(peano4::parallel::TreeEntry& message, int source, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeEntry::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeEntry", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeEntry", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
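
The listing above is generated MPI glue. The sketch below is not part of TreeEntry.cpp; it merely illustrates the intended lifecycle under an MPI build (Parallel defined): commit the derived datatype once, exchange entries through the polling wrappers, and free the datatype again before MPI shuts down. The function name exchangeTreeEntryExample, the tag value, the two-rank layout and the include paths are illustrative assumptions, not taken from the file.

// Hypothetical driver, for illustration only.
#include "peano4/parallel/TreeEntry.h"   // assumed include path
#include "tarch/mpi/Rank.h"

void exchangeTreeEntryExample() {
  // Commit the derived MPI datatype once, e.g. during programme startup.
  peano4::parallel::TreeEntry::initDatatype();

  const int myRank  = tarch::mpi::Rank::getInstance().getRank();
  const int partner = (myRank == 0) ? 1 : 0;   // assumes at least two ranks
  const int tag     = 0;                       // illustrative tag
  MPI_Comm  comm    = tarch::mpi::Rank::getInstance().getCommunicator();

  if (myRank == 0) {
    peano4::parallel::TreeEntry out(/*id*/ 23, /*master*/ 0);
    peano4::parallel::TreeEntry::sendAndPollDanglingMessages(out, partner, tag, comm);
  }
  else if (myRank == 1) {
    peano4::parallel::TreeEntry in(0, 0);
    peano4::parallel::TreeEntry::receiveAndPollDanglingMessages(in, partner, tag, comm);
    // in.getId() == 23 and in.getMaster() == 0 once the receive has completed
  }

  // Release the datatype before MPI is finalised.
  peano4::parallel::TreeEntry::shutdownDatatype();
}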