MPI.py
1# This file is part of the DaStGen2 project. For conditions of distribution and
2# use, please see the copyright notice at www.peano-framework.org
3import dastgen2
4from dastgen2.Utils import construct_ifdef_string
5
6
7class MPI(object):
8 """
9
10 Represents the MPI aspect injected into a DaStGen model.
11
12 """
13
14 def __init__(self):
15 pass
16
17 def set_model(self, data_model):
18 self._data_model = data_model
19
20 def get_include(self):
21 return """
22#ifdef Parallel
23 #include <mpi.h>
24 #include <functional>
25#endif
26"""
27
28 def get_attributes(self):
29 return """
30 #ifdef Parallel
31 private:
32 int _senderDestinationRank;
33
34 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
35 /**
36 * Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy
37 * initialisation of the datatype. However, Peano calls init explicitly
38 * in most cases. Without the LLVM extension, which would cache the MPI
39 * datatype once constructed, this field stores the type instead.
40 */
41 static MPI_Datatype Datatype;
42 #endif
43 #endif
44"""
45
46 def get_method_declarations(self, full_qualified_name):
47 return (
48 """
49 #ifdef Parallel
50 /**
51 * Hands out MPI datatype if we work without the LLVM MPI extension.
52 * If we work with this additional feature, this is the routine where
53 * the lazy initialisation is done and the datatype is also cached.
54 */
55 [[clang::map_mpi_datatype]]
56 static MPI_Datatype getForkDatatype();
57
58 [[clang::map_mpi_datatype]]
59 static MPI_Datatype getJoinDatatype();
60
61 [[clang::map_mpi_datatype]]
62 static MPI_Datatype getBoundaryExchangeDatatype();
63
64 [[clang::map_mpi_datatype]]
65 static MPI_Datatype getMultiscaleDataExchangeDatatype();
66
67 [[clang::map_mpi_datatype]]
68 static MPI_Datatype getGlobalCommunciationDatatype();
69
70 [[clang::map_mpi_datatype]]
71 static void freeForkDatatype();
72
73 [[clang::map_mpi_datatype]]
74 static void freeJoinDatatype();
75
76 [[clang::map_mpi_datatype]]
77 static void freeBoundaryExchangeDatatype();
78
79 [[clang::map_mpi_datatype]]
80 static void freeMultiscaleDataExchangeDatatype();
81
82 [[clang::map_mpi_datatype]]
83 static void freeGlobalCommunciationDatatype();
84
85 /**
86 * @return The rank of the sender of an object. It only makes sense to call
87 * this routine after you've invoked receive with MPI_ANY_SOURCE.
88 */
89 int getSenderRank() const;
90
91 /**
92 * Wrapper around getDatatype() to trigger lazy evaluation if we
93 * use the lazy initialisation.
94 */
95 static void initDatatype();
96
97 /**
98 * Free the underlying MPI datatype.
99 */
100 static void shutdownDatatype();
101
102 /**
103 * In DaStGen (the first version), I had non-static versions of the send
104 * as well as the receive. However, this did not work with newer C++11
105 * versions, as a member function using the this pointer usually doesn't
106 * see the vtable, while the init sees the object from outside, i.e.
107 * including a vtable. So this routine now is basically an alias for a
108 * blocking MPI_Send.
109 */
110 static void send(const """
111 + full_qualified_name
112 + """& buffer, int destination, int tag, MPI_Comm communicator );
113 static void receive("""
114 + full_qualified_name
115 + """& buffer, int source, int tag, MPI_Comm communicator );
116
117 /**
118 * Alternative to the other send() where I trigger a non-blocking send and
119 * then invoke the functor until the corresponding MPI_Test tells me that
120 * the message went through. In systems with heavy MPI usage, this can
121 * help to avoid deadlocks.
122 */
123 static void send(const """
124 + full_qualified_name
125 + """& buffer, int destination, int tag, std::function<void()> startCommunicationFunctor, std::function<void()> waitFunctor, MPI_Comm communicator );
126 static void receive( """
127 + full_qualified_name
128 + """& buffer, int source, int tag, std::function<void()> startCommunicationFunctor, std::function<void()> waitFunctor, MPI_Comm communicator );
129 #endif
130"""
131 )
132
133 def get_implementation(self, full_qualified_name):
134 result = (
135 """
136#ifdef Parallel
137
138#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
139MPI_Datatype """
140 + full_qualified_name
141 + """::Datatype = MPI_DATATYPE_NULL;
142#endif
143
144
145[[clang::map_mpi_datatype]]
146MPI_Datatype """
147 + full_qualified_name
148 + """::getForkDatatype() {
149 return Datatype;
150}
151
152
153[[clang::map_mpi_datatype]]
154MPI_Datatype """
155 + full_qualified_name
156 + """::getGlobalCommunciationDatatype() {
157 return Datatype;
158}
159
160
161[[clang::map_mpi_datatype]]
162MPI_Datatype """
163 + full_qualified_name
164 + """::getJoinDatatype() {
165 return Datatype;
166}
167
168
169[[clang::map_mpi_datatype]]
170MPI_Datatype """
171 + full_qualified_name
172 + """::getBoundaryExchangeDatatype() {
173 return Datatype;
174}
175
176
177[[clang::map_mpi_datatype]]
178MPI_Datatype """
179 + full_qualified_name
180 + """::getMultiscaleDataExchangeDatatype() {
181 return Datatype;
182}
183
184
185[[clang::map_mpi_datatype]]
186void """
187 + full_qualified_name
188 + """::freeForkDatatype() {
189 if (Datatype != MPI_DATATYPE_NULL){
190 MPI_Type_free(&Datatype);
191 Datatype = MPI_DATATYPE_NULL;
192 }
193}
194
195
196[[clang::map_mpi_datatype]]
197void """
198 + full_qualified_name
199 + """::freeGlobalCommunciationDatatype() {
200 if (Datatype != MPI_DATATYPE_NULL){
201 MPI_Type_free(&Datatype);
202 Datatype = MPI_DATATYPE_NULL;
203 }
204}
205
206
207[[clang::map_mpi_datatype]]
208void """
209 + full_qualified_name
210 + """::freeJoinDatatype() {
211 if (Datatype != MPI_DATATYPE_NULL){
212 MPI_Type_free(&Datatype);
213 Datatype = MPI_DATATYPE_NULL;
214 }
215}
216
217
218[[clang::map_mpi_datatype]]
219void """
220 + full_qualified_name
221 + """::freeBoundaryExchangeDatatype() {
222 if (Datatype != MPI_DATATYPE_NULL){
223 MPI_Type_free(&Datatype);
224 Datatype = MPI_DATATYPE_NULL;
225 }
226}
227
228
229[[clang::map_mpi_datatype]]
230void """
231 + full_qualified_name
232 + """::freeMultiscaleDataExchangeDatatype() {
233 if (Datatype != MPI_DATATYPE_NULL){
234 MPI_Type_free(&Datatype);
235 Datatype = MPI_DATATYPE_NULL;
236 }
237}
238
239
240int """
241 + full_qualified_name
242 + """::getSenderRank() const {
243 return _senderDestinationRank;
244}
245
246
247
248void """
249 + full_qualified_name
250 + """::initDatatype() {
251 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
252 """
253 + full_qualified_name
254 + """ instances[2];
255
256 int NumberOfAttributes = 0;
257"""
258 )
259
260 for i in self._data_model._attributes:
261 if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
262 # No comms for static/const members
263 continue
264 if i.ifdefs != []:
265 result += construct_ifdef_string(i.ifdefs)
266 result += " NumberOfAttributes++;\n#endif \n"
267 else:
268 result += " NumberOfAttributes++;\n"
269
270 result += """
271 MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
272 int* blocklen = new int[NumberOfAttributes];
273 MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];
274
275 int counter = 0;
276"""
277
278 for i in self._data_model._attributes:
279 if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
280 # No comms for static/const members
281 continue
282 if i.ifdefs != []:
283 result += construct_ifdef_string(i.ifdefs)
284 for ii in i.get_native_MPI_type():
285 result += " subtypes[counter] = " + ii[0] + ";\n"
286 result += " blocklen[counter] = " + str(ii[1]) + ";\n"
287 result += " counter++;\n"
288 if i.ifdefs != []:
289 result += "#endif\n"
290
291 result += """
292 MPI_Aint baseFirstInstance;
293 MPI_Aint baseSecondInstance;
294 MPI_Get_address( &instances[0], &baseFirstInstance );
295 MPI_Get_address( &instances[1], &baseSecondInstance );
296
297 counter = 0;
298"""
299
300 for i in self._data_model._attributes:
301 if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
302 # No comms for static/const members
303 continue
304 if i.ifdefs != []:
305 result += "\n"
306 result += construct_ifdef_string(i.ifdefs)
307 for ii in i.get_first_plain_C_attribute():
308 result += " MPI_Get_address( &(instances[0]."
309 result += ii[0]
310 result += "), &disp[counter] );\n"
311 result += " counter++;\n"
312 if i.ifdefs != []:
313 result += "#endif\n"
314
315 result += (
316 """
317 MPI_Aint offset = disp[0] - baseFirstInstance;
318 MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
319 for (int i=NumberOfAttributes-1; i>=0; i--) {
320 disp[i] = disp[i] - disp[0];
321 }
322
323 int errorCode = 0;
324 MPI_Datatype tmpType;
325 errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
326 errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
327 errorCode += MPI_Type_commit( &Datatype );
328 errorCode += MPI_Type_free( &tmpType );
329 if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;
330
331 delete[] subtypes;
332 delete[] blocklen;
333 delete[] disp;
334
335 #else
336 // invoke the getters once to trigger the lazy initialisation
337 getForkDatatype();
338 getJoinDatatype();
339 getBoundaryExchangeDatatype();
340 getMultiscaleDataExchangeDatatype();
341 getGlobalCommunciationDatatype();
342 #endif
343}
344
345
346void """
347 + full_qualified_name
348 + """::shutdownDatatype() {
349 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
350 freeForkDatatype();
351 freeJoinDatatype();
352 freeBoundaryExchangeDatatype();
353 freeMultiscaleDataExchangeDatatype();
354 freeGlobalCommunciationDatatype();
355 #else
356 MPI_Datatype type = Datatype;
357 MPI_Type_free( &type );
358 #endif
359}
360
361
362void """
363 + full_qualified_name
364 + """::send(const """
365 + full_qualified_name
366 + """& buffer, int destination, int tag, MPI_Comm communicator ) {
367 MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
368}
369
370
371void """
372 + full_qualified_name
373 + """::receive("""
374 + full_qualified_name
375 + """& buffer, int source, int tag, MPI_Comm communicator ) {
376 MPI_Status status;
377 MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
378 buffer._senderDestinationRank = status.MPI_SOURCE;
379}
380
381
382void """
383 + full_qualified_name
384 + """::send(
385 const """
386 + full_qualified_name
387 + """& buffer,
388 int destination,
389 int tag,
390 std::function<void()> startCommunicationFunctor,
391 std::function<void()> waitFunctor,
392 MPI_Comm communicator
393) {
394 MPI_Request sendRequestHandle;
395 int flag = 0;
396 MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
397 MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
398 startCommunicationFunctor();
399 while (!flag) {
400 waitFunctor();
401 MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
402 }
403}
404
405
406void """
407 + full_qualified_name
408 + """::receive(
409 """
410 + full_qualified_name
411 + """& buffer,
412 int source,
413 int tag,
414 std::function<void()> startCommunicationFunctor,
415 std::function<void()> waitFunctor,
416 MPI_Comm communicator
417) {
418 MPI_Status status;
419 MPI_Request receiveRequestHandle;
420 int flag = 0;
421 MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
422 MPI_Test( &receiveRequestHandle, &flag, &status );
423 startCommunicationFunctor();
424 while (!flag) {
425 waitFunctor();
426 MPI_Test( &receiveRequestHandle, &flag, &status );
427 }
428 buffer._senderDestinationRank = status.MPI_SOURCE;
429}
430#endif
431"""
432 )
433
434 return result
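The aspect above exposes only a small surface: set_model() wires in a data model, and the get_*() routines return C++ snippets that the DaStGen2 generator presumably splices into the header and implementation of the generated record. The following minimal sketch illustrates that flow. It is not part of MPI.py: the stub classes, the attribute name "_x", and the qualified name "examples::MyDofs" are illustrative assumptions, and the import path assumes the layout of the Peano source tree; in real use the model and its attributes come from dastgen2 itself.

# Minimal usage sketch (illustrative only). The stubs below merely mimic the
# attribute/model interface that the MPI aspect accesses; real attributes and
# models are provided by dastgen2.
from dastgen2.aspects.MPI import MPI   # import path assumes the Peano source layout


class StubAttribute:
    # Hypothetical stand-in for a dastgen2 attribute, e.g. a plain double "_x".
    _is_static = False
    _is_const_static = False
    _is_constexpr = False
    _is_const = False
    ifdefs = []

    def get_native_MPI_type(self):
        # (MPI datatype, block length) pairs, as consumed by get_implementation()
        return [("MPI_DOUBLE", 1)]

    def get_first_plain_C_attribute(self):
        # Name of the first plain C member backing this attribute
        return [("_x",)]


class StubModel:
    # Hypothetical stand-in for a DaStGen2 data model.
    _attributes = [StubAttribute()]


aspect = MPI()
aspect.set_model(StubModel())

header_snippet = (
    aspect.get_include()
    + aspect.get_attributes()
    + aspect.get_method_declarations("examples::MyDofs")
)
source_snippet = aspect.get_implementation("examples::MyDofs")
print(source_snippet)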