Peano
Loading...
Searching...
No Matches
MPI.py
Go to the documentation of this file.
1# This file is part of the Peano 4 project. For conditions of distribution and
2# use, please see the copyright notice at www.peano-framework.org
3#import dastgen2.aspects
4
5from .Aspect import Aspect
6from dastgen2.Utils import construct_ifdef_string
7
8
class MPI(Aspect):
    """!

    Represents the MPI aspect injected into a DaStGen model.

    This object does not work with data types which host a smart pointer. In
    many codes, we do not know at the time when we add this aspect to a
    data model if the underlying data_model has a smart pointer. If so, we
    have to exchange the MPI aspect later on for the one supporting smart
    pointers. The exchange is typically done by
    peano4.datamodel.DaStGen2GeneratorForObjectsWithSmartPointers.

    The aspect contributes four snippets of generated C++ text (includes,
    attributes, method declarations, method implementations). All generated
    code is wrapped into #ifdef Parallel, so it compiles away entirely in
    non-MPI builds.

    NOTE(review): get_implementation() reads self._data_model, which is not
    set in __init__; presumably the surrounding generator attaches the data
    model to the aspect before the getters are invoked - confirm against
    the calling generator.

    NOTE(review): the generated identifiers getGlobalCommunciationDatatype
    and freeGlobalCommunciationDatatype carry a misspelling
    ("Communciation"). It is part of the public C++ API emitted by this
    aspect, so it must not be corrected here in isolation.

    @todo I think we should introduce an Aspects interface, which makes it clear which routines are to be implemented

    """

    def __init__(self):
        # Stateless on construction: all output is derived from the
        # arguments of the getter routines (plus self._data_model, which
        # is attached externally - see class docstring).
        pass

    def get_include(self):
        """!

        Return the include directives the generated C++ code depends on:
        mpi.h for the MPI API and functional for the std::function
        arguments of the non-blocking send/receive variants. Guarded by
        #ifdef Parallel.

        """
        return """
#ifdef Parallel
 #include <mpi.h>
 #include <functional>
#endif
"""

    def get_attributes(self):
        """!

        Return the C++ attributes this aspect adds to the generated record:
        the rank of the sender of the most recently received object, plus -
        unless the LLVM MPI language extension is used - a static field
        caching the derived MPI datatype.

        """
        return """
 #ifdef Parallel
 private:
 int _senderDestinationRank;

 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
 /**
 * Whenever we use LLVM's MPI extension (DaStGe), we rely on lazy
 * initialisation of the datatype. However, Peano calls init explicitly
 * in most cases. Without the LLVM extension which caches the MPI
 * datatype once constructed, this field stores the type.
 */
 static MPI_Datatype Datatype;
 #endif
 #endif
"""

    def get_method_declarations(self, full_qualified_name):
        """!

        Return the C++ declarations of the MPI routines added to the
        generated record: one getter and one free routine per logical
        communication channel (fork, join, boundary exchange, multiscale
        exchange, global communication), the sender-rank query, the
        datatype init/shutdown pair, and blocking plus functor-driven
        non-blocking send/receive variants.

        @param full_qualified_name Fully qualified C++ name of the
               generated record; spliced into the send/receive signatures.

        """
        return (
            """
 #ifdef Parallel
 /**
 * Hands out MPI datatype if we work without the LLVM MPI extension.
 * If we work with this additional feature, this is the routine where
 * the lazy initialisation is done and the datatype is also cached.
 */
 [[clang::map_mpi_datatype]]
 static MPI_Datatype getForkDatatype();

 [[clang::map_mpi_datatype]]
 static MPI_Datatype getJoinDatatype();

 [[clang::map_mpi_datatype]]
 static MPI_Datatype getBoundaryExchangeDatatype();

 [[clang::map_mpi_datatype]]
 static MPI_Datatype getMultiscaleDataExchangeDatatype();

 [[clang::map_mpi_datatype]]
 static MPI_Datatype getGlobalCommunciationDatatype();

 [[clang::map_mpi_datatype]]
 static void freeForkDatatype();

 [[clang::map_mpi_datatype]]
 static void freeJoinDatatype();

 [[clang::map_mpi_datatype]]
 static void freeBoundaryExchangeDatatype();

 [[clang::map_mpi_datatype]]
 static void freeMultiscaleDataExchangeDatatype();

 [[clang::map_mpi_datatype]]
 static void freeGlobalCommunciationDatatype();

 /**
 * @return The rank of the sender of an object. It only make ssense to call
 * this routine after you've invoked receive with MPI_ANY_SOURCE.
 */
 int getSenderRank() const;

 /**
 * Wrapper around getDatatype() to trigger lazy evaluation if we
 * use the lazy initialisation.
 */
 static void initDatatype();

 /**
 * Free the underlying MPI datatype.
 */
 static void shutdownDatatype();

 /**
 * In DaStGen (the first version), I had a non-static version of the send
 * as well as the receive. However, this did not work with newer C++11
 * versions, as a member function using this as pointer usually doesn't
 * see the vtable while the init sees the object from outside, i.e.
 * including a vtable. So this routine now is basically an alias for a
 * blocking MPI_Send.
 */
 static void send(const """
            + full_qualified_name
            + """& buffer, int destination, int tag, MPI_Comm communicator );
 static void receive("""
            + full_qualified_name
            + """& buffer, int source, int tag, MPI_Comm communicator );

 /**
 * Alternative to the other send() where I trigger a non-blocking send an
 * then invoke the functor until the corresponding MPI_Test tells me that
 * the message went through. In systems with heavy MPI usage, this can
 * help to avoid deadlocks.
 */
 static void send(const """
            + full_qualified_name
            + """& buffer, int destination, int tag, std::function<void()> startCommunicationFunctor, std::function<void()> waitFunctor, MPI_Comm communicator );
 static void receive( """
            + full_qualified_name
            + """& buffer, int source, int tag, std::function<void()> startCommunicationFunctor, std::function<void()> waitFunctor, MPI_Comm communicator );
 #endif
"""
        )

    def get_implementation(self, full_qualified_name):
        """!

        Return the C++ implementation of the routines declared by
        get_method_declarations().

        The datatype construction in initDatatype() makes three passes over
        self._data_model._attributes: (1) count the attributes taking part
        in communication, (2) fill the subtypes/blocklen arrays from each
        attribute's native MPI type, and (3) gather per-attribute
        displacements via MPI_Get_address on a two-element instance array,
        from which offset and extent of the resized struct type are
        derived. In every pass, static/const/constexpr attributes are
        skipped and attributes carrying ifdefs are wrapped into the
        matching preprocessor guards.

        @param full_qualified_name Fully qualified C++ name of the
               generated record; used to prefix every method definition.

        """
        result = (
            """
#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype """
            + full_qualified_name
            + """::Datatype = MPI_DATATYPE_NULL;
#endif


[[clang::map_mpi_datatype]]
MPI_Datatype """
            + full_qualified_name
            + """::getForkDatatype() {
 return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype """
            + full_qualified_name
            + """::getGlobalCommunciationDatatype() {
 return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype """
            + full_qualified_name
            + """::getJoinDatatype() {
 return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype """
            + full_qualified_name
            + """::getBoundaryExchangeDatatype() {
 return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype """
            + full_qualified_name
            + """::getMultiscaleDataExchangeDatatype() {
 return Datatype;
}


[[clang::map_mpi_datatype]]
void """
            + full_qualified_name
            + """::freeForkDatatype() {
 if (Datatype != MPI_DATATYPE_NULL){
 MPI_Type_free(&Datatype);
 Datatype = MPI_DATATYPE_NULL;
 }
}


[[clang::map_mpi_datatype]]
void """
            + full_qualified_name
            + """::freeGlobalCommunciationDatatype() {
 if (Datatype != MPI_DATATYPE_NULL){
 MPI_Type_free(&Datatype);
 Datatype = MPI_DATATYPE_NULL;
 }
}


[[clang::map_mpi_datatype]]
void """
            + full_qualified_name
            + """::freeJoinDatatype() {
 if (Datatype != MPI_DATATYPE_NULL){
 MPI_Type_free(&Datatype);
 Datatype = MPI_DATATYPE_NULL;
 }
}


[[clang::map_mpi_datatype]]
void """
            + full_qualified_name
            + """::freeBoundaryExchangeDatatype() {
 if (Datatype != MPI_DATATYPE_NULL){
 MPI_Type_free(&Datatype);
 Datatype = MPI_DATATYPE_NULL;
 }
}


[[clang::map_mpi_datatype]]
void """
            + full_qualified_name
            + """::freeMultiscaleDataExchangeDatatype() {
 if (Datatype != MPI_DATATYPE_NULL){
 MPI_Type_free(&Datatype);
 Datatype = MPI_DATATYPE_NULL;
 }
}


int """
            + full_qualified_name
            + """::getSenderRank() const {
 return _senderDestinationRank;
}



void """
            + full_qualified_name
            + """::initDatatype() {
 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
 """
            + full_qualified_name
            + """ instances[2];

 int NumberOfAttributes = 0;
"""
        )

        # Pass 1: count the attributes that take part in MPI communication.
        # Attributes guarded by ifdefs contribute a conditional increment.
        for i in self._data_model._attributes:
            if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
                # No comms for static/const members
                continue
            if i.ifdefs != []:
                result += construct_ifdef_string(i.ifdefs)
                result += " NumberOfAttributes++;\n#endif \n"
            else:
                result += " NumberOfAttributes++;\n"

        result += """
 MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
 int* blocklen = new int[NumberOfAttributes];
 MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

 int counter = 0;
"""

        # Pass 2: emit one subtype/blocklen entry per native MPI type of
        # every communicated attribute, again honouring ifdef guards.
        for i in self._data_model._attributes:
            if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
                # No comms for static/const members
                continue
            if i.ifdefs != []:
                result += construct_ifdef_string(i.ifdefs)
            for ii in i.get_native_MPI_type():
                result += " subtypes[counter] = " + ii[0] + ";\n"
                result += " blocklen[counter] = " + str(ii[1]) + ";\n"
                result += " counter++;\n"
            if i.ifdefs != []:
                result += "#endif\n"

        result += """
 MPI_Aint baseFirstInstance;
 MPI_Aint baseSecondInstance;
 MPI_Get_address( &instances[0], &baseFirstInstance );
 MPI_Get_address( &instances[1], &baseSecondInstance );

 counter = 0;
"""

        # Pass 3: take the address of each communicated attribute on the
        # first dummy instance to obtain the displacement table.
        for i in self._data_model._attributes:
            if i._is_static or i._is_const_static or i._is_constexpr or i._is_const:
                # no comms for static/const members
                continue
            if i.ifdefs != []:
                result += "\n"
                result += construct_ifdef_string(i.ifdefs)
            for ii in i.get_first_plain_C_attribute():
                result += " MPI_Get_address( &(instances[0]."
                result += ii[0]
                result += "), &disp[counter] );\n"
                result += " counter++;\n"
            if i.ifdefs != []:
                result += "#endif\n"

        # Tail of initDatatype() (normalise displacements, build/commit the
        # resized struct type) plus the remaining routine definitions.
        result += (
            """
 MPI_Aint offset = disp[0] - baseFirstInstance;
 MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
 for (int i=NumberOfAttributes-1; i>=0; i--) {
 disp[i] = disp[i] - disp[0];
 }

 int errorCode = 0;
 MPI_Datatype tmpType;
 errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
 errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
 errorCode += MPI_Type_commit( &Datatype );
 errorCode += MPI_Type_free( &tmpType );
 if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

 delete[] subtypes;
 delete[] blocklen;
 delete[] disp;

 #else
 // invoke routine once to trigger lazy initialisation
 getForkDatatype();
 getJoinDatatype();
 getBoundaryExchangeDatatype();
 getMultiscaleDataExchangeDatatype();
 getGlobalCommunciationDatatype();
 #endif
}


void """
            + full_qualified_name
            + """::shutdownDatatype() {
 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
 freeForkDatatype();
 freeJoinDatatype();
 freeBoundaryExchangeDatatype();
 freeMultiscaleDataExchangeDatatype();
 freeGlobalCommunciationDatatype();
 #else
 MPI_Datatype type = Datatype;
 MPI_Type_free( &type );
 #endif
}


void """
            + full_qualified_name
            + """::send(const """
            + full_qualified_name
            + """& buffer, int destination, int tag, MPI_Comm communicator ) {
 MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void """
            + full_qualified_name
            + """::receive("""
            + full_qualified_name
            + """& buffer, int source, int tag, MPI_Comm communicator ) {
 MPI_Status status;
 MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
 buffer._senderDestinationRank = status.MPI_SOURCE;
}


void """
            + full_qualified_name
            + """::send(
 const """
            + full_qualified_name
            + """& buffer,
 int destination,
 int tag,
 std::function<void()> startCommunicationFunctor,
 std::function<void()> waitFunctor,
 MPI_Comm communicator
) {
 MPI_Request sendRequestHandle;
 int flag = 0;
 MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
 MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
 startCommunicationFunctor();
 while (!flag) {
 waitFunctor();
 MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
 }
}


void """
            + full_qualified_name
            + """::receive(
 """
            + full_qualified_name
            + """& buffer,
 int source,
 int tag,
 std::function<void()> startCommunicationFunctor,
 std::function<void()> waitFunctor,
 MPI_Comm communicator
) {
 MPI_Status status;
 MPI_Request receiveRequestHandle;
 int flag = 0;
 MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
 MPI_Test( &receiveRequestHandle, &flag, &status );
 startCommunicationFunctor();
 while (!flag) {
 waitFunctor();
 MPI_Test( &receiveRequestHandle, &flag, &status );
 }
 buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif
"""
        )

        return result
Superclass of each and every MPI aspect in Peano 4.
Definition Aspect.py:6
Represents the MPI aspect injected into a DaStGen model.
Definition MPI.py:9
get_method_declarations(self, full_qualified_name)
Definition MPI.py:54
get_implementation(self, full_qualified_name)
Definition MPI.py:141