Peano 4
TreeEntry.cpp
Go to the documentation of this file.
#include "TreeEntry.h"

#include <sstream>
#include <algorithm>
#include <iostream>

peano4::parallel::TreeEntry::TreeEntry(int __id, int __master) {
  setId( __id);
  setMaster( __master);
}

std::string peano4::parallel::TreeEntry::toString() const {
  std::ostringstream out;
  out << "(";
  out << "id=" << _id;
  out << ",";
  out << "master=" << _master;
  out << ")";
  return out.str();
}

int peano4::parallel::TreeEntry::getId() const {
  return _id;
}


void peano4::parallel::TreeEntry::setId(int value) {
  _id = value;
}


int peano4::parallel::TreeEntry::getMaster() const {
  return _master;
}


void peano4::parallel::TreeEntry::setMaster(int value) {
  _master = value;
}


#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::TreeEntry::Datatype = MPI_DATATYPE_NULL;
#endif

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


int peano4::parallel::TreeEntry::getSenderRank() const {
  return _senderDestinationRank;
}

void peano4::parallel::TreeEntry::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::TreeEntry instances[2];

  // two attributes are mapped: _id and _master
  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  // measure the extent of one object via two consecutive instances
  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._id), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._master), &disp[counter] );
  counter++;

  // displacements relative to the first attribute; resize to the true extent
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;

  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}

void peano4::parallel::TreeEntry::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  MPI_Datatype type;
  type = getForkDatatype();
  MPI_Type_free( &type );
  type = getJoinDatatype();
  MPI_Type_free( &type );
  type = getBoundaryExchangeDatatype();
  MPI_Type_free( &type );
  type = getMultiscaleDataExchangeDatatype();
  MPI_Type_free( &type );
  type = getGlobalCommunciationDatatype();
  MPI_Type_free( &type );
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}

void peano4::parallel::TreeEntry::send(const peano4::parallel::TreeEntry& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::parallel::TreeEntry::receive(peano4::parallel::TreeEntry& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}

void peano4::parallel::TreeEntry::send(
  const peano4::parallel::TreeEntry& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}

void peano4::parallel::TreeEntry::receive(
  peano4::parallel::TreeEntry& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif

#ifdef Parallel
void peano4::parallel::TreeEntry::sendAndPollDanglingMessages(const peano4::parallel::TreeEntry& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeEntry::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeEntry", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeEntry", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}

void peano4::parallel::TreeEntry::receiveAndPollDanglingMessages(peano4::parallel::TreeEntry& message, int source, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeEntry::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeEntry", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeEntry", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
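
For orientation only (not part of the generated file): a minimal usage sketch of the MPI routines above, assuming Peano is built with Parallel defined and that the header is reachable as peano4/parallel/TreeEntry.h. The helper function name, the rank numbers, and the tag value are made up for illustration.

// Hypothetical sketch: datatype lifecycle plus one blocking exchange between ranks 0 and 1.
#include "peano4/parallel/TreeEntry.h"
#include <mpi.h>

void exchangeTreeEntrySketch(int myRank) {
  peano4::parallel::TreeEntry::initDatatype();      // build and commit the MPI struct type once

  const int tag = 14;                               // illustrative tag
  if (myRank == 0) {
    peano4::parallel::TreeEntry entry(23, 0);       // id=23, master=0
    peano4::parallel::TreeEntry::send(entry, 1, tag, MPI_COMM_WORLD);
  }
  else if (myRank == 1) {
    peano4::parallel::TreeEntry entry(0, 0);
    peano4::parallel::TreeEntry::receive(entry, 0, tag, MPI_COMM_WORLD);
    // entry.getId()==23 and entry.getSenderRank()==0 after the receive
  }

  peano4::parallel::TreeEntry::shutdownDatatype();  // release the datatype before MPI_Finalize
}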