Peano
IntegerMessage.cpp
#include "IntegerMessage.h"

#include <sstream>
#include <algorithm>


tarch::mpi::IntegerMessage::IntegerMessage(int __value):
  _value(__value) {
}


std::string tarch::mpi::IntegerMessage::toString() const {
  std::ostringstream out;
  out << "(";
  out << "value=" << _value;
  out << ")";
  return out.str();
}


int tarch::mpi::IntegerMessage::getValue() const {
  return _value;
}


void tarch::mpi::IntegerMessage::setValue(int value) {
  _value = value;
}

#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype tarch::mpi::IntegerMessage::Datatype = MPI_DATATYPE_NULL;
#endif


[[clang::map_mpi_datatype]]
MPI_Datatype tarch::mpi::IntegerMessage::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype tarch::mpi::IntegerMessage::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype tarch::mpi::IntegerMessage::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype tarch::mpi::IntegerMessage::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype tarch::mpi::IntegerMessage::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


int tarch::mpi::IntegerMessage::getSenderRank() const {
  return _senderDestinationRank;
}


void tarch::mpi::IntegerMessage::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  tarch::mpi::IntegerMessage instances[2];

  int NumberOfAttributes = 0;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int*          blocklen = new int[NumberOfAttributes];
  MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._value), &disp[counter] );
  counter++;

  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;
  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
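
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: the MPI_Type_create_resized()
// step above stretches the committed datatype to the footprint of a whole
// IntegerMessage object (only _value is packed, but further members such as
// _senderDestinationRank still occupy memory), so arrays of messages can be
// shipped with a single call once initDatatype() has run. The count, peer
// rank and tag below are made up for the example.
//
//   tarch::mpi::IntegerMessage buffer[8];
//   MPI_Send(
//     buffer, 8,
//     tarch::mpi::IntegerMessage::getGlobalCommunciationDatatype(),
//     /* destination */ 1, /* tag */ 0, MPI_COMM_WORLD
//   );
// ---------------------------------------------------------------------------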


void tarch::mpi::IntegerMessage::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeForkDatatype();
  freeJoinDatatype();
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}


void tarch::mpi::IntegerMessage::send(const tarch::mpi::IntegerMessage& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void tarch::mpi::IntegerMessage::receive(tarch::mpi::IntegerMessage& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}


void tarch::mpi::IntegerMessage::send(
  const tarch::mpi::IntegerMessage& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void tarch::mpi::IntegerMessage::receive(
  tarch::mpi::IntegerMessage& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status  status;
  MPI_Request receiveRequestHandle;
  int         flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif

#ifdef Parallel
void tarch::mpi::IntegerMessage::sendAndPollDanglingMessages(const tarch::mpi::IntegerMessage& message, int destination, int tag, MPI_Comm communicator ) {
  tarch::mpi::IntegerMessage::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "tarch::mpi::IntegerMessage", "sendAndPollDanglingMessages()",destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "tarch::mpi::IntegerMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void tarch::mpi::IntegerMessage::receiveAndPollDanglingMessages(tarch::mpi::IntegerMessage& message, int source, int tag, MPI_Comm communicator ) {
  tarch::mpi::IntegerMessage::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "tarch::mpi::IntegerMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "tarch::mpi::IntegerMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
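
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the generated file: a typical life
// cycle of the message type on two ranks. The rank numbers, value 23 and
// tag 42 are made up, and Rank::getRank() is assumed to return the local
// MPI rank; the communicator argument falls back to its documented default.
//
//   tarch::mpi::IntegerMessage::initDatatype();
//
//   if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
//     tarch::mpi::IntegerMessage out(23);
//     tarch::mpi::IntegerMessage::sendAndPollDanglingMessages( out, 1, 42 );
//   }
//   else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
//     tarch::mpi::IntegerMessage in(0);
//     tarch::mpi::IntegerMessage::receiveAndPollDanglingMessages( in, 0, 42 );
//     // in.getValue() now yields 23
//   }
//
//   tarch::mpi::IntegerMessage::shutdownDatatype();
// ---------------------------------------------------------------------------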

Referenced symbols (brief descriptions from the class documentation):

tarch::mpi::Rank
  static Rank& getInstance()
    This operation returns the singleton instance. Definition: Rank.cpp:539
  void setDeadlockWarningTimeStamp()
    Memorise global timeout. Definition: Rank.cpp:193
  void setDeadlockTimeOutTimeStamp()
    Definition: Rank.cpp:198
  void writeTimeOutWarning(const std::string& className, const std::string& methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
    Writes a warning if relevant. Definition: Rank.cpp:148
  void triggerDeadlockTimeOut(const std::string& className, const std::string& methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string& comment="")
    Triggers a time out and shuts down the cluster if a timeout is violated. Definition: Rank.cpp:124

tarch::services::ServiceRepository
  static ServiceRepository& getInstance()
  virtual void receiveDanglingMessages() override
    Answer to MPI Messages.

tarch::mpi::IntegerMessage
  std::string toString() const
  static MPI_Datatype Datatype
    Whenever we use LLVM's MPI extension (DaStGe), we rely on lazy initialisation of the datatype.
  static MPI_Datatype getForkDatatype()
    Hands out MPI datatype if we work without the LLVM MPI extension.
  static MPI_Datatype getJoinDatatype()
  static MPI_Datatype getBoundaryExchangeDatatype()
  static MPI_Datatype getMultiscaleDataExchangeDatatype()
  static MPI_Datatype getGlobalCommunciationDatatype()
  static void freeBoundaryExchangeDatatype()
  static void freeMultiscaleDataExchangeDatatype()
  static void freeGlobalCommunciationDatatype()
  static void initDatatype()
    Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
  static void shutdownDatatype()
    Free the underlying MPI datatype.
  static void send(const tarch::mpi::IntegerMessage& buffer, int destination, int tag, MPI_Comm communicator)
    In DaStGen (the first version), I had a non-static version of the send as well as the receive.
  static void receive(tarch::mpi::IntegerMessage& buffer, int source, int tag, MPI_Comm communicator)
  static void sendAndPollDanglingMessages(const tarch::mpi::IntegerMessage& message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
  static void receiveAndPollDanglingMessages(tarch::mpi::IntegerMessage& message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())