Peano 4
GridControlEvent.cpp
#include "GridControlEvent.h"

#include <sstream>
#include <algorithm>
#include <iostream>   // std::cerr in initDatatype()'s error check

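// A GridControlEvent couples a refinement instruction (Refine or Erase) with
// the region it applies to (offset and width) and the mesh width h tied to
// that instruction.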
peano4::grid::GridControlEvent::GridControlEvent(RefinementControl  __refinementControl, tarch::la::Vector<Dimensions,double>  __offset, tarch::la::Vector<Dimensions,double>  __width, tarch::la::Vector<Dimensions,double>  __h) {
  setRefinementControl( __refinementControl );
  setOffset( __offset );
  setWidth( __width );
  setH( __h );
}

std::string peano4::grid::GridControlEvent::toString() const {
  std::ostringstream out;
  out << "(";
  out << "refinementControl=" << (_refinementControl==RefinementControl::Refine? "Refine" : "") << (_refinementControl==RefinementControl::Erase? "Erase" : "");
  out << ",";
  out << "offset=" << _offset;
  out << ",";
  out << "width=" << _width;
  out << ",";
  out << "h=" << _h;
  out << ")";
  return out.str();
}

peano4::grid::GridControlEvent::RefinementControl peano4::grid::GridControlEvent::getRefinementControl() const {
  return _refinementControl;
}

void peano4::grid::GridControlEvent::setRefinementControl(RefinementControl value) {
  _refinementControl = value;
}

tarch::la::Vector<Dimensions,double> peano4::grid::GridControlEvent::getOffset() const {
  return _offset;
}

void peano4::grid::GridControlEvent::setOffset(const tarch::la::Vector<Dimensions,double>& value) {
  _offset = value;
}

double peano4::grid::GridControlEvent::getOffset(int index) const {
  return _offset(index);
}

void peano4::grid::GridControlEvent::setOffset(int index, double value) {
  _offset(index) = value;
}

tarch::la::Vector<Dimensions,double> peano4::grid::GridControlEvent::getWidth() const {
  return _width;
}

void peano4::grid::GridControlEvent::setWidth(const tarch::la::Vector<Dimensions,double>& value) {
  _width = value;
}

double peano4::grid::GridControlEvent::getWidth(int index) const {
  return _width(index);
}

void peano4::grid::GridControlEvent::setWidth(int index, double value) {
  _width(index) = value;
}

tarch::la::Vector<Dimensions,double> peano4::grid::GridControlEvent::getH() const {
  return _h;
}

void peano4::grid::GridControlEvent::setH(const tarch::la::Vector<Dimensions,double>& value) {
  _h = value;
}

double peano4::grid::GridControlEvent::getH(int index) const {
  return _h(index);
}

void peano4::grid::GridControlEvent::setH(int index, double value) {
  _h(index) = value;
}

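// Everything below is MPI glue for this record and is only compiled for
// parallel builds (Parallel defined).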
#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
// Without LLVM's MPI language extension, every getter below hands out this one
// datatype, which is built eagerly in initDatatype(); with the extension,
// [[clang::map_mpi_datatype]] triggers lazy initialisation instead.
MPI_Datatype peano4::grid::GridControlEvent::Datatype = MPI_DATATYPE_NULL;
#endif

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridControlEvent::getForkDatatype() {
  return Datatype;
}

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridControlEvent::getJoinDatatype() {
  return Datatype;
}

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridControlEvent::getBoundaryExchangeDatatype() {
  return Datatype;
}

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridControlEvent::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridControlEvent::getGlobalCommunciationDatatype() {
  return Datatype;
}

int peano4::grid::GridControlEvent::getSenderRank() const {
  return _senderDestinationRank;
}

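// Builds the MPI datatype that mirrors the memory layout of one
// GridControlEvent: one integer for the refinement control plus three vectors
// of Dimensions doubles (offset, width, h).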
void peano4::grid::GridControlEvent::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::grid::GridControlEvent  instances[2];

  // one increment per attribute: _refinementControl, _offset, _width, _h
  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;

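  // One entry per attribute: the enum maps onto MPI_INT, each of the three
  // vectors onto a block of Dimensions MPI_DOUBLEs.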
  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_DOUBLE;
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_DOUBLE;
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_DOUBLE;
  blocklen[counter] = Dimensions;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._refinementControl), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._offset.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._width.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._h.data()[0]), &disp[counter] );
  counter++;

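  // Turn the absolute addresses into displacements relative to the first
  // attribute, then resize the struct type so its extent equals the spacing
  // between two array elements, which keeps count>1 transfers consistent.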
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;

  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}

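// Releases the MPI datatype(s) handed out by the datatype getters above.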
void peano4::grid::GridControlEvent::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  MPI_Datatype type;
  type = getForkDatatype();
  MPI_Type_free( &type );
  type = getJoinDatatype();
  MPI_Type_free( &type );
  type = getBoundaryExchangeDatatype();
  MPI_Type_free( &type );
  type = getMultiscaleDataExchangeDatatype();
  MPI_Type_free( &type );
  type = getGlobalCommunciationDatatype();
  MPI_Type_free( &type );
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}

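// Plain blocking point-to-point exchange of a single event.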
void peano4::grid::GridControlEvent::send(const peano4::grid::GridControlEvent& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}

void peano4::grid::GridControlEvent::receive(peano4::grid::GridControlEvent& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}

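// Functor-based variants: issue the non-blocking MPI call, then keep testing
// the request while invoking the two functors, so the caller can arm deadlock
// watchdogs and serve other messages while waiting for completion.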
void peano4::grid::GridControlEvent::send(
  const peano4::grid::GridControlEvent& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}

void peano4::grid::GridControlEvent::receive(
  peano4::grid::GridControlEvent& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif

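// Convenience wrappers around the functor-based send/receive: they arm the
// deadlock watchdog before the exchange starts and, while waiting, emit
// time-out warnings, trigger the deadlock handler and serve dangling messages.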
#ifdef Parallel
void peano4::grid::GridControlEvent::sendAndPollDanglingMessages(const peano4::grid::GridControlEvent& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::grid::GridControlEvent::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridControlEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridControlEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::grid::GridControlEvent::receiveAndPollDanglingMessages(peano4::grid::GridControlEvent& message, int source, int tag, MPI_Comm communicator ) {
  peano4::grid::GridControlEvent::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridControlEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridControlEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif