Peano 4
TreeManagementMessage.cpp
#include "TreeManagementMessage.h"

#include <sstream>
#include <algorithm>


peano4::parallel::TreeManagementMessage::TreeManagementMessage(int __masterSpacetreeId, int __workerSpacetreeId, Action __action) {
  setMasterSpacetreeId( __masterSpacetreeId);
  setWorkerSpacetreeId( __workerSpacetreeId);
  setAction( __action);
}


std::string peano4::parallel::TreeManagementMessage::toString() const {
  std::ostringstream out;
  out << "(";
  out << "masterSpacetreeId=" << _masterSpacetreeId;
  out << ",";
  out << "workerSpacetreeId=" << _workerSpacetreeId;
  out << ",";
  out << "action=" << (_action==Action::RequestNewRemoteTree? "RequestNewRemoteTree" : "") << (_action==Action::CreateNewRemoteTree? "CreateNewRemoteTree" : "") << (_action==Action::RemoveChildTreeFromBooksAsChildBecameEmpty? "RemoveChildTreeFromBooksAsChildBecameEmpty" : "") << (_action==Action::JoinWithWorker? "JoinWithWorker" : "") << (_action==Action::Acknowledgement? "Acknowledgement" : "");
  out << ")";
  return out.str();
}
int peano4::parallel::TreeManagementMessage::getMasterSpacetreeId() const {
  return _masterSpacetreeId;
}


void peano4::parallel::TreeManagementMessage::setMasterSpacetreeId(int value) {
  _masterSpacetreeId = value;
}


int peano4::parallel::TreeManagementMessage::getWorkerSpacetreeId() const {
  return _workerSpacetreeId;
}


void peano4::parallel::TreeManagementMessage::setWorkerSpacetreeId(int value) {
  _workerSpacetreeId = value;
}


peano4::parallel::TreeManagementMessage::Action peano4::parallel::TreeManagementMessage::getAction() const {
  return _action;
}


void peano4::parallel::TreeManagementMessage::setAction(Action value) {
  _action = value;
}
#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::TreeManagementMessage::Datatype;
#endif
[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}
int peano4::parallel::TreeManagementMessage::getSenderRank() const {
  return _senderDestinationRank;
}
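/*
 * initDatatype() mirrors the three message attributes (_masterSpacetreeId,
 * _workerSpacetreeId and the _action enum), each mapped onto one MPI_INT,
 * in an MPI struct datatype. Two consecutive dummy instances are used to
 * measure the real extent of the class including any padding, and the type
 * is resized accordingly before it is committed. When the LLVM MPI attribute
 * extension is active, the routine merely calls the per-purpose datatype
 * getters once to trigger their lazy initialisation.
 */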
void peano4::parallel::TreeManagementMessage::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::TreeManagementMessage instances[2];

  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._masterSpacetreeId), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._workerSpacetreeId), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._action), &disp[counter] );
  counter++;

  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;
  #else
  // invoke routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
void peano4::parallel::TreeManagementMessage::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  MPI_Datatype type;
  type = getForkDatatype();
  MPI_Type_free( &type );
  type = getJoinDatatype();
  MPI_Type_free( &type );
  type = getBoundaryExchangeDatatype();
  MPI_Type_free( &type );
  type = getMultiscaleDataExchangeDatatype();
  MPI_Type_free( &type );
  type = getGlobalCommunciationDatatype();
  MPI_Type_free( &type );
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}
void peano4::parallel::TreeManagementMessage::send(const peano4::parallel::TreeManagementMessage& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::parallel::TreeManagementMessage::receive(peano4::parallel::TreeManagementMessage& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
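/*
 * Non-blocking variants of send() and receive(): the message is handed to
 * MPI_Isend/MPI_Irecv and the request is then polled with MPI_Test.
 * startCommunicationFunctor() runs once after the request has been issued,
 * while waitFunctor() is invoked on every unsuccessful poll until MPI signals
 * completion; the poll-dangling-messages wrappers below use that hook for
 * time-out warnings, deadlock detection and servicing pending messages.
 */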
void peano4::parallel::TreeManagementMessage::send(
  const peano4::parallel::TreeManagementMessage& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::parallel::TreeManagementMessage::receive(
  peano4::parallel::TreeManagementMessage& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif


#ifdef Parallel
void peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(const peano4::parallel::TreeManagementMessage& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeManagementMessage::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages(peano4::parallel::TreeManagementMessage& message, int source, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeManagementMessage::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeManagementMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeManagementMessage", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
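A minimal usage sketch, not part of the generated file: it assumes a build with Parallel defined, that the Peano/tarch MPI runtime is up and initDatatype() has been called, and that the header path, the tag value and the tree ids below are illustrative placeholders. Rank 0 sends an acknowledgement to rank 1 through the deadlock-aware wrappers defined above, and rank 1 reads the payload back out via the getters.

#include "peano4/parallel/TreeManagementMessage.h"   // assumed header location
#include "tarch/mpi/Rank.h"

void acknowledgeExample() {
  using peano4::parallel::TreeManagementMessage;

  const int tag    = 42;   // placeholder tag
  const int myRank = tarch::mpi::Rank::getInstance().getRank();

  if (myRank == 0) {
    // master tree 0 acknowledges a request issued by worker tree 1
    TreeManagementMessage message(0, 1, TreeManagementMessage::Action::Acknowledgement);
    TreeManagementMessage::sendAndPollDanglingMessages(message, /*destination*/ 1, tag);
  }
  else if (myRank == 1) {
    // placeholder values; they are overwritten by the incoming message
    TreeManagementMessage message(0, 0, TreeManagementMessage::Action::Acknowledgement);
    TreeManagementMessage::receiveAndPollDanglingMessages(message, /*source*/ 0, tag);
    // message.getAction(), getMasterSpacetreeId() and getWorkerSpacetreeId()
    // now hold the transferred values; getSenderRank() yields 0.
  }
}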