std::string peano4::parallel::TreeManagementMessage::toString() const {
  std::ostringstream out;
  out << "masterSpacetreeId=" << _masterSpacetreeId;
  out << "workerSpacetreeId=" << _workerSpacetreeId;
  out << "action=" << (_action==Action::RequestNewRemoteTree?                     "RequestNewRemoteTree" : "")
                   << (_action==Action::CreateNewRemoteTree?                      "CreateNewRemoteTree" : "")
                   << (_action==Action::RemoveChildTreeFromBooksAsChildBecameEmpty? "RemoveChildTreeFromBooksAsChildBecameEmpty" : "")
                   << (_action==Action::JoinWithWorker?                            "JoinWithWorker" : "")
                   << (_action==Action::Acknowledgement?                           "Acknowledgement" : "");
  return out.str();
}

int peano4::parallel::TreeManagementMessage::getMasterSpacetreeId() const {
  return _masterSpacetreeId;
}

void peano4::parallel::TreeManagementMessage::setMasterSpacetreeId(int value) {
  _masterSpacetreeId = value;
}

int peano4::parallel::TreeManagementMessage::getWorkerSpacetreeId() const {
  return _workerSpacetreeId;
}

void peano4::parallel::TreeManagementMessage::setWorkerSpacetreeId(int value) {
  _workerSpacetreeId = value;
}
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::parallel::TreeManagementMessage::Datatype = MPI_DATATYPE_NULL;
#endif

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeManagementMessage::getForkDatatype() {
  return Datatype;
}

// getJoinDatatype(), getBoundaryExchangeDatatype(), getMultiscaleDataExchangeDatatype() and
// getGlobalCommunciationDatatype() follow the same pattern: every exchange flavour of this
// message hands out the one static Datatype instance.

[[clang::map_mpi_datatype]]
void peano4::parallel::TreeManagementMessage::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

// freeJoinDatatype(), freeBoundaryExchangeDatatype(), freeMultiscaleDataExchangeDatatype()
// and freeGlobalCommunciationDatatype() follow the same pattern.

int peano4::parallel::TreeManagementMessage::getSenderRank() const {
  return _senderDestinationRank;
}
void peano4::parallel::TreeManagementMessage::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::parallel::TreeManagementMessage  instances[2];

  int NumberOfAttributes = 0;
  NumberOfAttributes++;   // _masterSpacetreeId
  NumberOfAttributes++;   // _workerSpacetreeId
  NumberOfAttributes++;   // _action

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int*          blocklen = new int[NumberOfAttributes];
  MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_INT;   blocklen[counter] = 1;   counter++;
  subtypes[counter] = MPI_INT;   blocklen[counter] = 1;   counter++;
  subtypes[counter] = MPI_INT;   blocklen[counter] = 1;   counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._masterSpacetreeId), &disp[counter] );   counter++;
  MPI_Get_address( &(instances[0]._workerSpacetreeId), &disp[counter] );   counter++;
  MPI_Get_address( &(instances[0]._action),            &disp[counter] );   counter++;

  // Normalise the displacements and give the derived type the true extent of one instance.
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;
  #else
  // With the LLVM MPI extension, touching the getters triggers the lazy initialisation.
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
void peano4::parallel::TreeManagementMessage::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}
void peano4::parallel::TreeManagementMessage::send(const peano4::parallel::TreeManagementMessage& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}

void peano4::parallel::TreeManagementMessage::receive(peano4::parallel::TreeManagementMessage& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}

void peano4::parallel::TreeManagementMessage::send(
  const peano4::parallel::TreeManagementMessage& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int         flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}

void peano4::parallel::TreeManagementMessage::receive(
  peano4::parallel::TreeManagementMessage& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status  status;
  MPI_Request receiveRequestHandle;
  int         flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}

void peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(const peano4::parallel::TreeManagementMessage& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::parallel::TreeManagementMessage::send(
    message, destination, tag,
    [&]() {
      // Reset the deadlock time-out before we start to wait.
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      // While we wait: warn or abort if the time-out is violated and keep answering dangling messages.
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::parallel::TreeManagementMessage", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}

// receiveAndPollDanglingMessages(message, source, tag, communicator) mirrors the send variant
// above: it forwards to the functor-based receive() with the same pair of functors.
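Taken together, these routines form a small protocol layer: build the MPI datatype once, exchange fixed-size TreeManagementMessage instances, and free the datatype at shutdown. The sketch below shows how a caller might drive a minimal request/acknowledgement handshake with this API. It is illustrative only: the include path, the main() scaffold, the tag value, the rank layout and the concrete tree ids are assumptions and not part of this file.

  #include <mpi.h>
  #include "peano4/parallel/TreeManagementMessage.h"   // assumed include path

  int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    peano4::parallel::TreeManagementMessage::initDatatype();

    const int tag = 42;   // assumption: any otherwise unused tag

    if (rank==0) {
      // Master asks rank 1 for a new remote tree ...
      peano4::parallel::TreeManagementMessage request;
      request.setMasterSpacetreeId(0);
      request.setWorkerSpacetreeId(-1);   // assumption: -1 marks "not assigned yet"
      request.setAction(peano4::parallel::TreeManagementMessage::Action::RequestNewRemoteTree);
      peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(request, 1, tag, MPI_COMM_WORLD);

      // ... and waits for the acknowledgement.
      peano4::parallel::TreeManagementMessage answer;
      peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages(answer, 1, tag, MPI_COMM_WORLD);
    }
    else if (rank==1) {
      peano4::parallel::TreeManagementMessage request;
      peano4::parallel::TreeManagementMessage::receiveAndPollDanglingMessages(request, 0, tag, MPI_COMM_WORLD);

      peano4::parallel::TreeManagementMessage answer;
      answer.setMasterSpacetreeId(request.getMasterSpacetreeId());
      answer.setWorkerSpacetreeId(1);
      answer.setAction(peano4::parallel::TreeManagementMessage::Action::Acknowledgement);
      peano4::parallel::TreeManagementMessage::sendAndPollDanglingMessages(answer, request.getSenderRank(), tag, MPI_COMM_WORLD);
    }

    peano4::parallel::TreeManagementMessage::shutdownDatatype();
    MPI_Finalize();
    return 0;
  }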
The source listing references the following symbols; the brief descriptions are taken from their own documentation.

static Rank & getInstance()
    This operation returns the singleton instance.
void setDeadlockWarningTimeStamp()
    Memorise global timeout.
void setDeadlockTimeOutTimeStamp()
void writeTimeOutWarning(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
    Writes a warning if relevant.
void triggerDeadlockTimeOut(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string &comment="")
    Triggers a time out and shuts down the cluster if a timeout is violated.
static ServiceRepository & getInstance()
virtual void receiveDanglingMessages() override
    Answer to MPI Messages.

(How these helpers plug into the functor-based send()/receive() is sketched after this list.)

int _senderDestinationRank
static MPI_Datatype Datatype
    Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy initialisation of the datatype.
peano4::parallel::TreeManagementMessage::Action getAction() const
void setAction(Action value)
int getMasterSpacetreeId() const
void setMasterSpacetreeId(int value)
int getWorkerSpacetreeId() const
void setWorkerSpacetreeId(int value)
int getSenderRank() const
std::string toString() const
static MPI_Datatype getForkDatatype()
    Hands out MPI datatype if we work without the LLVM MPI extension.
static MPI_Datatype getJoinDatatype()
static MPI_Datatype getBoundaryExchangeDatatype()
static MPI_Datatype getMultiscaleDataExchangeDatatype()
static MPI_Datatype getGlobalCommunciationDatatype()
static void freeForkDatatype()
static void freeJoinDatatype()
static void freeBoundaryExchangeDatatype()
static void freeMultiscaleDataExchangeDatatype()
static void freeGlobalCommunciationDatatype()
static void initDatatype()
    Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
static void shutdownDatatype()
    Free the underlying MPI datatype.
static void send(const peano4::parallel::TreeManagementMessage &buffer, int destination, int tag, MPI_Comm communicator)
    In DaStGen (the first version), I had a non-static version of the send as well as the receive.
static void receive(peano4::parallel::TreeManagementMessage &buffer, int source, int tag, MPI_Comm communicator)
static void sendAndPollDanglingMessages(const peano4::parallel::TreeManagementMessage &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static void receiveAndPollDanglingMessages(peano4::parallel::TreeManagementMessage &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
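As referenced above, the deadlock helpers of tarch::mpi::Rank together with ServiceRepository::receiveDanglingMessages() are what the *AndPollDanglingMessages wrappers feed into the functor-based send()/receive() overloads. Those overloads can also be driven directly. The fragment below is a minimal sketch of such a call: the helper function, its name, the include paths and the source/tag arguments are assumptions for illustration, and the two lambdas are stand-ins for whatever bookkeeping a caller wants to run before and while waiting.

  #include <mpi.h>
  #include "peano4/parallel/TreeManagementMessage.h"   // assumed include paths
  #include "tarch/mpi/Rank.h"
  #include "tarch/services/ServiceRepository.h"

  // Hypothetical helper: block for one TreeManagementMessage but keep the rank responsive.
  peano4::parallel::TreeManagementMessage pollForTreeManagementMessage(int source, int tag) {
    peano4::parallel::TreeManagementMessage message;
    peano4::parallel::TreeManagementMessage::receive(
      message, source, tag,
      []() {
        // Before we start to wait: arm the deadlock bookkeeping.
        tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
        tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
      },
      [source, tag]() {
        // While the message has not arrived: warn/abort on a violated time-out and
        // let other services answer their pending (dangling) messages.
        tarch::mpi::Rank::getInstance().writeTimeOutWarning("example", "pollForTreeManagementMessage()", source, tag);
        tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut("example", "pollForTreeManagementMessage()", source, tag);
        tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
      },
      tarch::mpi::Rank::getInstance().getCommunicator()
    );
    // message.getAction(), message.getSenderRank() etc. can now be evaluated.
    return message;
  }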