// toString()
std::ostringstream out;
// ...
out << "master=" << _master;
// ...
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
// ...
[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getForkDatatype() { return Datatype; }

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getGlobalCommunciationDatatype() { return Datatype; }

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getJoinDatatype() { return Datatype; }

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getBoundaryExchangeDatatype() { return Datatype; }

[[clang::map_mpi_datatype]]
MPI_Datatype peano4::parallel::TreeEntry::getMultiscaleDataExchangeDatatype() { return Datatype; }

// Each free*Datatype() releases the shared static Datatype if it is still allocated.
[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}

[[clang::map_mpi_datatype]]
void peano4::parallel::TreeEntry::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}
// getSenderRank()
return _senderDestinationRank;
// initDatatype(): construct and commit the MPI datatype for TreeEntry by hand
// when the LLVM MPI attribute extension is not available.
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
peano4::parallel::TreeEntry instances[2];

int NumberOfAttributes = 0;
NumberOfAttributes++;   // _id
NumberOfAttributes++;   // _master

MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
int*          blocklen = new int[NumberOfAttributes];
MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

int counter = 0;
subtypes[counter] = MPI_INT;
blocklen[counter] = 1;
counter++;
subtypes[counter] = MPI_INT;
blocklen[counter] = 1;
counter++;

MPI_Aint baseFirstInstance;
MPI_Aint baseSecondInstance;
MPI_Get_address( &instances[0], &baseFirstInstance );
MPI_Get_address( &instances[1], &baseSecondInstance );

counter = 0;
MPI_Get_address( &(instances[0]._id),     &disp[counter] );
counter++;
MPI_Get_address( &(instances[0]._master), &disp[counter] );
counter++;

// Make displacements relative to the first attribute and resize the type so
// that consecutive instances in an array are addressed correctly.
MPI_Aint offset = disp[0] - baseFirstInstance;
MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
for (int i=NumberOfAttributes-1; i>=0; i--) {
  disp[i] = disp[i] - disp[0];
}

int errorCode = 0;
MPI_Datatype tmpType;
errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
errorCode += MPI_Type_commit( &Datatype );
errorCode += MPI_Type_free( &tmpType );
if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;
// ...

// The getters below trigger the (lazy) construction of the datatype where the
// LLVM extension is used.
getBoundaryExchangeDatatype();
getMultiscaleDataExchangeDatatype();
getGlobalCommunciationDatatype();
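// The construction above is easier to digest as a standalone sketch: the
// following is a hedged, minimal example (not part of this file) that applies
// the same MPI_Type_create_struct / MPI_Type_create_resized technique to a
// hypothetical two-int struct. The names DemoEntry and buildDemoEntryDatatype
// are illustrative assumptions only.
#include <mpi.h>

struct DemoEntry {
  int id;
  int master;
};

// Derive a committed MPI datatype for DemoEntry. Call after MPI_Init().
MPI_Datatype buildDemoEntryDatatype() {
  DemoEntry    instances[2];
  int          blocklen[2] = {1, 1};
  MPI_Datatype subtypes[2] = {MPI_INT, MPI_INT};
  MPI_Aint     disp[2];
  MPI_Aint     base0, base1;

  MPI_Get_address( &instances[0],        &base0 );
  MPI_Get_address( &instances[1],        &base1 );
  MPI_Get_address( &instances[0].id,     &disp[0] );
  MPI_Get_address( &instances[0].master, &disp[1] );

  // Lower bound and extent are taken from two neighbouring array elements, so
  // that sending N instances with count=N walks the array correctly.
  MPI_Aint offset = disp[0] - base0;
  MPI_Aint extent = base1 - base0 - offset;
  disp[1] -= disp[0];
  disp[0]  = 0;

  MPI_Datatype tmpType;
  MPI_Datatype result;
  MPI_Type_create_struct( 2, blocklen, disp, subtypes, &tmpType );
  MPI_Type_create_resized( tmpType, offset, extent, &result );
  MPI_Type_commit( &result );
  MPI_Type_free( &tmpType );
  return result;
}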
// shutdownDatatype(): free the underlying MPI datatype again.
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
// ...
freeBoundaryExchangeDatatype();
freeMultiscaleDataExchangeDatatype();
freeGlobalCommunciationDatatype();
// ...
MPI_Datatype type = Datatype;
MPI_Type_free( &type );
// send(): blocking send of one TreeEntry
MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);

// receive(): blocking receive of one TreeEntry
MPI_Status status;
MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
// send() overload with functors: kick off a non-blocking send, then poll it.
// startCommunicationFunctor() runs once right after the MPI_Isend, while
// waitFunctor() runs repeatedly until the send request has completed.
// ...
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}
// receive() overload with functors: kick off a non-blocking receive, then poll
// it, invoking startCommunicationFunctor() once and waitFunctor() repeatedly
// until the message has arrived.
// ...
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status  status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
}
// sendAndPollDanglingMessages() forwards to the functor-based send() with
//   message, destination, tag, ...
// and receiveAndPollDanglingMessages() forwards to the functor-based receive() with
//   message, source, tag, ...
// The functors passed in maintain the deadlock time-out bookkeeping and keep
// answering dangling messages while the exchange is pending.
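// A hedged usage sketch (not code from this file) of the polling wrappers: one
// rank hands a TreeEntry to another while both keep servicing other MPI
// traffic instead of blocking. The include paths, the tag value and the use of
// tarch::mpi::Rank::getRank() are assumptions based on the members listed
// below; only the TreeEntry calls themselves follow the documented signatures.
#include <iostream>
#include "peano4/parallel/TreeEntry.h"
#include "tarch/mpi/Rank.h"

void exchangeTreeEntry() {
  const int tag = 42;   // illustrative tag only
  peano4::parallel::TreeEntry entry;

  if ( tarch::mpi::Rank::getInstance().getRank() == 0 ) {
    entry.setMaster(0);
    // Non-blocking send that keeps polling dangling messages until delivered.
    peano4::parallel::TreeEntry::sendAndPollDanglingMessages( entry, 1, tag );
  }
  else if ( tarch::mpi::Rank::getInstance().getRank() == 1 ) {
    peano4::parallel::TreeEntry::receiveAndPollDanglingMessages( entry, 0, tag );
    std::cout << entry.toString() << " from rank " << entry.getSenderRank() << std::endl;
  }
}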
void triggerDeadlockTimeOut(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string &comment="")
Triggers a time-out and shuts down the cluster if the time-out is violated.
void setDeadlockWarningTimeStamp()
Memorise global timeout.
void writeTimeOutWarning(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
Writes a warning if relevant.
void setDeadlockTimeOutTimeStamp()
static Rank & getInstance()
This operation returns the singleton instance.
virtual void receiveDanglingMessages() override
Answer to MPI Messages.
static ServiceRepository & getInstance()
static void freeBoundaryExchangeDatatype()
static MPI_Datatype getForkDatatype()
Hands out MPI datatype if we work without the LLVM MPI extension.
int _senderDestinationRank
static void initDatatype()
Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation (see the lifecycle sketch after this list).
static void sendAndPollDanglingMessages(const peano4::parallel::TreeEntry &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static MPI_Datatype getGlobalCommunciationDatatype()
static void receiveAndPollDanglingMessages(peano4::parallel::TreeEntry &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static void freeMultiscaleDataExchangeDatatype()
static void shutdownDatatype()
Free the underlying MPI datatype.
std::string toString() const
void setMaster(int value)
static MPI_Datatype getBoundaryExchangeDatatype()
int getSenderRank() const
static MPI_Datatype getMultiscaleDataExchangeDatatype()
static void freeForkDatatype()
static void send(const peano4::parallel::TreeEntry &buffer, int destination, int tag, MPI_Comm communicator)
In DaStGen (the first version), I had a non-static version of the send as well as the receive.
static MPI_Datatype Datatype
Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy initialisation of the datatype.
static MPI_Datatype getJoinDatatype()
static void freeJoinDatatype()
static void receive(peano4::parallel::TreeEntry &buffer, int source, int tag, MPI_Comm communicator)
static void freeGlobalCommunciationDatatype()
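Taken together, these members suggest the following usage pattern. The sketch below is an illustration only, written under the assumption that MPI and the tarch services are already initialised; the include paths, the tag value 23 and tarch::mpi::Rank::getRank() are assumptions, while the TreeEntry calls follow the signatures listed above.

#include "peano4/parallel/TreeEntry.h"
#include "tarch/mpi/Rank.h"

void treeEntryLifecycle() {
  // 1. Build and commit the MPI datatype once per run (lazily with the LLVM
  //    extension, eagerly otherwise).
  peano4::parallel::TreeEntry::initDatatype();

  // 2. Exchange instances with the blocking primitives.
  peano4::parallel::TreeEntry entry;
  MPI_Comm communicator = tarch::mpi::Rank::getInstance().getCommunicator();
  if ( tarch::mpi::Rank::getInstance().getRank() == 0 ) {
    entry.setMaster(0);
    peano4::parallel::TreeEntry::send( entry, 1, 23, communicator );
  }
  else if ( tarch::mpi::Rank::getInstance().getRank() == 1 ) {
    peano4::parallel::TreeEntry::receive( entry, 0, 23, communicator );
  }

  // 3. Free the underlying MPI datatype before MPI_Finalize().
  peano4::parallel::TreeEntry::shutdownDatatype();
}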