// toString()
std::ostringstream out;
out << "value=" << _value;
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)

// The five datatype accessors (getForkDatatype(), getJoinDatatype(),
// getBoundaryExchangeDatatype(), getMultiscaleDataExchangeDatatype(),
// getGlobalCommunciationDatatype()) each carry the attribute
[[clang::map_mpi_datatype]]
// and hand out the static Datatype member; their bodies are elided in this listing.
// The matching free functions (freeForkDatatype(), freeJoinDatatype(),
// freeBoundaryExchangeDatatype(), freeMultiscaleDataExchangeDatatype(),
// freeGlobalCommunciationDatatype()) all share the same attribute and body:
[[clang::map_mpi_datatype]]
void tarch::mpi::IntegerMessage::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL) {
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}
// getSenderRank()
return _senderDestinationRank;
// initDatatype(): construct and commit the MPI datatype if we work without the
// LLVM MPI attributes extension. The declarations of counter, errorCode and the
// two sample instances are elided in this listing.
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
int NumberOfAttributes = 0;
NumberOfAttributes++;   // exactly one attribute: _value

MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
int*          blocklen = new int[NumberOfAttributes];
MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

subtypes[counter] = MPI_INT;
blocklen[counter] = 1;

MPI_Aint baseFirstInstance;
MPI_Aint baseSecondInstance;
MPI_Get_address( &instances[0], &baseFirstInstance );
MPI_Get_address( &instances[1], &baseSecondInstance );
MPI_Get_address( &(instances[0]._value), &disp[counter] );

// Make the displacements relative to the first attribute and resize the type so
// that its extent equals the spacing between two consecutive instances.
MPI_Aint offset = disp[0] - baseFirstInstance;
MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
for (int i=NumberOfAttributes-1; i>=0; i--) {
  disp[i] = disp[i] - disp[0];
}

MPI_Datatype tmpType;
errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
errorCode += MPI_Type_commit( &Datatype );
errorCode += MPI_Type_free( &tmpType );
if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;
// With the LLVM MPI extension, initDatatype() instead merely touches the
// accessors to trigger their lazy initialisation:
getBoundaryExchangeDatatype();
getMultiscaleDataExchangeDatatype();
getGlobalCommunciationDatatype();
// shutdownDatatype(): free the underlying MPI datatype.
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
freeBoundaryExchangeDatatype();
freeMultiscaleDataExchangeDatatype();
freeGlobalCommunciationDatatype();
#else   // with the LLVM MPI extension there is only the one lazily built datatype
MPI_Datatype type = Datatype;
MPI_Type_free( &type );
#endif
// send(): blocking send of one message
MPI_Send( &buffer, 1, Datatype, destination, tag, communicator );

// receive(): blocking receive of one message
MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status );
// sendAndPollDanglingMessages(): besides the message, destination and tag, the
// full overload accepts two hooks plus the communicator:
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
// It issues a non-blocking send and then polls the request, invoking the
// functors while it waits (the polling loop is elided in this listing):
MPI_Request sendRequestHandle;
MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
startCommunicationFunctor();
// ...
MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
// receiveAndPollDanglingMessages(): the mirror image on the receive side.
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
MPI_Request receiveRequestHandle;
MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
MPI_Test( &receiveRequestHandle, &flag, &status );
startCommunicationFunctor();
// ... (polling loop elided)
MPI_Test( &receiveRequestHandle, &flag, &status );
// The convenience overloads with default communicator forward to the full
// versions, passing default functors (the deadlock time-out helpers and
// receiveDanglingMessages() listed below):
//   sendAndPollDanglingMessages( message, destination, tag, ... );
//   receiveAndPollDanglingMessages( message, source, tag, ... );
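As a usage illustration, a minimal sketch of the short overloads; the include path and the default constructor are assumptions, not confirmed by this listing:

#include "tarch/mpi/IntegerMessage.h"   // assumed include path

void exchangeWithPolling(int partnerRank, int tag) {
  tarch::mpi::IntegerMessage outMessage;   // assuming the generated default constructor
  tarch::mpi::IntegerMessage inMessage;

  // Both calls keep polling for dangling messages until their request completes,
  // using the default communicator tarch::mpi::Rank::getInstance().getCommunicator().
  tarch::mpi::IntegerMessage::sendAndPollDanglingMessages( outMessage, partnerRank, tag );
  tarch::mpi::IntegerMessage::receiveAndPollDanglingMessages( inMessage, partnerRank, tag );
}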
Referenced helpers (deadlock time-out bookkeeping and dangling-message handling):

void triggerDeadlockTimeOut(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string &comment="")
    Triggers a time out and shuts down the cluster if a timeout is violated.
void setDeadlockWarningTimeStamp()
    Memorise global timeout.
void setDeadlockTimeOutTimeStamp()
void writeTimeOutWarning(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
    Writes a warning if relevant.
static Rank & getInstance()
    This operation returns the singleton instance.
static ServiceRepository & getInstance()
virtual void receiveDanglingMessages() override
    Answer to MPI Messages.

tarch::mpi::IntegerMessage members:

static MPI_Datatype Datatype
    Whenever we use LLVM's MPI extension (DaStGe), we rely on lazy initialisation of the datatype.
int _senderDestinationRank
std::string toString() const
int getSenderRank() const
static MPI_Datatype getForkDatatype()
    Hands out MPI datatype if we work without the LLVM MPI extension.
static MPI_Datatype getJoinDatatype()
static MPI_Datatype getBoundaryExchangeDatatype()
static MPI_Datatype getMultiscaleDataExchangeDatatype()
static MPI_Datatype getGlobalCommunciationDatatype()
static void freeForkDatatype()
static void freeJoinDatatype()
static void freeBoundaryExchangeDatatype()
static void freeMultiscaleDataExchangeDatatype()
static void freeGlobalCommunciationDatatype()
static void initDatatype()
    Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation (see the lifecycle sketch after this list).
static void shutdownDatatype()
    Free the underlying MPI datatype.
static void send(const tarch::mpi::IntegerMessage &buffer, int destination, int tag, MPI_Comm communicator)
    In DaStGen (the first version), I had a non-static version of the send as well as the receive.
static void receive(tarch::mpi::IntegerMessage &buffer, int source, int tag, MPI_Comm communicator)
static void sendAndPollDanglingMessages(const tarch::mpi::IntegerMessage &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static void receiveAndPollDanglingMessages(tarch::mpi::IntegerMessage &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
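A minimal lifecycle sketch tying the members above together. The include paths, the default constructor and the helper function name are assumptions for illustration; only the member calls themselves come from the listing above:

#include <mpi.h>
#include "tarch/mpi/IntegerMessage.h"   // assumed include path
#include "tarch/mpi/Rank.h"             // assumed include path

void exchangeOneIntegerMessage(int partnerRank, int tag, bool iAmSender) {
  // Register the MPI datatype once (typically right after MPI start-up).
  tarch::mpi::IntegerMessage::initDatatype();

  tarch::mpi::IntegerMessage message;   // assuming the generated default constructor
  MPI_Comm communicator = tarch::mpi::Rank::getInstance().getCommunicator();

  if (iAmSender) {
    tarch::mpi::IntegerMessage::send( message, partnerRank, tag, communicator );
  } else {
    tarch::mpi::IntegerMessage::receive( message, partnerRank, tag, communicator );
  }

  // Release the underlying MPI datatype before MPI is shut down.
  tarch::mpi::IntegerMessage::shutdownDatatype();
}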