std::ostringstream out;
out << "refinementControl=" << (_refinementControl==RefinementControl::Refine ? "Refine" : "")
    << (_refinementControl==RefinementControl::Erase ? "Erase" : "");
out << "offset=" << _offset;
out << "width=" << _width;
return _refinementControl;      // getRefinementControl()
_refinementControl = value;     // setRefinementControl(RefinementControl value)
return _offset(index);          // component-wise offset getter
_offset(index) = value;         // component-wise offset setter
_width(index) = value;          // component-wise width setter
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)

// The five datatype getters (fork, join, boundary exchange, multiscale data
// exchange, global communication) each carry the attribute below and hand out
// the shared static Datatype:
[[clang::map_mpi_datatype]]

// The five matching freeXXXDatatype() routines carry the same attribute and
// release the datatype only if it has actually been constructed:
[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;
}
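The hand-out/free pattern above can be written down as a small self-contained sketch. The names below (namespace sketch, getDatatype(), freeDatatype(), the MPI_Type_contiguous placeholder) are illustrative only and not part of GridControlEvent:

#include <mpi.h>

namespace sketch {
  MPI_Datatype Datatype = MPI_DATATYPE_NULL;

  // Hands out the datatype and constructs it lazily on first use.
  MPI_Datatype getDatatype() {
    if (Datatype == MPI_DATATYPE_NULL) {
      MPI_Type_contiguous( 1, MPI_INT, &Datatype );   // placeholder type; GridControlEvent builds a struct type instead
      MPI_Type_commit( &Datatype );
    }
    return Datatype;
  }

  // Safe to call repeatedly: freeing resets the handle to MPI_DATATYPE_NULL.
  void freeDatatype() {
    if (Datatype != MPI_DATATYPE_NULL) {
      MPI_Type_free( &Datatype );
      Datatype = MPI_DATATYPE_NULL;
    }
  }
}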
return _senderDestinationRank;   // getSenderRank()
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
// One attribute per field: _refinementControl, _offset, _width, _h.
int NumberOfAttributes = 0;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;

MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
int*          blocklen = new int[NumberOfAttributes];
MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

// Element type and block length per field (the counter increments are added
// here for completeness; they sit between the marked lines in the full source).
int counter = 0;
subtypes[counter] = MPI_INT;       // _refinementControl
blocklen[counter] = 1;
counter++;
subtypes[counter] = MPI_DOUBLE;    // _offset
blocklen[counter] = Dimensions;
counter++;
subtypes[counter] = MPI_DOUBLE;    // _width
blocklen[counter] = Dimensions;
counter++;
subtypes[counter] = MPI_DOUBLE;    // _h
blocklen[counter] = Dimensions;

// Displacements are measured against two consecutive dummy instances
// (instances[] is declared earlier in the routine).
MPI_Aint baseFirstInstance;
MPI_Aint baseSecondInstance;
MPI_Get_address( &instances[0], &baseFirstInstance );
MPI_Get_address( &instances[1], &baseSecondInstance );

counter = 0;
MPI_Get_address( &(instances[0]._refinementControl), &disp[counter] ); counter++;
MPI_Get_address( &(instances[0]._offset.data()[0]),  &disp[counter] ); counter++;
MPI_Get_address( &(instances[0]._width.data()[0]),   &disp[counter] ); counter++;
MPI_Get_address( &(instances[0]._h.data()[0]),       &disp[counter] );

// Make the displacements relative to the first field and resize the struct
// type so that its extent equals the distance between two instances. This
// absorbs any padding and lets MPI stride correctly over arrays of events.
MPI_Aint offset = disp[0] - baseFirstInstance;
MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
for (int i=NumberOfAttributes-1; i>=0; i--) {
  disp[i] = disp[i] - disp[0];
}

int errorCode = 0;
MPI_Datatype tmpType;
errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
errorCode += MPI_Type_commit( &Datatype );
errorCode += MPI_Type_free( &tmpType );
if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;
// Touch the datatype getters so that (lazily constructed) datatypes exist before first use.
getBoundaryExchangeDatatype();
getMultiscaleDataExchangeDatatype();
getGlobalCommunciationDatatype();
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
// shutdownDatatype() releases the derived datatypes again.
freeBoundaryExchangeDatatype();
freeMultiscaleDataExchangeDatatype();
freeGlobalCommunciationDatatype();

MPI_Datatype type = Datatype;
MPI_Type_free( &type );
MPI_Send( &buffer, 1, Datatype, destination, tag, communicator );       // blocking send()
MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status );   // blocking receive()
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int         flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    // keep polling; waitFunctor() runs while the message is still in flight
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request receiveRequestHandle;
  int         flag = 0;
  MPI_Status  status;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
}
// Inside sendAndPollDanglingMessages(), the message is forwarded to the functor-based send:
message, destination, tag,
// ... and inside receiveAndPollDanglingMessages(), to the functor-based receive
// (a hedged reconstruction of the full wrapper follows below):
message, source, tag,
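The wrappers themselves are not part of this excerpt. A plausible reconstruction of sendAndPollDanglingMessages(), assuming the deadlock-watch helpers listed at the end of this section are wired in as the two functors (the namespace of ServiceRepository and the exact functor bodies are assumptions):

void peano4::grid::GridControlEvent::sendAndPollDanglingMessages(
  const peano4::grid::GridControlEvent& message, int destination, int tag, MPI_Comm communicator
) {
  peano4::grid::GridControlEvent::send(
    message, destination, tag,
    [&]() {
      // (re)arm the deadlock watchdog when the non-blocking send is issued
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      // while the message is in flight: warn, check for deadlocks, serve pending messages
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridControlEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridControlEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}

receiveAndPollDanglingMessages() would mirror this, calling the functor-based receive with the source rank instead of the destination.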
Referenced members:

void triggerDeadlockTimeOut(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string &comment="")
    Triggers a time out and shuts down the cluster if a timeout is violated.
void setDeadlockWarningTimeStamp()
    Memorise global timeout.
void writeTimeOutWarning(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
    Writes a warning if relevant.
void setDeadlockTimeOutTimeStamp()
static Rank & getInstance()
    This operation returns the singleton instance.
virtual void receiveDanglingMessages() override
    Answer to MPI messages.
static ServiceRepository & getInstance()
int _senderDestinationRank
static MPI_Datatype getBoundaryExchangeDatatype()
static void send(const peano4::grid::GridControlEvent &buffer, int destination, int tag, MPI_Comm communicator)
    In DaStGen (the first version), I had a non-static version of the send as well as the receive (see the usage sketch after this list).
static void sendAndPollDanglingMessages(const peano4::grid::GridControlEvent &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static MPI_Datatype getJoinDatatype()
static void freeGlobalCommunciationDatatype()
void setOffset(const tarch::la::Vector< Dimensions, double > &value)
static void initDatatype()
    Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
static void freeJoinDatatype()
static void freeForkDatatype()
static void shutdownDatatype()
    Free the underlying MPI datatype.
void setH(const tarch::la::Vector< Dimensions, double > &value)
tarch::la::Vector< Dimensions, double > getWidth() const
peano4::grid::GridControlEvent::RefinementControl getRefinementControl() const
static void receive(peano4::grid::GridControlEvent &buffer, int source, int tag, MPI_Comm communicator)
static MPI_Datatype Datatype
    Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy initialisation of the datatype.
static MPI_Datatype getForkDatatype()
    Hands out MPI datatype if we work without the LLVM MPI extension.
static void freeMultiscaleDataExchangeDatatype()
static void freeBoundaryExchangeDatatype()
static MPI_Datatype getGlobalCommunciationDatatype()
void setRefinementControl(RefinementControl value)
static MPI_Datatype getMultiscaleDataExchangeDatatype()
tarch::la::Vector< Dimensions, double > getOffset() const
int getSenderRank() const
std::string toString() const
tarch::la::Vector< Dimensions, double > getH() const
void setWidth(const tarch::la::Vector< Dimensions, double > &value)
static void receiveAndPollDanglingMessages(peano4::grid::GridControlEvent &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
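Taken together, the listed API suggests the following life cycle. This is a hedged usage sketch, not code from the Peano sources: the function name, tag, ranks and field values are made up, the header includes are omitted, and a fill-with-scalar constructor of tarch::la::Vector is assumed.

void exchangeGridControlEvent(int myRank) {
  // 1. Register the derived MPI datatype once after MPI_Init().
  peano4::grid::GridControlEvent::initDatatype();

  const int tag = 42;   // made-up tag for this sketch
  if (myRank == 0) {
    peano4::grid::GridControlEvent event;
    event.setRefinementControl( peano4::grid::GridControlEvent::RefinementControl::Refine );
    event.setOffset( tarch::la::Vector<Dimensions, double>(0.0) );   // assumes scalar-fill constructor
    event.setWidth(  tarch::la::Vector<Dimensions, double>(1.0) );
    event.setH(      tarch::la::Vector<Dimensions, double>(0.1) );
    peano4::grid::GridControlEvent::send( event, 1, tag, MPI_COMM_WORLD );
  } else if (myRank == 1) {
    peano4::grid::GridControlEvent event;
    // The polling variant keeps serving dangling messages while it waits.
    peano4::grid::GridControlEvent::receiveAndPollDanglingMessages( event, 0, tag );
    std::cout << event.toString() << std::endl;
  }

  // 2. Free the datatype again before MPI_Finalize().
  peano4::grid::GridControlEvent::shutdownDatatype();
}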