setAccessNumber( copy.getAccessNumber() );
// toString(): assemble a human-readable dump of all attributes
std::ostringstream out;
out << "level=" << _level;
out << "x=" << getX();
out << "h=" << getH();
out << "inverted=" << _inverted;
out << "evenFlags=" << getEvenFlags();
out << "accessNumber=" << getAccessNumber();
// Element-wise loops over all Dimensions entries, from the vector-valued
// getters and setters
for (int i=0; i<Dimensions; i++) {

for (int i=0; i<Dimensions; i++) {

for (int i=0; i<Dimensions; i++) {

for (int i=0; i<Dimensions; i++) {
// Even-flag accessors (getEvenFlags/setEvenFlags/flipEvenFlags)
std::bitset<Dimensions> result;
for (int i=0; i<Dimensions; i++) result[i] = _evenFlags[i];

for (int i=0; i<Dimensions; i++) _evenFlags[i] = value[i];

return _evenFlags[index];

_evenFlags[index] = value;

_evenFlags[index] = not _evenFlags[index];

// Access-number accessors (getAccessNumber/setAccessNumber)
result(i) = _accessNumber[i];

_accessNumber[i] = value(i);

return _accessNumber[index];

_accessNumber[index] = value;
// Datatype getters ([[clang::map_mpi_datatype]]) and the free*Datatype()
// helpers; the latter release the single static Datatype again.
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)

[[clang::map_mpi_datatype]]

[[clang::map_mpi_datatype]]

[[clang::map_mpi_datatype]]

[[clang::map_mpi_datatype]]

[[clang::map_mpi_datatype]]

[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;

[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;

[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;

[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;

[[clang::map_mpi_datatype]]
if (Datatype != MPI_DATATYPE_NULL){
  MPI_Type_free(&Datatype);
  Datatype = MPI_DATATYPE_NULL;

// getSenderRank()
return _senderDestinationRank;
// Construction of the derived MPI datatype (only compiled without the LLVM
// MPI extension)
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)

// one entry per attribute: level, x, h, inverted, evenFlags, accessNumber
int NumberOfAttributes = 0;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;
NumberOfAttributes++;

MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
int* blocklen = new int[NumberOfAttributes];
MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

subtypes[counter] = MPI_INT;
blocklen[counter] = 1;

subtypes[counter] = MPI_DOUBLE;
blocklen[counter] = Dimensions;

subtypes[counter] = MPI_DOUBLE;
blocklen[counter] = Dimensions;

subtypes[counter] = MPI_BYTE;
blocklen[counter] = 1;

subtypes[counter] = MPI_UNSIGNED_LONG;
blocklen[counter] = 1;

subtypes[counter] = MPI_INT;

// use two consecutive instances to derive the base address and true extent
MPI_Aint baseFirstInstance;
MPI_Aint baseSecondInstance;
MPI_Get_address( &instances[0], &baseFirstInstance );
MPI_Get_address( &instances[1], &baseSecondInstance );

MPI_Get_address( &(instances[0]._level), &disp[counter] );
MPI_Get_address( &(instances[0]._x.data()[0]), &disp[counter] );
MPI_Get_address( &(instances[0]._h.data()[0]), &disp[counter] );
MPI_Get_address( &(instances[0]._inverted), &disp[counter] );
MPI_Get_address( &(instances[0]._evenFlags), &disp[counter] );
MPI_Get_address( &(instances[0]._accessNumber.data()[0]), &disp[counter] );

// make all displacements relative to the first attribute
MPI_Aint offset = disp[0] - baseFirstInstance;
MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
for (int i=NumberOfAttributes-1; i>=0; i--) {
  disp[i] = disp[i] - disp[0];

MPI_Datatype tmpType;
errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
errorCode += MPI_Type_commit( &Datatype );
errorCode += MPI_Type_free( &tmpType );
if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;
// Presumably initDatatype(): touch the datatype getters so that lazy
// initialisation kicks in
getBoundaryExchangeDatatype();
getMultiscaleDataExchangeDatatype();
getGlobalCommunciationDatatype();

// shutdownDatatype(): free the underlying MPI datatype again
#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
freeBoundaryExchangeDatatype();
freeMultiscaleDataExchangeDatatype();
freeGlobalCommunciationDatatype();

MPI_Datatype type = Datatype;
MPI_Type_free( &type );
// send()/receive(): blocking exchange of a single AutomatonState
MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);

MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
// Non-blocking send: kick off MPI_Isend, then poll via MPI_Test and call the
// functors until the message has left
std::function<void()> startCommunicationFunctor,
std::function<void()> waitFunctor,
MPI_Comm communicator

MPI_Request sendRequestHandle;
MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
startCommunicationFunctor();
MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );

// Non-blocking receive counterpart: MPI_Irecv plus polling via MPI_Test
std::function<void()> startCommunicationFunctor,
std::function<void()> waitFunctor,
MPI_Comm communicator

MPI_Request receiveRequestHandle;
MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
MPI_Test( &receiveRequestHandle, &flag, &status );
startCommunicationFunctor();
MPI_Test( &receiveRequestHandle, &flag, &status );

// argument fragments of the forwarding calls in sendAndPollDanglingMessages()
// and receiveAndPollDanglingMessages()
message, destination, tag,

message, source, tag,

Declarations referenced in the listing above:

#define DimensionsTimesTwo
void triggerDeadlockTimeOut(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1, const std::string &comment="")
Triggers a time out and shuts down the cluster if a timeout is violated.
void setDeadlockWarningTimeStamp()
Memorise global timeout.
void writeTimeOutWarning(const std::string &className, const std::string &methodName, int communicationPartnerRank, int tag, int numberOfExpectedMessages=1)
Writes a warning if relevant.
void setDeadlockTimeOutTimeStamp()
static Rank & getInstance()
This operation returns the singleton instance.
virtual void receiveDanglingMessages() override
Answer to MPI Messages.
static ServiceRepository & getInstance()
tarch::la::Vector< Dimensions, double > getX() const
void setAccessNumber(const tarch::la::Vector< DimensionsTimesTwo, int > &value)
std::string toString() const
std::bitset< Dimensions > getEvenFlags() const
static void freeBoundaryExchangeDatatype()
void setInverted(bool value)
static MPI_Datatype getBoundaryExchangeDatatype()
AutomatonState & operator=(const AutomatonState &other)
static void send(const peano4::grid::AutomatonState &buffer, int destination, int tag, MPI_Comm communicator)
In DaStGen (the first version), I had non-static versions of both the send and the receive. Usage sketches for the blocking and the polling variants follow at the end of this list.
int getSenderRank() const
tarch::la::Vector< Dimensions, double > getH() const
static MPI_Datatype Datatype
Whenever we use LLVM's MPI extension (DaStGen), we rely on lazy initialisation of the datatype.
static void shutdownDatatype()
Free the underlying MPI datatype.
static MPI_Datatype getJoinDatatype()
void setH(const tarch::la::Vector< Dimensions, double > &value)
static MPI_Datatype getGlobalCommunciationDatatype()
static MPI_Datatype getForkDatatype()
Hands out MPI datatype if we work without the LLVM MPI extension.
static void freeMultiscaleDataExchangeDatatype()
static void freeGlobalCommunciationDatatype()
void flipEvenFlags(int index)
tarch::la::Vector< DimensionsTimesTwo, int > getAccessNumber() const
static void freeForkDatatype()
static void sendAndPollDanglingMessages(const peano4::grid::AutomatonState &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
static void initDatatype()
Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
void setEvenFlags(const std::bitset< Dimensions > &value)
void setX(const tarch::la::Vector< Dimensions, double > &value)
static MPI_Datatype getMultiscaleDataExchangeDatatype()
int _senderDestinationRank
static void freeJoinDatatype()
static void receive(peano4::grid::AutomatonState &buffer, int source, int tag, MPI_Comm communicator)
static void receiveAndPollDanglingMessages(peano4::grid::AutomatonState &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
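
To make the lifecycle above easier to follow, here is a minimal usage sketch of the blocking exchange path. It is illustrative only and not part of the generated sources: the header paths, the partner rank, the tag value, and the assumption that AutomatonState is default-constructible are mine; the routines it calls (initDatatype(), shutdownDatatype(), send(), receive(), setInverted(), setEvenFlags()) are the ones documented above, and Dimensions is the usual compile-time constant of a Peano build.

#include <bitset>
#include <mpi.h>

#include "peano4/grid/AutomatonState.h"   // assumed header location
#include "tarch/mpi/Rank.h"               // assumed header location

// Exchange one AutomatonState between this rank and partnerRank.
void exchangeState(bool iAmSender, int partnerRank) {
  using peano4::grid::AutomatonState;

  // Build the derived MPI datatype, or merely trigger its lazy initialisation.
  AutomatonState::initDatatype();

  AutomatonState state;                              // assumption: default-constructible
  state.setInverted(false);
  state.setEvenFlags(std::bitset<Dimensions>{});

  MPI_Comm communicator = tarch::mpi::Rank::getInstance().getCommunicator();
  const int tag = 42;                                // illustrative tag

  if (iAmSender) {
    AutomatonState::send(state, partnerRank, tag, communicator);
  } else {
    AutomatonState::receive(state, partnerRank, tag, communicator);
  }

  // Release the derived datatype before MPI itself is shut down.
  AutomatonState::shutdownDatatype();
}

The pairing mirrors the descriptions above: initDatatype() either builds the struct type directly or only touches the getters to trigger their lazy initialisation, and shutdownDatatype() frees the underlying MPI datatype again.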
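The *AndPollDanglingMessages variants take the same message, rank and tag arguments (the communicator defaults to tarch::mpi::Rank::getInstance().getCommunicator()) but issue the transfer non-blockingly and keep polling while it is in flight, presumably so that messages reported via receiveDanglingMessages() can be served in the meantime. A sketch under the same assumptions as above:

#include "peano4/grid/AutomatonState.h"   // assumed header location

// Same exchange as before, but without blocking inside MPI while waiting.
void exchangeStatePolling(bool iAmSender, int partnerRank) {
  peano4::grid::AutomatonState state;     // assumption: default-constructible
  const int tag = 42;                     // illustrative tag

  if (iAmSender) {
    peano4::grid::AutomatonState::sendAndPollDanglingMessages(state, partnerRank, tag);
  } else {
    peano4::grid::AutomatonState::receiveAndPollDanglingMessages(state, partnerRank, tag);
  }
}

The usual motivation for such polling wrappers is that a rank does not sit inside MPI_Send or MPI_Recv while other services still have pending traffic.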