33 struct GridTraversalEvent;
44 GridTraversalEvent(
tarch::la::Vector<Dimensions,double> __x,
tarch::la::Vector<Dimensions,double> __h, std::bitset<TwoPowerD> __hasBeenRefined, std::bitset<TwoPowerD> __willBeRefined, std::bitset<TwoPowerD> __isVertexLocal, std::bitset<TwoPowerD> __isParentVertexLocal, std::bitset<TwoPowerD> __isVertexParentOfSubtree, std::bitset<TwoTimesD> __isFaceLocal,
bool __isCellLocal,
bool __isParentCellLocal, std::bitset<TwoPowerD> __isVertexAdjacentToParallelDomainBoundary, std::bitset<TwoTimesD> __isFaceAdjacentToParallelDomainBoundary,
tarch::la::Vector<TwoPowerD,int> __numberOfAdjacentTreesPerVertex, std::bitset<ThreePowerD> __isAdjacentCellLocal,
tarch::la::Vector<TwoPowerD,int> __vertexDataFrom,
tarch::la::Vector<TwoPowerD,int> __vertexDataTo,
tarch::la::Vector<TwoTimesD,int> __faceDataFrom,
tarch::la::Vector<TwoTimesD,int> __faceDataTo,
int __cellData,
tarch::la::Vector<Dimensions,int> __relativePositionToFather,
int __invokingSpacetree,
bool __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing);
49 double getX(
int index)
const;
50 void setX(
int index,
double value);
53 double getH(
int index)
const;
54 void setH(
int index,
double value);
143 [[clang::map_mpi_datatype]]
146 [[clang::map_mpi_datatype]]
149 [[clang::map_mpi_datatype]]
152 [[clang::map_mpi_datatype]]
155 [[clang::map_mpi_datatype]]
158 [[clang::map_mpi_datatype]]
161 [[clang::map_mpi_datatype]]
164 [[clang::map_mpi_datatype]]
167 [[clang::map_mpi_datatype]]
170 [[clang::map_mpi_datatype]]
207 static void send(
const peano4::grid::GridTraversalEvent& buffer,
int destination,
int tag, std::function<
void()> startCommunicationFunctor, std::function<
void()> waitFunctor, MPI_Comm communicator );
228#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
229 [[clang::truncate_mantissa(48)]]
double _x[Dimensions];
231#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
234#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
235 [[clang::truncate_mantissa(48)]]
double _h[Dimensions];
237#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
240#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
243#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
246#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
249#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
252#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
255#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
258#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
261#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
264#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
267#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
270#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
273#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
278#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
281#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
284#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
287#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
291#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
294#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
302#if defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
305#if !defined(__PACKED_ATTRIBUTES_LANGUAGE_EXTENSION__)
317 #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
static Rank & getInstance()
This operation returns the singleton instance.
int getVertexDataTo(int index) const
GridTraversalEvent(tarch::la::Vector< Dimensions, double > __x, tarch::la::Vector< Dimensions, double > __h, std::bitset< TwoPowerD > __hasBeenRefined, std::bitset< TwoPowerD > __willBeRefined, std::bitset< TwoPowerD > __isVertexLocal, std::bitset< TwoPowerD > __isParentVertexLocal, std::bitset< TwoPowerD > __isVertexParentOfSubtree, std::bitset< TwoTimesD > __isFaceLocal, bool __isCellLocal, bool __isParentCellLocal, std::bitset< TwoPowerD > __isVertexAdjacentToParallelDomainBoundary, std::bitset< TwoTimesD > __isFaceAdjacentToParallelDomainBoundary, tarch::la::Vector< TwoPowerD, int > __numberOfAdjacentTreesPerVertex, std::bitset< ThreePowerD > __isAdjacentCellLocal, tarch::la::Vector< TwoPowerD, int > __vertexDataFrom, tarch::la::Vector< TwoPowerD, int > __vertexDataTo, tarch::la::Vector< TwoTimesD, int > __faceDataFrom, tarch::la::Vector< TwoTimesD, int > __faceDataTo, int __cellData, tarch::la::Vector< Dimensions, int > __relativePositionToFather, int __invokingSpacetree, bool __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing)
static MPI_Datatype getMultiscaleDataExchangeDatatype()
static void freeBoundaryExchangeDatatype()
static void sendAndPollDanglingMessages(const peano4::grid::GridTraversalEvent &message, int destination, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
void setIsFaceAdjacentToParallelDomainBoundary(const std::bitset< TwoTimesD > &value)
std::bitset< TwoTimesD > getIsFaceLocal() const
std::bitset< ThreePowerD > _isAdjacentCellLocal
void setH(int index, double value)
void setIsFaceLocal(const std::bitset< TwoTimesD > &value)
GridTraversalEvent(const GridTraversalEvent &copy)
void setRelativePositionToFather(const tarch::la::Vector< Dimensions, int > &value)
bool getInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing() const
static void freeForkDatatype()
bool _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing
void flipIsParentVertexLocal(int index)
bool getIsVertexAdjacentToParallelDomainBoundary(int index) const
void setVertexDataFrom(const tarch::la::Vector< TwoPowerD, int > &value)
tarch::la::Vector< TwoTimesD, int > getFaceDataFrom() const
void setCellData(int value)
void flipIsVertexParentOfSubtree(int index)
std::bitset< TwoPowerD > getIsVertexParentOfSubtree() const
void setIsVertexAdjacentToParallelDomainBoundary(int index, bool value)
static void receive(peano4::grid::GridTraversalEvent &buffer, int source, int tag, MPI_Comm communicator)
bool getIsAdjacentCellLocal(int index) const
void setFaceDataFrom(int index, int value)
std::bitset< TwoPowerD > _isParentVertexLocal
bool getIsFaceLocal(int index) const
std::bitset< TwoPowerD > getHasBeenRefined() const
static MPI_Datatype getBoundaryExchangeDatatype()
void setIsFaceLocal(int index, bool value)
static void freeMultiscaleDataExchangeDatatype()
std::string toString() const
tarch::la::Vector< TwoPowerD, int > _vertexDataFrom
void setFaceDataTo(const tarch::la::Vector< TwoTimesD, int > &value)
void setVertexDataTo(int index, int value)
std::bitset< TwoTimesD > _isFaceAdjacentToParallelDomainBoundary
void setX(const tarch::la::Vector< Dimensions, double > &value)
void setInvokingSpacetree(int value)
void flipIsVertexAdjacentToParallelDomainBoundary(int index)
void setIsFaceAdjacentToParallelDomainBoundary(int index, bool value)
void setNumberOfAdjacentTreesPerVertex(const tarch::la::Vector< TwoPowerD, int > &value)
std::bitset< TwoPowerD > _hasBeenRefined
std::bitset< TwoPowerD > _isVertexLocal
tarch::la::Vector< TwoTimesD, int > _faceDataTo
void setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing(bool value)
static MPI_Datatype getForkDatatype()
Hands out MPI datatype if we work without the LLVM MPI extension.
std::bitset< ThreePowerD > getIsAdjacentCellLocal() const
tarch::la::Vector< TwoTimesD, int > _faceDataFrom
void flipIsAdjacentCellLocal(int index)
void setIsVertexLocal(int index, bool value)
int getInvokingSpacetree() const
tarch::la::Vector< TwoPowerD, int > getVertexDataFrom() const
static void receive(peano4::grid::GridTraversalEvent &buffer, int source, int tag, std::function< void()> startCommunicationFunctor, std::function< void()> waitFunctor, MPI_Comm communicator)
bool getIsFaceAdjacentToParallelDomainBoundary(int index) const
bool getIsVertexParentOfSubtree(int index) const
double getH(int index) const
void setIsAdjacentCellLocal(const std::bitset< ThreePowerD > &value)
std::bitset< TwoPowerD > getIsParentVertexLocal() const
std::bitset< TwoPowerD > _isVertexParentOfSubtree
void setIsAdjacentCellLocal(int index, bool value)
void setIsVertexParentOfSubtree(int index, bool value)
std::bitset< TwoPowerD > getWillBeRefined() const
void setIsParentVertexLocal(int index, bool value)
void setIsCellLocal(bool value)
std::bitset< TwoPowerD > getIsVertexAdjacentToParallelDomainBoundary() const
void setFaceDataFrom(const tarch::la::Vector< TwoTimesD, int > &value)
int getRelativePositionToFather(int index) const
tarch::la::Vector< TwoTimesD, int > getFaceDataTo() const
int getSenderRank() const
static void receiveAndPollDanglingMessages(peano4::grid::GridTraversalEvent &message, int source, int tag, MPI_Comm communicator=tarch::mpi::Rank::getInstance().getCommunicator())
void flipIsVertexLocal(int index)
void setHasBeenRefined(int index, bool value)
static void send(const peano4::grid::GridTraversalEvent &buffer, int destination, int tag, MPI_Comm communicator)
In DaStGen (the first version), I had a non-static version of the send as well as the receive.
tarch::la::Vector< Dimensions, double > getH() const
void flipIsFaceAdjacentToParallelDomainBoundary(int index)
void setIsVertexLocal(const std::bitset< TwoPowerD > &value)
tarch::la::Vector< Dimensions, int > _relativePositionToFather
void setH(const tarch::la::Vector< Dimensions, double > &value)
static void send(const peano4::grid::GridTraversalEvent &buffer, int destination, int tag, std::function< void()> startCommunicationFunctor, std::function< void()> waitFunctor, MPI_Comm communicator)
Alternative to the other send() where I trigger a non-blocking send and then invoke the functor until ...
int getNumberOfAdjacentTreesPerVertex(int index) const
void setIsParentVertexLocal(const std::bitset< TwoPowerD > &value)
void setX(int index, double value)
tarch::la::Vector< TwoPowerD, int > _numberOfAdjacentTreesPerVertex
static void freeGlobalCommunciationDatatype()
bool getIsVertexLocal(int index) const
int _senderDestinationRank
static MPI_Datatype getGlobalCommunciationDatatype()
void setWillBeRefined(int index, bool value)
void setWillBeRefined(const std::bitset< TwoPowerD > &value)
static void shutdownDatatype()
Free the underlying MPI datatype.
static MPI_Datatype getJoinDatatype()
bool getWillBeRefined(int index) const
GridTraversalEvent(ObjectConstruction)
void setIsVertexAdjacentToParallelDomainBoundary(const std::bitset< TwoPowerD > &value)
tarch::la::Vector< TwoPowerD, int > getNumberOfAdjacentTreesPerVertex() const
void setIsVertexParentOfSubtree(const std::bitset< TwoPowerD > &value)
std::bitset< TwoPowerD > _isVertexAdjacentToParallelDomainBoundary
std::bitset< TwoTimesD > _isFaceLocal
void setVertexDataFrom(int index, int value)
tarch::la::Vector< TwoPowerD, int > getVertexDataTo() const
std::bitset< TwoPowerD > getIsVertexLocal() const
bool getIsCellLocal() const
bool getHasBeenRefined(int index) const
static void initDatatype()
Wrapper around getDatatype() to trigger lazy evaluation if we use the lazy initialisation.
void setIsParentCellLocal(bool value)
void flipIsFaceLocal(int index)
bool getIsParentVertexLocal(int index) const
void flipHasBeenRefined(int index)
tarch::la::Vector< Dimensions, double > _h
tarch::la::Vector< Dimensions, double > getX() const
std::bitset< TwoTimesD > getIsFaceAdjacentToParallelDomainBoundary() const
int getVertexDataFrom(int index) const
tarch::la::Vector< Dimensions, int > getRelativePositionToFather() const
static void freeJoinDatatype()
int getFaceDataFrom(int index) const
void setNumberOfAdjacentTreesPerVertex(int index, int value)
void setRelativePositionToFather(int index, int value)
tarch::la::Vector< Dimensions, double > _x
void setVertexDataTo(const tarch::la::Vector< TwoPowerD, int > &value)
void setHasBeenRefined(const std::bitset< TwoPowerD > &value)
double getX(int index) const
bool getIsParentCellLocal() const
int getFaceDataTo(int index) const
tarch::la::Vector< TwoPowerD, int > _vertexDataTo
std::bitset< TwoPowerD > _willBeRefined
static MPI_Datatype Datatype
Whenever we use LLVM's MPI extension (DaStGe), we rely on lazy initialisation of the datatype.
void setFaceDataTo(int index, int value)
void flipWillBeRefined(int index)