#ifdef UseTestSpecificCompilerSettings
#pragma optimize("",off)
#endif
peano4::parallel::tests::PingPongTest::PingPongTest():
  TestCase( "peano4::parallel::tests::PingPongTest" ) {
}
#if defined(Parallel) and not defined(UseSmartMPI)
// Plain ping-pong over a built-in MPI type: rank 0 posts a blocking send of a
// single int to rank 1, rank 1 picks it up with the matching blocking receive,
// and both ranks synchronise on a barrier afterwards.
MPI_Send(&out,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Recv(&in,1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
MPI_Barrier(MPI_COMM_WORLD);
#endif
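// The excerpt above keeps only the bare MPI calls. A minimal sketch of how the
// two ranks are presumably dispatched around them; the payload value, the rank
// query via tarch::mpi::Rank and the closing validateEquals are illustrative
// assumptions, not part of the excerpt:
{
  int out = 23;
  int in  = 0;
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    MPI_Send(&out,1,MPI_INT,1,0,MPI_COMM_WORLD);
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    MPI_Recv(&in,1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    validateEquals( in, out );
  }
  MPI_Barrier(MPI_COMM_WORLD);
}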
[[maybe_unused]] int testErrors = 0;
// Helper task used by the multithreaded ping-pong tests: sends one int (23 plus
// its tag) to rank 1, either with MPI_Send or with MPI_Isend/MPI_Wait.
class PingPongSendTask: public tarch::multicore::Task {
  private:
    const int  _data;
    const bool _blockingMPI;
  public:
    PingPongSendTask( int data, bool blockingMPI ):
      Task(Task::DontFuse,Task::DefaultPriority),
      _data(data),
      _blockingMPI(blockingMPI) {}
    // Body of the task's run() operation: pick the blocking or the non-blocking
    // send path depending on the flag passed to the constructor.
    #if defined(Parallel) and not defined(UseSmartMPI)
    const int out = 23+_data;
    if (_blockingMPI) {
      MPI_Send(&out,1,MPI_INT,1,_data,MPI_COMM_WORLD);
      logDebug(
        "PingPongSendTask()",
        "sent blocking message " << out <<
        " to rank 1 with tag " << _data );
    }
    else {
      MPI_Request request;
      MPI_Isend(&out,1,MPI_INT,1,_data,MPI_COMM_WORLD,&request);
      logDebug(
        "PingPongSendTask()",
        "sent non-blocking message " << out <<
        " to rank 1 with tag " << _data );
      MPI_Wait(&request,MPI_STATUS_IGNORE);
    }
    #endif
// Counterpart task: receives the int on rank 1 and checks the payload against
// the expected value 23 plus the tag.
class PingPongReceiveTask: public tarch::multicore::Task {
  private:
    const int  _data;
    const bool _blockingMPI;
  public:
    PingPongReceiveTask( int expectedData, bool blockingMPI ):
      Task(Task::DontFuse,Task::DefaultPriority),
      _data(expectedData),
      _blockingMPI(blockingMPI) {}
    // Body of the task's run() operation: the message travels from rank 0 to
    // rank 1, either through a blocking receive or through MPI_Irecv plus MPI_Wait.
    #if defined(Parallel) and not defined(UseSmartMPI)
    int in = 0;
    logInfo(
      "PingPongReceiveTask()",
      "receive message from rank 0 with tag " << _data );
    if (_blockingMPI) {
      MPI_Recv(&in,1,MPI_INT,0,_data,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    }
    else {
      MPI_Request request;
      MPI_Irecv(&in,1,MPI_INT,0,_data,MPI_COMM_WORLD,&request);
      MPI_Wait(&request,MPI_STATUS_IGNORE);
    }
    logInfo(
      "PingPongReceiveTask()",
      "got content " << in );
    if ( in != 23+_data ) {
      logError(
        "testMultithreadedPingPong()",
        "received " << in <<
        " instead of " << (23+_data) <<
        " (blocking mode=" << _blockingMPI <<
        ", tag=" << _data <<
        ")" );
      testErrors++;   // assumption: the shared counter declared above records the failure
    }
    #endif
// testMultithreadedPingPongWithBlockingReceives: rank 0 issues plain blocking
// sends, rank 1 wraps the matching blocking receives into tasks.
#if defined(Parallel) and not defined(UseSmartMPI)
MPI_Send(&out,1,MPI_INT,1,i,MPI_COMM_WORLD);            // rank 0, one send per tag i
std::vector< tarch::multicore::Task* > tasks;           // rank 1
tasks.push_back( new PingPongReceiveTask(i,true) );     // blocking receive per tag i
MPI_Barrier(MPI_COMM_WORLD);
#endif
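// The six multithreaded test bodies are heavily elided in this listing. A sketch
// of the overall pattern they follow, here for the blocking-receive variant; the
// message count, the loops and the explicit spawnAndWait call are assumptions
// added for illustration:
{
  constexpr int numberOfMessages = 4;   // hypothetical count
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    // Rank 0 issues one plain blocking send per message; the tag encodes the index.
    for (int i=0; i<numberOfMessages; i++) {
      const int out = 23+i;
      MPI_Send(&out,1,MPI_INT,1,i,MPI_COMM_WORLD);
    }
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    // Rank 1 wraps the matching receives into tasks and runs them concurrently
    // through the fork-join helper tarch::multicore::spawnAndWait().
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<numberOfMessages; i++) {
      tasks.push_back( new PingPongReceiveTask(i,true) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  MPI_Barrier(MPI_COMM_WORLD);
}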
// testMultithreadedPingPongWithNonblockingReceives: identical pattern, but the
// receive tasks use MPI_Irecv plus MPI_Wait instead of a blocking MPI_Recv.
#if defined(Parallel) and not defined(UseSmartMPI)
MPI_Send(&out,1,MPI_INT,1,i,MPI_COMM_WORLD);            // rank 0, one send per tag i
std::vector< tarch::multicore::Task* > tasks;           // rank 1
tasks.push_back( new PingPongReceiveTask(i,false) );    // non-blocking receive
MPI_Barrier(MPI_COMM_WORLD);
#endif
// testMultithreadedPingPongWithBlockingSends: now rank 0 wraps the blocking
// sends into tasks while rank 1 receives with plain blocking MPI_Recv calls.
#if defined(Parallel) and not defined(UseSmartMPI)
std::vector< tarch::multicore::Task* > tasks;           // rank 0
tasks.push_back( new PingPongSendTask(i,true) );        // blocking send per tag i
MPI_Recv(&in,1,MPI_INT,0,i,MPI_COMM_WORLD,MPI_STATUS_IGNORE);   // rank 1
MPI_Barrier(MPI_COMM_WORLD);
#endif
// testMultithreadedPingPongWithNonblockingSends: as before, but the send tasks
// take the MPI_Isend/MPI_Wait path.
#if defined(Parallel) and not defined(UseSmartMPI)
std::vector< tarch::multicore::Task* > tasks;           // rank 0
tasks.push_back( new PingPongSendTask(i,false) );       // non-blocking send per tag i
MPI_Recv(&in,1,MPI_INT,0,i,MPI_COMM_WORLD,MPI_STATUS_IGNORE);   // rank 1
MPI_Barrier(MPI_COMM_WORLD);
#endif
// testMultithreadedPingPongWithBlockingSendsAndReceives: both ranks move their
// MPI calls into tasks, so sends and receives run concurrently on each side.
#if defined(Parallel) and not defined(UseSmartMPI)
std::vector< tarch::multicore::Task* > tasks;           // inside the rank-0 branch
tasks.push_back( new PingPongSendTask(i,true) );        // blocking sends
std::vector< tarch::multicore::Task* > tasks;           // inside the rank-1 branch
tasks.push_back( new PingPongReceiveTask(i,true) );     // blocking receives
MPI_Barrier(MPI_COMM_WORLD);
#endif
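// In the sends-and-receives variants both ranks spawn their own task set. A
// sketch with both branches driven through spawnAndWait; the message count and
// the loops are again illustrative assumptions:
{
  constexpr int numberOfMessages = 4;   // hypothetical count
  std::vector< tarch::multicore::Task* > tasks;
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    for (int i=0; i<numberOfMessages; i++) {
      tasks.push_back( new PingPongSendTask(i,true) );
    }
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    for (int i=0; i<numberOfMessages; i++) {
      tasks.push_back( new PingPongReceiveTask(i,true) );
    }
  }
  tarch::multicore::spawnAndWait(tasks);
  MPI_Barrier(MPI_COMM_WORLD);
}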
// testMultithreadedPingPongWithNonblockingSendsAndReceives: same as above, but
// every task uses the non-blocking MPI_Isend/MPI_Irecv plus MPI_Wait path.
#if defined(Parallel) and not defined(UseSmartMPI)
std::vector< tarch::multicore::Task* > tasks;           // inside the rank-0 branch
tasks.push_back( new PingPongSendTask(i,false) );       // non-blocking sends
std::vector< tarch::multicore::Task* > tasks;           // inside the rank-1 branch
tasks.push_back( new PingPongReceiveTask(i,false) );    // non-blocking receives
MPI_Barrier(MPI_COMM_WORLD);
#endif
#if defined(Parallel) and not defined(UseSmartMPI)
// Test body elided in this excerpt.
MPI_Barrier(MPI_COMM_WORLD);
#endif
#if defined(Parallel) and not defined(UseSmartMPI)
// The arguments below presumably belong to a DaStGen-generated
// receive(buffer, source, tag, communicator) call filling inThroughDaStGen:
  inThroughDaStGen, 0, 0,
MPI_Barrier(MPI_COMM_WORLD);
#endif
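// Sketch of a ping-pong with a DaStGen-generated type in terms of its public
// API, here using peano4::parallel::StartTraversalMessage; the payload value
// and the tag are illustrative assumptions:
{
  peano4::parallel::StartTraversalMessage out;
  peano4::parallel::StartTraversalMessage in;
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    out.setStepIdentifier(23);
    peano4::parallel::StartTraversalMessage::send( out, 1, 0, MPI_COMM_WORLD );
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    peano4::parallel::StartTraversalMessage::receive( in, 0, 0, MPI_COMM_WORLD );
    validateEquals( in.getStepIdentifier(), 23 );
  }
  MPI_Barrier(MPI_COMM_WORLD);
}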
#if defined(Parallel) and not defined(UseSmartMPI)
// Array exchange (excerpt): every received message is checked against the one
// that was sent; the pairs below feed the per-entry validation macros.
  in[0].getStepIdentifier(), out[0].getStepIdentifier(),
  in[1].getStepIdentifier(), out[1].getStepIdentifier(),
  in[2].getStepIdentifier(), out[2].getStepIdentifier(),
  in[3].getStepIdentifier(), out[3].getStepIdentifier(),
MPI_Barrier(MPI_COMM_WORLD);
#endif
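// Sketch of a four-message array round trip matching the checks above; the use
// of the element-wise generated send/receive (rather than a single exchange over
// a derived datatype) is an assumption made for illustration:
{
  peano4::parallel::StartTraversalMessage out[4];
  peano4::parallel::StartTraversalMessage in[4];
  if ( tarch::mpi::Rank::getInstance().getRank()==0 ) {
    for (int i=0; i<4; i++) {
      out[i].setStepIdentifier(23+i);
      peano4::parallel::StartTraversalMessage::send( out[i], 1, 0, MPI_COMM_WORLD );
    }
  }
  else if ( tarch::mpi::Rank::getInstance().getRank()==1 ) {
    for (int i=0; i<4; i++) {
      peano4::parallel::StartTraversalMessage::receive( in[i], 0, 0, MPI_COMM_WORLD );
      validateEquals( in[i].getStepIdentifier(), 23+i );
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
}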
#if defined(Parallel) and not defined(UseSmartMPI)
// Test body elided in this excerpt.
MPI_Barrier(MPI_COMM_WORLD);
#endif
void peano4::parallel::tests::PingPongTest::run() {
  // Triggered by the TestCaseCollection; runs the individual test methods.
  testMethod( testDaStGenTypeStartTraversalMessage );
  testMethod( testDaStGenArrayTreeManagementMessage );
  testMethod( testMultithreadedPingPongWithBlockingReceives );
  testMethod( testMultithreadedPingPongWithBlockingSends );
  testMethod( testMultithreadedPingPongWithBlockingSendsAndReceives );
  testMethod( testMultithreadedPingPongWithNonblockingReceives );
  testMethod( testMultithreadedPingPongWithNonblockingSends );
  testMethod( testMultithreadedPingPongWithNonblockingSendsAndReceives );
  // (further testMethod calls elided in this excerpt)
}
#ifdef UseTestSpecificCompilerSettings
#pragma optimize("",on)
#endif