Peano
PingPongTest.cpp
#include "PingPongTest.h"

// The remaining includes were lost in the extracted listing; the set below is
// inferred from the symbols used in this file and may deviate from the original.
#include "peano4/parallel/StartTraversalMessage.h"
#include "peano4/parallel/TreeManagementMessage.h"

#include "tarch/la/Vector.h"
#include "tarch/mpi/Rank.h"
#include "tarch/mpi/IntegerMessage.h"

#include "tarch/multicore/Core.h"
#include "tarch/multicore/Task.h"
#include "tarch/multicore/multicore.h"
#include "tarch/multicore/orchestration/Hardcoded.h"

tarch::logging::Log peano4::parallel::tests::PingPongTest::_log("peano4::parallel::tests::PingPongTest");


#ifdef UseTestSpecificCompilerSettings
#pragma optimize("",off)
#endif


peano4::parallel::tests::PingPongTest::PingPongTest():
  TestCase( "peano4::parallel::tests::PingPongTest" ) {
}

void peano4::parallel::tests::PingPongTest::testBuiltInType() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  int out = 23;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    MPI_Send(&out,1,MPI_INT,1,0,MPI_COMM_WORLD);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    int in = 25;
    MPI_Recv(&in,1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    validateEquals( in, out );
  }
  MPI_Barrier(MPI_COMM_WORLD);
  #endif
}


namespace {
  [[maybe_unused]] int testErrors = 0;

  /**
   * Task that sends one integer (23+_data) to rank 1, using _data as MPI tag.
   * Depending on _blockingMPI, it uses MPI_Send or MPI_Isend plus MPI_Wait.
   */
  class PingPongSendTask: public tarch::multicore::Task {
    private:
      const int  _data;
      const bool _blockingMPI;
    public:
      PingPongSendTask(int data, bool blockingMPI):
        Task(Task::DontFuse,Task::DefaultPriority),
        _data(data),
        _blockingMPI(blockingMPI) {}

      bool run() {
        #if defined(Parallel) and not defined(UseSmartMPI)
        const int out = 23+_data;
        static tarch::logging::Log _log( "peano4::parallel::tests::PingPongSendTask" );
        if (_blockingMPI) {
          MPI_Send(&out,1,MPI_INT,1,_data,MPI_COMM_WORLD);
          logDebug( "PingPongSendTask()", "sent blocking message " << out << " to rank 1 with tag " << _data );
        }
        else {
          MPI_Request request;
          MPI_Isend(&out,1,MPI_INT,1,_data,MPI_COMM_WORLD,&request);
          logDebug( "PingPongSendTask()", "sent non-blocking message " << out << " to rank 1 with tag " << _data );
          MPI_Wait(&request,MPI_STATUS_IGNORE);
        }
        #endif
        return false;
      }
  };


  /**
   * Counterpart to PingPongSendTask: receives one integer from rank 0 with tag
   * _data and increments testErrors if the content does not equal 23+_data.
   */
  class PingPongReceiveTask: public tarch::multicore::Task {
    private:
      const int  _data;
      const bool _blockingMPI;
    public:
      PingPongReceiveTask(int expectedData, bool blockingMPI):
        Task(Task::DontFuse,Task::DefaultPriority),
        _data(expectedData),
        _blockingMPI(blockingMPI) {}

      bool run() {
        #if defined(Parallel) and not defined(UseSmartMPI)
        int in = -12;
        static tarch::logging::Log _log( "peano4::parallel::tests::PingPongReceiveTask" );
        logInfo( "PingPongReceiveTask()", "receive message from rank 0 with tag " << _data );
        if (_blockingMPI) {
          MPI_Recv(&in,1,MPI_INT,0,_data,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        }
        else {
          MPI_Request request;
          MPI_Irecv(&in,1,MPI_INT,0,_data,MPI_COMM_WORLD,&request);
          MPI_Wait(&request,MPI_STATUS_IGNORE);
        }
        logInfo( "PingPongReceiveTask()", "got content " << in );
        if ( in != 23+_data) {
          logError( "testMultithreadedPingPong()", "received " << in << " instead of " << (23+_data) << " (blocking mode=" << _blockingMPI << ", tag=" << _data << ")" );
          testErrors++;
        }
        #endif
        return false;
      }
  };
}

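// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the two task types above
// issue MPI calls from worker threads, so the multithreaded tests below only
// make sense if MPI has been initialised with full thread support. Peano
// performs its MPI initialisation elsewhere (in the tarch::mpi layer); a
// minimal, hand-rolled initialisation would look roughly like
//
//   int provided;
//   MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
//   if (provided < MPI_THREAD_MULTIPLE) {
//     // concurrent sends/receives from tasks are not safe with this MPI build
//   }
//
// where argc/argv are the usual main() arguments.
// ---------------------------------------------------------------------------
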

// NOTE: The function signatures, the orchestration handling, the loop bounds
// (tarch::multicore::Core::getInstance().getNumberOfThreads()), and the
// spawnAndWait() calls in this and the following ping-pong tests were lost in
// the extracted listing; they are reconstructed from the symbols the listing
// references and may deviate in detail from the original file.
void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithBlockingReceives() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      out = 23 + i;
      MPI_Send(&out,1,MPI_INT,1,i,MPI_COMM_WORLD);
    }
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongReceiveTask(i,true) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );


  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithNonblockingReceives() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      out = 23 + i;
      MPI_Send(&out,1,MPI_INT,1,i,MPI_COMM_WORLD);
    }
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongReceiveTask(i,false) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );


  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithBlockingSends() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out = 23;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongSendTask(i,true) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    int in = -12;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      MPI_Recv(&in,1,MPI_INT,0,i,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      validateEquals( in, out+i );
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );


  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithNonblockingSends() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out = 23;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongSendTask(i,false) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    int in = -12;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      MPI_Recv(&in,1,MPI_INT,0,i,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      validateEquals( in, out+i );
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );


  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithBlockingSendsAndReceives() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out = 23;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongSendTask(i,true) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongReceiveTask(i,true) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );

  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testMultithreadedPingPongWithNonblockingSendsAndReceives() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::multicore::orchestration::Strategy* originalOrchestration = tarch::multicore::swapOrchestration( tarch::multicore::orchestration::Hardcoded::createNative() );

  int out = 23;
  testErrors = 0;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongSendTask(i,false) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    std::vector< tarch::multicore::Task* > tasks;
    for (int i=0; i<tarch::multicore::Core::getInstance().getNumberOfThreads(); i++) {
      tasks.push_back( new PingPongReceiveTask(i,false) );
    }
    tarch::multicore::spawnAndWait(tasks);
  }
  MPI_Barrier(MPI_COMM_WORLD);
  validate( testErrors==0 );

  tarch::multicore::setOrchestration( originalOrchestration );
  #endif
}


void peano4::parallel::tests::PingPongTest::testDaStGenTypeStartTraversalMessage() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  StartTraversalMessage out;
  out.setStepIdentifier(23);
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    StartTraversalMessage::send( out, 1, 0, MPI_COMM_WORLD );
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    StartTraversalMessage in;
    StartTraversalMessage::receive( in, 0, 0, MPI_COMM_WORLD );
    // The exact validation macro used here was lost in the listing; it is
    // reconstructed by analogy with the other DaStGen tests.
    validateEqualsWithParams2( in.getStepIdentifier(), out.getStepIdentifier(), in.toString(), out.toString() );
  }
  MPI_Barrier(MPI_COMM_WORLD);
  #endif
}


void peano4::parallel::tests::PingPongTest::testDaStGenTypeIntegerMessage() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  tarch::mpi::IntegerMessage out;
  out.setValue(23);
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    MPI_Send(&out,1,tarch::mpi::IntegerMessage::getGlobalCommunciationDatatype(),1,0,MPI_COMM_WORLD);
    tarch::mpi::IntegerMessage::send(
      out, 1, 0,
      tarch::mpi::Rank::getInstance().getCommunicator()
    );
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    tarch::mpi::IntegerMessage inThroughMPI;
    inThroughMPI.setValue(72);
    MPI_Recv(&inThroughMPI,1,tarch::mpi::IntegerMessage::getGlobalCommunciationDatatype(),0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    validateEqualsWithParams2( inThroughMPI.getValue(), out.getValue(), inThroughMPI.toString(), out.toString() );

    tarch::mpi::IntegerMessage inThroughDaStGen;
    inThroughDaStGen.setValue(73);
    tarch::mpi::IntegerMessage::receive(
      inThroughDaStGen, 0, 0,
      tarch::mpi::Rank::getInstance().getCommunicator()
    );
    validateEqualsWithParams2( inThroughDaStGen.getValue(), out.getValue(), inThroughDaStGen.toString(), out.toString() );
  }
  MPI_Barrier(MPI_COMM_WORLD);
  #endif
}


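// ---------------------------------------------------------------------------
// Background sketch (not part of the original file): the DaStGen-generated
// message classes expose getGlobalCommunciationDatatype(), an MPI derived
// datatype describing the message layout, which is why the plain MPI_Send/
// MPI_Recv path and the generated send()/receive() wrappers in the test above
// deliver the same content. For a message carrying a single int, such a
// datatype could be built roughly like this (illustrative only; the generated
// code may differ):
//
//   MPI_Datatype datatype;
//   int          blocklen[] = { 1 };
//   MPI_Aint     disp[]     = { 0 };
//   MPI_Datatype subtypes[] = { MPI_INT };
//   MPI_Type_create_struct( 1, blocklen, disp, subtypes, &datatype );
//   MPI_Type_commit( &datatype );
// ---------------------------------------------------------------------------
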
void peano4::parallel::tests::PingPongTest::testDaStGenArray() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  StartTraversalMessage out[10];
  out[0].setStepIdentifier(23);
  out[1].setStepIdentifier(24);
  out[2].setStepIdentifier(25);
  out[3].setStepIdentifier(26);
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    MPI_Send(out,4,StartTraversalMessage::getGlobalCommunciationDatatype(),1,0,MPI_COMM_WORLD);
  }
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    StartTraversalMessage in[10];
    MPI_Recv(in,4,StartTraversalMessage::getGlobalCommunciationDatatype(),0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    validateEqualsWithParams8(
      in[0].getStepIdentifier(), out[0].getStepIdentifier(),
      in[0].toString(), out[0].toString(),
      in[1].toString(), out[1].toString(),
      in[2].toString(), out[2].toString(),
      in[3].toString(), out[3].toString()
    );
    validateEqualsWithParams8(
      in[1].getStepIdentifier(), out[1].getStepIdentifier(),
      in[0].toString(), out[0].toString(),
      in[1].toString(), out[1].toString(),
      in[2].toString(), out[2].toString(),
      in[3].toString(), out[3].toString()
    );
    validateEqualsWithParams8(
      in[2].getStepIdentifier(), out[2].getStepIdentifier(),
      in[0].toString(), out[0].toString(),
      in[1].toString(), out[1].toString(),
      in[2].toString(), out[2].toString(),
      in[3].toString(), out[3].toString()
    );
    validateEqualsWithParams8(
      in[3].getStepIdentifier(), out[3].getStepIdentifier(),
      in[0].toString(), out[0].toString(),
      in[1].toString(), out[1].toString(),
      in[2].toString(), out[2].toString(),
      in[3].toString(), out[3].toString()
    );
  }
  MPI_Barrier(MPI_COMM_WORLD);
  #endif
}


void peano4::parallel::tests::PingPongTest::testDaStGenArrayTreeManagementMessage() {
  #if defined(Parallel) and not defined(UseSmartMPI)
  peano4::parallel::TreeManagementMessage message;
  const int Tag = 14;
  if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==0) {
    // Reconstructed: the concrete Action set here was lost in the listing and
    // is an assumption; any value works for the ping-pong check below.
    message.setAction( peano4::parallel::TreeManagementMessage::Action::Acknowledgement );
    peano4::parallel::TreeManagementMessage::send( message, 1, Tag, MPI_COMM_WORLD );
  }
  else if ( tarch::mpi::Rank::getInstance().getNumberOfRanks()>=2 and tarch::mpi::Rank::getInstance().getRank()==1) {
    peano4::parallel::TreeManagementMessage::receive( message, 0, Tag, MPI_COMM_WORLD );
    validateWithParams1( message.getAction()==peano4::parallel::TreeManagementMessage::Action::Acknowledgement, message.toString() );
  }
  MPI_Barrier(MPI_COMM_WORLD);
  #endif
}


void peano4::parallel::tests::PingPongTest::run() {
  logTraceIn( "run()" );

  testMethod( testBuiltInType );

  testMethod( testDaStGenTypeIntegerMessage );
  testMethod( testDaStGenTypeStartTraversalMessage );
  testMethod( testDaStGenArray );

  testMethod( testDaStGenArrayTreeManagementMessage );

  testMethod( testMultithreadedPingPongWithBlockingReceives );
  testMethod( testMultithreadedPingPongWithBlockingSends );
  testMethod( testMultithreadedPingPongWithBlockingSendsAndReceives );

  testMethod( testMultithreadedPingPongWithNonblockingReceives );
  testMethod( testMultithreadedPingPongWithNonblockingSends );
  testMethod( testMultithreadedPingPongWithNonblockingSendsAndReceives );

  logTraceOut( "run()" );
}


#ifdef UseTestSpecificCompilerSettings
#pragma optimize("",on)
#endif
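
// ---------------------------------------------------------------------------
// Usage note (not part of the original file): every ping-pong test above is
// guarded by getNumberOfRanks()>=2 and degenerates to a no-op on a single
// rank, so the enclosing unit-test binary has to be launched with at least
// two MPI ranks to exercise these code paths, e.g.
//
//   mpirun -np 2 ./peano-unit-tests      // binary name is illustrative
// ---------------------------------------------------------------------------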