Peano
Loading...
Searching...
No Matches
main.cpp
Go to the documentation of this file.
1#include "MyObserver.h"
2
3#include "tarch/logging/Log.h"
8#include "tarch/mpi/Rank.h"
9
10#include "peano4/peano.h"
14
15
// File-scope log channel used by the free functions in this translation unit.
// NOTE(review): the cross-reference stub at the bottom of this page shows a
// different channel name ("examples::grid") from another example — confirm
// which one the repository's main.cpp actually uses.
16tarch::logging::Log _log("examples::regulargridupscaling");
17
18
19#include "peano4/UnitTests.h"
20#include "tarch/UnitTests.h"
21
22
23void runTests() {
24 int unitTestsErrors = 0;
26
28 tests->run();
29 unitTestsErrors += tests->getNumberOfErrors();
30 delete tests;
31
33 tests->run();
34 unitTestsErrors += tests->getNumberOfErrors();
35 delete tests;
36
37 if (unitTestsErrors != 0) {
38 logError("main()", "unit tests failed. Quit.");
39 exit(-2);
40 }
41
42}
43
44
45
46
// Build a regular grid of mesh width h over the unit square/cube, distribute
// it over MPI ranks and threads, and run a fixed number of parallel
// traversals (flopsPerCell presumably parameterises per-cell work — its use
// is on lines the extraction dropped; TODO confirm).
//
// NOTE(review): this is a Doxygen HTML extraction. Every statement that was a
// hyperlink (original lines 49, 60, 62, 73-74, 87, 99-100, 112-113, 117-118,
// 131-132, 146, 151, 159) is missing, so identifiers such as
// numberOfCellsPerRank, numberOfThreads and numberOfCellsPerThread appear
// below without their declarations. Consult the repository's main.cpp; the
// code is kept byte-identical here and only annotated.
47void runParallel(double h, int flopsPerCell) {
48 logTraceInWith1Argument( "runParallel", h );
// NOTE(review): line 49 is missing — per the cross-reference stubs it is
// presumably peano4::parallel::SpacetreeSet::getInstance().init( with the
// brace lists below as domain offset and width (unit square in 2D, unit cube
// otherwise) and 0 as the periodic-BC bitset.
50 #if Dimensions==2
51 {0.0, 0.0},
52 {1.0, 1.0},
53 #else
54 {0.0, 0.0, 0.0},
55 {1.0, 1.0, 1.0},
56 #endif
57 0
58 );
59
// NOTE(review): lines 60 and 62 are missing — presumably the traversal
// observer construction and the numberOfThreads initialisation used further
// down; verify against the original file.
61
63
64 if (tarch::mpi::Rank::getInstance().isGlobalMaster() ) {
65 logInfo( "runParallel(...)", "create initial grid (step #1)" );
66 int numberOfGridConstructionSteps = 0;
// Step #1: refine until there is at least one coarse cell (3^d) per rank,
// but give up after 20 sweeps.
67 while (
68 peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() <
69 tarch::mpi::Rank::getInstance().getNumberOfRanks() * ThreePowerD
70 and
71 numberOfGridConstructionSteps<20
72 ) {
// NOTE(review): lines 73-74 (the traverse(...) call driving one construction
// sweep) are missing from this listing.
75 numberOfGridConstructionSteps++;
76 }
77 logInfo( "runParallel(...)", "grid statistics = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().toString() );
78
// Abort if the grid is too coarse to give every rank at least one cell.
79 if (
80 peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() <
81 tarch::mpi::Rank::getInstance().getNumberOfRanks()
82 ) {
83 logError( "runParallel(...)", "not enough cells to keep " << tarch::mpi::Rank::getInstance().getNumberOfRanks() << " busy" );
84 exit(-1);
85 }
86
// NOTE(review): line 87 (declaration of numberOfCellsPerRank) is missing.
88 logInfo( "runParallel(...)", "trigger split of initial grid among ranks with " << numberOfCellsPerRank << " cells per rank" );
// Hand numberOfCellsPerRank cells from tree 0 to every other rank.
89 for (int rank=1; rank<tarch::mpi::Rank::getInstance().getNumberOfRanks(); rank++) {
90 if ( not peano4::parallel::SpacetreeSet::getInstance().split(0,numberOfCellsPerRank,rank)) {
91 logWarning( "runParallel(...)", "failed to assign rank " << rank << " " << numberOfCellsPerRank << " cell(s)" );
92 }
93 }
94
// Number of refinement levels implied by h: log_3(1/h), rounded; the
// assertion guards against h values that make this negative.
95 const int MaxNumberOfConstructionSteps = static_cast<int>(std::round( std::log(1.0 / h)/std::log(3.0)+1 )) - 1;
96 assertion1(MaxNumberOfConstructionSteps>=0, MaxNumberOfConstructionSteps);
97 logInfo( "runParallel(...)", "commit split and give ranks " << MaxNumberOfConstructionSteps << " iterations to 'recover' (step #2)" );
98 for (int i=0; i<MaxNumberOfConstructionSteps; i++) {
// NOTE(review): lines 99-100 (per-sweep traverse call) are missing.
101 }
102
// Split the master rank's tree further into one tree per local thread.
103 int numberOfCellsPerThread = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() / numberOfThreads;
104 logInfo( "runParallel(...)", "trigger split of master rank into threads with " << numberOfCellsPerThread << " cells per thread (total: " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells() << ")");
105 for (int thread=1; thread<numberOfThreads; thread++) {
// NOTE(review): the last split argument is the target rank (0 = stay local),
// not the thread index — thread is only used in the log messages here.
106 if ( not peano4::parallel::SpacetreeSet::getInstance().split(0,numberOfCellsPerThread,0)) {
107 logWarning( "runParallel(...)", "failed to assign thread " << thread << " " << numberOfCellsPerThread << " cell(s)" );
108 }
109 }
110
111 logInfo( "runParallel(...)", "run one step committing split and telling other ranks to split as well (step #3)" );
// NOTE(review): lines 112-113 (presumably setNextProgramStep(3) plus a
// traverse) are missing.
114
115 logInfo( "runParallel(...)", "commit splits into threads and give ranks time to 'recover' (step #4)" );
116 for (int i=0; i<3; i++) {
// NOTE(review): lines 117-118 (per-sweep traverse) are missing.
119 }
120
121 logInfo( "runParallel(...)", "start parallel traversals (step #5)" );
122 logInfo( "runParallel(...)", "grid statistics = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().toString() );
123
// Fewer measurement sweeps in debug builds to keep test runs short.
124 #if PeanoDebug>=2
125 const int Iterations=2;
126 #else
127 const int Iterations=20;
128 #endif
129
130 for (int i=0; i<Iterations; i++) {
// NOTE(review): lines 131-132 (the measured traverse call) are missing.
133 }
134 logInfo( "runParallel(...)", "terminated successfully" );
135 }
// Worker ranks: obey the program steps broadcast by the global master.
136 else { // not the global master
137 while (peano4::parallel::Node::getInstance().continueToRun()) {
138 logDebug( "runParallel(...)", "trigger a new sweep with step " << peano4::parallel::Node::getInstance().getCurrentProgramStep() );
139 if (
140 peano4::parallel::Node::getInstance().getCurrentProgramStep()==2
141 or
142 peano4::parallel::Node::getInstance().getCurrentProgramStep()==4
143 or
144 peano4::parallel::Node::getInstance().getCurrentProgramStep()==5
145 ) {
// NOTE(review): line 146 (the traverse for steps 2/4/5) is missing.
147 }
// Step 3: mirror the master's thread-split on this rank's single local tree.
148 else if (peano4::parallel::Node::getInstance().getCurrentProgramStep()==3) {
149 assertionEquals( peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().size(), 1);
150 const int localTree = *(peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().begin());
// NOTE(review): line 151 (declaration of numberOfCellsPerThread on the
// worker side) is missing.
152 logInfo( "runParallel(...)", "local unrefined cells = " << peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells());
153 logInfo( "runParallel(...)", "trigger split of master rank into threads with " << numberOfCellsPerThread << " cells per thread");
154 for (int thread=1; thread<numberOfThreads; thread++) {
155 if ( not peano4::parallel::SpacetreeSet::getInstance().split(localTree,numberOfCellsPerThread,tarch::mpi::Rank::getInstance().getRank())) {
156 logWarning( "runParallel(...)", "failed to assign thread " << thread << " " << numberOfCellsPerThread << " cell(s)" );
157 }
158 }
// NOTE(review): line 159 (presumably the traverse committing the split) is
// missing.
160 }
161 }
162 }
163 logTraceOut( "runParallel" );
164}
165
166
// Program entry point: configure logging, run the unit tests, parse the
// command line (mesh-width, flops-per-cell, optional core count and further
// tuning parameters) and launch the parallel benchmark.
//
// NOTE(review): this Doxygen extraction dropped the hyperlinked statements
// (original lines 171-175, 177-178, 180-181, 183-184, 186-187, 189-190, 194,
// 218, 222, 225, 234) — presumably the peano4 parallel-environment init, the
// LogFilter::addFilterListEntry(...) calls whose closing ")); " lines remain
// below, the Core configuration using `cores`, and the environment shutdown.
// Consult the repository's main.cpp; code kept byte-identical here.
167int main(int argc, char** argv) {
// NOTE(review): these two constants are never referenced in the visible
// code — the returns below use literals 1 and 2. Possibly used on the
// dropped lines; confirm before removing.
168 const int ExitCodeSuccess = 0;
169 const int ExitCodeUnitTestsFailed = 1;
170
173
// NOTE(review): the six orphaned ")); " lines below are the tails of six
// dropped tarch::logging::LogFilter::getInstance().addFilterListEntry(
// FilterListEntry(...) calls (see the cross-reference stubs for TargetInfo /
// TargetDebug / TargetTrace).
176 ));
179 ));
182 ));
185 ));
188 ));
191 ));
192
193 //tarch::logging::CommandLineLogger::getInstance().setOutputFile( "trace.txt" );
195
196 runTests();
197
// Between 2 and 5 user arguments are accepted; everything else is usage error.
198 if (argc<3 or argc>6) {
199 logError( "main(...)", "Usage: ./executable mesh-width flops-per-cell [core-count] [spawn-frequency] [integration-points]");
200 return 1;
201 }
202
203 double meshWidth = std::atof( argv[1] );
204 int flopsPerCell = std::atoi( argv[2] );
// Mesh width must lie strictly in (0,1): it is a fraction of the unit domain.
205 if (meshWidth<=0) {
206 logError( "main(...)", "Usage: ./executable mesh-width");
207 logError( "main(...)", " mesh-width has to be a positive value");
208 return 2;
209 }
210 if (meshWidth>=1.0) {
211 logError( "main(...)", "Usage: ./executable mesh-width");
212 logError( "main(...)", " mesh-width has to be smaller than one");
213 return 2;
214 }
215
// Optional argv[3]: core count. NOTE(review): line 218, which presumably
// passes `cores` to tarch::multicore::Core::configure(...), is missing.
216 if (argc>=4) {
217 int cores = std::atoi( argv[3] );
219 }
220
// NOTE(review): the bodies handling argv[4] (spawn-frequency, line 222) and
// argv[5] (integration-points, line 225) were dropped by the extraction.
221 if (argc>=5) {
223 }
224 if (argc>=6) {
226 }
227
228 const int numberOfRanks = tarch::mpi::Rank::getInstance().getNumberOfRanks();
229 const int numberOfCores = tarch::multicore::Core::getInstance().getNumberOfThreads();
230 logInfo( "main(...)", "run on " << numberOfRanks << " ranks with " << numberOfCores << " thread(s) each" );
231
232 runParallel(meshWidth,flopsPerCell);
233
// NOTE(review): line 234 — presumably peano4::shutdownParallelEnvironment()
// per the cross-reference stub — is missing.
235
236 return 0;
237}
#define assertionEquals(lhs, rhs)
#define assertion1(expr, param)
#define ThreePowerD
Definition Globals.h:24
#define logError(methodName, logMacroMessageStream)
Wrapper macro around tarch::logging::Log to improve logging.
Definition Log.h:464
#define logDebug(methodName, logMacroMessageStream)
Definition Log.h:50
#define logTraceOut(methodName)
Definition Log.h:379
#define logWarning(methodName, logMacroMessageStream)
Wrapper macro around tarch::logging::Log to improve logging.
Definition Log.h:440
#define logTraceInWith1Argument(methodName, argument0)
Definition Log.h:370
#define logInfo(methodName, logMacroMessageStream)
Wrapper macro around tarch::logging::Log to improve logging.
Definition Log.h:411
int main()
Definition main.cpp:321
void runTests()
Definition main.cpp:25
void runParallel()
Definition main.cpp:194
tarch::logging::Log _log("examples::grid")
void setNextProgramStep(int number)
The user tells the set which program step to use, i.e.
Definition Node.cpp:441
static Node & getInstance()
This operation returns the singleton instance.
Definition Node.cpp:108
bool split(int treeId, const peano4::SplitInstruction &instruction, int targetRank)
Split a local tree.
peano4::grid::GridStatistics getGridStatistics() const
Return statistics object for primary spacetree.
void init(const tarch::la::Vector< Dimensions, double > &offset, const tarch::la::Vector< Dimensions, double > &width, const std::bitset< Dimensions > &periodicBC=0)
static SpacetreeSet & getInstance()
std::set< int > getLocalSpacetrees() const
void traverse(peano4::grid::TraversalObserver &observer)
Invoke traverse on all spacetrees in parallel.
void setOutputFile(const std::string &outputLogFileName)
Is redundant, as you could use setLogFormat() instead.
static ChromeTraceFileLogger & getInstance()
void addFilterListEntry(const FilterListEntry &entry)
Add one filter list entry.
static LogFilter & getInstance()
Log Device.
Definition Log.h:516
int getNumberOfRanks() const
Definition Rank.cpp:552
static Rank & getInstance()
This operation returns the singleton instance.
Definition Rank.cpp:539
static Core & getInstance()
Definition Core.cpp:56
int getNumberOfThreads() const
Returns the number of threads that is used.
Definition Core.cpp:67
void configure(int numberOfThreads=UseDefaultNumberOfThreads)
Configure the whole node, i.e.
Definition Core.cpp:61
Represents one test case.
Definition TestCase.h:57
std::string toString(Filter filter)
Definition convert.cpp:170
tarch::tests::TestCase * getUnitTests()
Please destroy after usage.
Definition UnitTests.cpp:12
void fillLookupTables()
Fill Lookup Tables.
Definition peano.cpp:87
int initParallelEnvironment(int *argc, char ***argv)
Init Parallel Environment.
Definition peano.cpp:101
void shutdownParallelEnvironment()
Shutdown all the parallel environment, i.e.
Definition peano.cpp:127
tarch::tests::TestCase * getUnitTests()
Please destroy after usage.
Definition UnitTests.cpp:17
Represents one entry of the filter list.
Definition LogFilter.h:30
static const std::string TargetTrace
Definition LogFilter.h:36
static const std::string TargetDebug
Definition LogFilter.h:35
static const std::string TargetInfo
Definition LogFilter.h:34