Peano 4
Statistics.cpp
#include "Statistics.h"

#include "peano4/parallel/SpacetreeSet.h"
#include "tarch/Assertions.h"


tarch::logging::Log toolbox::loadbalancing::Statistics::_log( "toolbox::loadbalancing::Statistics" );


toolbox::loadbalancing::Statistics::Statistics():
  _localNumberOfInnerUnrefinedCells( 0 ),
  _globalNumberOfInnerUnrefinedCells( 0 ),
  _globalNumberOfTrees(1),
  _globalNumberOfRanksWithEnabledLoadBalancing(0),
  _localLoadBalancingEnabled(true),
  _localNumberOfSplits(0),
  _numberOfStateUpdatesWithoutAnySplit(0),
  _viewConsistent(false) {
  #ifdef Parallel
  // No global data exchange is in flight initially, so all request handles are nullptr.
  _globalSumRequest                                    = nullptr;
  _globalNumberOfSplitsRequest                         = nullptr;
  _globalNumberOfTreesRequest                          = nullptr;
  _globalNumberOfRanksWithEnabledLoadBalancingRequest  = nullptr;
  _globalMaximumTreesPerRankRequest                    = nullptr;
  #endif
}


void toolbox::loadbalancing::Statistics::notifyOfStateChange( State state ) {
  // Load balancing counts as locally enabled unless the state machine has been switched off.
  _localLoadBalancingEnabled = state != State::SwitchedOff;
}


std::string toolbox::loadbalancing::Statistics::toString() const {
  std::ostringstream msg;

  msg << "(local-number-of-inner-unrefined-cells=" << _localNumberOfInnerUnrefinedCells
      << ",global-number-of-inner-unrefined-cells=" << _globalNumberOfInnerUnrefinedCells
      << ",global-number-of-trees=" << _globalNumberOfTrees
      << ",max-trees-per-rank=" << _maximumTreesPerRank
      << ",local-load-balancing-enabled=" << _localLoadBalancingEnabled
      << ",global-number-of-ranks-with-enabled-lb=" << _globalNumberOfRanksWithEnabledLoadBalancing
      << ",local-number-of-splits=" << _localNumberOfSplits
      << ",global-number-of-splits=" << _globalNumberOfSplits
      << ",number-of-state-updates-without-any-split=" << _numberOfStateUpdatesWithoutAnySplit
      << ",view-consistent=" << _viewConsistent
      << ")";

  return msg.str();
}


void toolbox::loadbalancing::Statistics::waitForGlobalDataExchange() {
  #ifdef Parallel
  if (_globalSumRequest != nullptr ) {
    // Complete the outstanding non-blocking allreduces before their buffers are read or reused.
    MPI_Wait( _globalSumRequest, MPI_STATUS_IGNORE );
    MPI_Wait( _globalNumberOfSplitsRequest, MPI_STATUS_IGNORE );
    MPI_Wait( _globalNumberOfTreesRequest, MPI_STATUS_IGNORE );
    MPI_Wait( _globalNumberOfRanksWithEnabledLoadBalancingRequest, MPI_STATUS_IGNORE );
    MPI_Wait( _globalMaximumTreesPerRankRequest, MPI_STATUS_IGNORE );

    delete _globalSumRequest;
    delete _globalNumberOfSplitsRequest;
    delete _globalNumberOfTreesRequest;
    delete _globalNumberOfRanksWithEnabledLoadBalancingRequest;
    delete _globalMaximumTreesPerRankRequest;

    _globalSumRequest                                    = nullptr;
    _globalNumberOfSplitsRequest                         = nullptr;
    _globalNumberOfTreesRequest                          = nullptr;
    _globalNumberOfRanksWithEnabledLoadBalancingRequest  = nullptr;
    _globalMaximumTreesPerRankRequest                    = nullptr;
  }
  #endif
}


void toolbox::loadbalancing::Statistics::updateGlobalView() {
  // Refresh the local cell count from the spacetree set before the global view is rebuilt.
  _localNumberOfInnerUnrefinedCells = peano4::parallel::SpacetreeSet::getInstance().getGridStatistics().getNumberOfLocalUnrefinedCells();

  _viewConsistent = true;

  if (tarch::mpi::Rank::getInstance().getNumberOfRanks()<=1) {
    // Serial run: the local view already is the global view.
    _globalNumberOfInnerUnrefinedCells           = _localNumberOfInnerUnrefinedCells;
    _globalNumberOfSplits                        = _localNumberOfSplits;
    _globalNumberOfTrees                         = peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().size();
    _globalNumberOfRanksWithEnabledLoadBalancing = _localLoadBalancingEnabled ? 1 : 0;
    _maximumTreesPerRank                         = _globalNumberOfTrees;
  }
  else {
    #ifdef Parallel
    waitForGlobalDataExchange();

    _globalNumberOfInnerUnrefinedCells           = _globalNumberOfInnerUnrefinedCellsBufferIn;
    _globalNumberOfSplits                        = _numberOfSplitsIn;
    _globalNumberOfTrees                         = _numberOfTreesIn;
    _globalNumberOfRanksWithEnabledLoadBalancing = _numberOfRanksWithEnabledLoadBalancingIn;
    _maximumTreesPerRank                         = _maximumTreesIn;

    if ( _globalNumberOfInnerUnrefinedCells < _localNumberOfInnerUnrefinedCells ) {
      logInfo(
        "updateGlobalView()",
        "local number of cells (" << _localNumberOfInnerUnrefinedCells << ") is bigger than global cell count (" << _globalNumberOfInnerUnrefinedCells <<
        "). This usually happens if a forking tree has some pending refinement events and cannot refine anymore, as it has already spawned cells. Statistics might have an inconsistent view of the world"
      );
      _viewConsistent = false;
      _globalNumberOfInnerUnrefinedCells = _localNumberOfInnerUnrefinedCells;
    }

    _globalSumRequest                                    = new MPI_Request();
    _globalNumberOfSplitsRequest                         = new MPI_Request();
    _globalNumberOfTreesRequest                          = new MPI_Request();
    _globalNumberOfRanksWithEnabledLoadBalancingRequest  = new MPI_Request();
    _globalMaximumTreesPerRankRequest                    = new MPI_Request();

    // Fill the send buffers with this rank's local data, then kick off the non-blocking reductions.
    _globalNumberOfInnerUnrefinedCellsBufferOut = _localNumberOfInnerUnrefinedCells;
    _numberOfSplitsOut                          = _localNumberOfSplits;
    _numberOfTreesOut                           = peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().size();
    _numberOfRanksWithEnabledLoadBalancingOut   = _localLoadBalancingEnabled ? 1 : 0;
    _numberOfMaximumTreesOut                    = peano4::parallel::SpacetreeSet::getInstance().getLocalSpacetrees().size();

    MPI_Iallreduce(
      &_numberOfRanksWithEnabledLoadBalancingOut,   // send
      &_numberOfRanksWithEnabledLoadBalancingIn,    // receive
      1,                                            // count
      MPI_INT,
      MPI_SUM,
      tarch::mpi::Rank::getInstance().getCommunicator(),
      _globalNumberOfRanksWithEnabledLoadBalancingRequest
    );
    MPI_Iallreduce(
      &_numberOfTreesOut,                           // send
      &_numberOfTreesIn,                            // receive
      1,                                            // count
      MPI_INT,
      MPI_SUM,
      tarch::mpi::Rank::getInstance().getCommunicator(),
      _globalNumberOfTreesRequest
    );
    MPI_Iallreduce(
      &_globalNumberOfInnerUnrefinedCellsBufferOut, // send
      &_globalNumberOfInnerUnrefinedCellsBufferIn,  // receive
      1,                                            // count
      MPI_INT,
      MPI_SUM,
      tarch::mpi::Rank::getInstance().getCommunicator(),
      _globalSumRequest
    );
    // has to be global number, as local is already erased
    MPI_Iallreduce(
      &_numberOfSplitsOut,                          // send
      &_numberOfSplitsIn,                           // receive
      1,                                            // count
      MPI_INT,
      MPI_SUM,
      tarch::mpi::Rank::getInstance().getCommunicator(),
      _globalNumberOfSplitsRequest
    );
    MPI_Iallreduce(
      &_numberOfMaximumTreesOut,                    // send
      &_maximumTreesIn,                             // receive
      1,                                            // count
      MPI_INT,
      MPI_MAX,
      tarch::mpi::Rank::getInstance().getCommunicator(),
      _globalMaximumTreesPerRankRequest
    );
    #endif
  }

  if ( _globalNumberOfSplits==0 and _localNumberOfSplits==0 and _numberOfStateUpdatesWithoutAnySplit<65536) {
    _numberOfStateUpdatesWithoutAnySplit++;
  }
  else if ( _globalNumberOfSplits>0 or _localNumberOfSplits>0 ) {
    _numberOfStateUpdatesWithoutAnySplit = 0;
  }

  _localNumberOfSplits = 0;
}
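
// The routine above issues five MPI_Iallreduce calls and returns immediately; the matching
// MPI_Wait calls only happen in waitForGlobalDataExchange() at the start of the next update,
// so the reductions overlap with a whole mesh sweep. The standalone sketch below is not part
// of Peano (buffer names and values are made up for illustration); it shows this
// issue-now-wait-later pattern in isolation and is therefore guarded with #if 0 here.
#if 0
#include <mpi.h>
#include <iostream>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int localCells  = 100;   // stand-in for a per-rank quantity such as the local cell count
  int globalCells = 0;

  MPI_Request* request = new MPI_Request();
  MPI_Iallreduce(
    &localCells,       // send buffer
    &globalCells,      // receive buffer
    1,                 // count
    MPI_INT,
    MPI_SUM,
    MPI_COMM_WORLD,
    request
  );

  // ... do other work (e.g. traverse the mesh) while the reduction is in flight ...

  MPI_Wait(request, MPI_STATUS_IGNORE);   // globalCells is valid only after the wait
  delete request;

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank==0) {
    std::cout << "global cell count: " << globalCells << std::endl;
  }

  MPI_Finalize();
  return 0;
}
#endif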


void toolbox::loadbalancing::Statistics::incLocalNumberOfSplits(int delta) {
  assertion( delta>0 );
  _localNumberOfSplits += delta;
}


int toolbox::loadbalancing::Statistics::getLocalNumberOfInnerUnrefinedCells() const {
  return _localNumberOfInnerUnrefinedCells;
}


int toolbox::loadbalancing::Statistics::getGlobalNumberOfInnerUnrefinedCells() const {
  return _globalNumberOfInnerUnrefinedCells;
}


int toolbox::loadbalancing::Statistics::getGlobalNumberOfRanksWithEnabledLoadBalancing() const {
  return _globalNumberOfRanksWithEnabledLoadBalancing;
}


int toolbox::loadbalancing::Statistics::getGlobalNumberOfTrees() const {
  return _globalNumberOfTrees;
}


int toolbox::loadbalancing::Statistics::getMaximumTreesPerRank() const {
  return _maximumTreesPerRank;
}


int toolbox::loadbalancing::Statistics::getNumberOfStateUpdatesWithoutAnySplit() const {
  return _numberOfStateUpdatesWithoutAnySplit;
}


bool toolbox::loadbalancing::Statistics::hasConsistentViewOfWorld() const {
  return _viewConsistent;
}
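
// Typical caller-side use, sketched here as an assumption (the surrounding class, its members
// and helper calls are hypothetical and not taken from this file): a load balancing scheme
// records splits as they are triggered, then rebuilds the global view once per mesh sweep
// and logs the result. Guarded with #if 0 as it is illustration only.
#if 0
void MyLoadBalancingScheme::finishStep() {
  if ( haveTriggeredSplitThisSweep() ) {            // hypothetical helper
    _statistics.incLocalNumberOfSplits();           // default delta of 1
  }

  _statistics.updateGlobalView();                   // folds in last sweep's reductions, starts new ones
  logInfo( "finishStep()", _statistics.toString() );
}
#endif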