Peano
Loading...
Searching...
No Matches
Project.py
Go to the documentation of this file.
1# This file is part of the ExaHyPE2 project. For conditions of distribution and
2# use, please see the copyright notice at www.peano-framework.org
3import peano4
5
6import dastgen2
7
8import os
9import sys
10import copy
11
12from swift2.particle.Particle import Particle
14 DynamicMeshRefinementAnalysis,
15)
16from swift2.output.SWIFTMain import SWIFTMain
17from swift2.actionsets.Cleanup import Cleanup
18
19import swift2.api.graphcompiler
20
21from .ProjectRealisation import *
22
23
24from abc import abstractmethod
25
26
27class Project(object):
28 """!
29
30 Swift2 project
31
32 Represents an abstract SWIFT 2 project. A SWIFT 2 project is a Peano 4
33 project with a particular set of actions (algorithmic phases)
34 that you can choose from and with particular solver types. It
35 realises a builder mechanism, i.e. you build up your SWIFT 2
36 project and then you finally tell the project "give me the Peano 4
37 project". From hereon, you can use this Peano 4 project to actually
38 set up the Peano 4 application.
39
40 Please do not use this class directly. Use one of its subclasses.
41
42 @see generate_Peano4_project()
43
44 """
45
46 def __init__(self, namespace, project_name, directory=".", executable="swift2"):
47 self._project = peano4.Project(namespace, project_name, directory)
48
49 self._domain_offset = [0.0, 0.0, 0.0]
50 self._domain_size = [1.0, 1.0, 1.0]
51 self._dimensions = 2
60 self._build_mode = peano4.output.CompileMode.Asserts
61 self._executable_name = executable
62 self._periodic_BC = [False, False, False]
63 self._output_path = "./"
64
67
70 "InitialConditions", False
71 )
74
76
77 # Default values will be set by self.set_realisation().
80 # Default values used to be:
81 # swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_multiscale_sort_scattered_memory
82 # swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_multiscale_sort_scattered_memory
83
def set_load_balancing(self, load_balancer_name, load_balancer_arguments):
    """!
    Configure the load balancing scheme used by the generated code.

    @param load_balancer_name Fully-qualified C++ name of the load balancer.
           A reasonable default is
           "toolbox::loadbalancing::strategies::SpreadOutHierarchically",
           though you may be well-advised to study the other classes within
           the namespace toolbox::loadbalancing.
    @param load_balancer_arguments Constructor arguments copied verbatim into
           the generated C++ class instantiation. Add the brackets yourself,
           i.e. "(3,4,5)" is fine but "3,4,5" is not. The only exception is
           the empty parameter list, where you can/should pass the empty
           string.
    """
    self._load_balancer_name = load_balancer_name
    self._load_balancer_arguments = load_balancer_arguments
100
# The standard library-name suffixes that both Peano and ExaHyPE use to
# distinguish the build modes (debug, release, trace, asserts, statistics).
LibraryDebug = "_debug"
LibraryRelease = ""
LibraryTrace = "_trace"
LibraryAsserts = "_asserts"
LibraryStats = "_stats"
def set_Peano4_installation(self, src_path, mode=peano4.output.CompileMode.Release):
    """!
    Point the project at a Peano installation.

    @param src_path Path (relative or absolute) to the src directory of
           Peano. This path should hold both the headers (in subdirectories)
           and all the static libraries.
    @param mode peano4.output.CompileMode used for subsequent builds.

    Raises FileNotFoundError if any of the expected subdirectories
    (swift2, peano4, tarch) is missing underneath src_path/src.
    """
    required_subdirs = [
        os.path.join(src_path, "src", subdir)
        for subdir in ("swift2", "peano4", "tarch")
    ]
    for directory in required_subdirs:
        if os.path.exists(directory):
            continue
        # Report the offending directory relative to the passed root.
        dir_stripped = directory[len(src_path):]
        raise FileNotFoundError(
            f"Didn't find directory {dir_stripped} in passed peano root dir={src_path}"
        )

    self._Peano_src_directory = src_path
    self._build_mode = mode
133
135 """!
136
137 Run over all species specified and find out what their maximum
138 (coarsest) h is. This value is then used to constrain the mesh
139 adaptivity: The mesh should never be coarser than this, but also
140 the initial mesh should meet this global maximum h.
141
142 The outcome is stored in self._global_max_h.
143
144 """
145 self._global_max_h = 0.0
146 for current_species_set in self._particle_species_set:
147 self._global_max_h = max(
148 self._global_max_h, current_species_set.particle_model.max_h
149 )
150
def __compute_global_min_h(self):
    """!
    Determine the finest admissible mesh width over all species.

    Run over all species specified and find out what their minimum
    (finest) h is. This value is then used to constrain the mesh
    adaptivity: The mesh should never be finer than this minimum, and the
    initial mesh should take it into account, too.

    The outcome is stored in self._global_min_h.
    """
    # NOTE(review): the def header of this routine was lost in the rendered
    # listing; it is restored here per the file's own definition index
    # (Project.py:151).
    self._global_min_h = sys.float_info.max
    for current_species_set in self._particle_species_set:
        self._global_min_h = min(
            self._global_min_h, current_species_set.particle_model.min_h
        )
167
169 """
170
171 We export SWIFT's constants. Besides the constants from SWIFT,
172 I also export some parameters from Peano onto the SWIFT constants
173 file. Therefore, it is important that you parse the configure output
174 before we export the constants.
175
176 """
177 self._project.constants.clear()
178 offset_string = "{" + str(self._domain_offset[0])
179 size_string = "{" + str(self._domain_size[0])
180 for i in range(1, self._dimensions):
181 offset_string += ","
182 size_string += ","
183 offset_string += str(self._domain_offset[i])
184 size_string += str(self._domain_size[i])
185 offset_string += "}"
186 size_string += "}"
187 self._project.constants.add_include("""#include <bitset>""")
188 self._project.constants.add_include("""#include "tarch/la/Vector.h" """)
189 self._project.constants.add_include("""#include "peano4/utils/Globals.h" """)
190 self._project.constants.export_const_with_type(
191 "DomainOffset", offset_string, "tarch::la::Vector<Dimensions,double>"
192 )
193 self._project.constants.export_const_with_type(
194 "DomainSize", size_string, "tarch::la::Vector<Dimensions,double>"
195 )
196 self._project.constants.export("MinTerminalTime", str(self._min_terminal_time))
198 self._project.constants.export(
199 "MaxTerminalTime", str(self._max_terminal_time)
200 )
201 else:
202 self._project.constants.export(
203 "MaxTerminalTime", "std::numeric_limits<double>::max()"
204 )
205 self._project.constants.export(
206 "FirstPlotTimeStamp", str(self._first_plot_time_stamp)
207 )
208 self._project.constants.export(
209 "TimeInBetweenPlots", str(self._time_in_between_plots)
210 )
211 self._project.constants.export("PlotterPrecision", str(self._plotter_precision))
212 self._project.constants.export_boolean_sequence("PeriodicBC", self._periodic_BC)
213 self._project.constants.export("GlobalMaxH", str(self._global_max_h))
214 self._project.constants.export("GlobalMinH", str(self._global_min_h))
215
216 build_string = "python3 "
217 for i in sys.argv:
218 build_string += " "
219 build_string += i
220 self._project.constants.export_string("BuildInformation", build_string)
221 self._project.constants.export_string(
222 "ConfigureInformation", self._project.output.makefile.configure_call
223 )
224
225 readme_text = """
226
227### SWIFT 2
228
229This code uses the second generation of the SWIFT code controlled through
230its Python API. Under the hood it uses Peano 4. We do not yet have a release
231paper for this second generation of SWIFT, and thus appreciate any
232citation of the original SWIFT paper
233
234 @article{Schaller:2020:SWIFT,
235 title = {t.b.d.}
236 }
237
238"""
239
240 if self._dimensions == 2:
241 readme_text += (
242 """
243
244We assume that you use a domain of ("""
245 + str(self._domain_offset[0])
246 + ""","""
247 + str(self._domain_size[0] - self._domain_offset[0])
248 + """)x("""
249 + str(self._domain_offset[1])
250 + ""","""
251 + str(self._domain_size[1] - self._domain_offset[1])
252 + """).
253 """
254 )
255 else:
256 readme_text += (
257 """
258
259We assume that you use a domain of ("""
260 + str(self._domain_offset[0])
261 + ""","""
262 + str(self._domain_size[0] - self._domain_offset[0])
263 + """)x("""
264 + str(self._domain_offset[1])
265 + ""","""
266 + str(self._domain_size[1] - self._domain_offset[1])
267 + """)x("""
268 + str(self._domain_offset[2])
269 + ""","""
270 + str(self._domain_size[2] - self._domain_offset[2])
271 + """).
272"""
273 )
274
275 readme_text += (
276 """
277
278Peano 4 will cut this domain equidistantly and recursively into three parts along each coordinate axis. This yields a spacetree.
279
280The coarsest mesh chosen has a mesh width of h_max="""
281 + str(self._global_max_h)
282 + """.
283As Peano realises three-partitioning, the actual maximum mesh size will be h_max="""
284 + str(self.__real_max_mesh_size()[0])
285 + """.
286This corresponds to at least """
287 + str(self.__real_max_mesh_size()[1])
288 + """ spacetree levels.
289The coarsest regular mesh hence will host """
290 + str((3**self._dimensions) ** (self.__real_max_mesh_size()[1]))
291 + """ octants.
292
293The finest mesh chosen has a mesh width of h_min="""
294 + str(self._global_min_h)
295 + """.
296As Peano realises three-partitioning, the actual minimal mesh size will be h_min="""
297 + str(self.__real_min_mesh_size()[0])
298 + """.
299This corresponds to at least """
300 + str(self.__real_min_mesh_size()[1])
301 + """ spacetree levels.
302The finest regular mesh hence will host up to """
303 + str((3**self._dimensions) ** (self.__real_min_mesh_size()[1]))
304 + """ octants.
305
306
307Once this resolution is reached, the mesh will stop the initial load balancing, insert the
308particles and then kick off the simulation. The mesh might be refined and erased subsequently,
309but it will never become coarser than these constraints.
310
311Each particle provides its own additional meshing constraints:
312
313Species | max_h | coarsest level | min_h | finest level
314--------|-------|----------------|-------|-------------"""
315 )
316
317 for i in self._particle_species:
318 readme_text += """
319{} | {} | {} | {} | {}
320""".format(
321 i.name,
322 self.real_mesh_size(i.max_h)[0],
323 self.real_mesh_size(i.max_h)[1],
324 self.real_mesh_size(i.min_h)[0],
325 self.real_mesh_size(i.min_h)[1],
326 )
327
328 readme_text += """
329
330The meshing quantities above are species-specific. Once you have multiple
331particle species, the actual grid results from a combination of their
332properties, and some species particles might reside on different resolution
333levels.
334
335"""
336 self._project.output.readme.add_package_description(readme_text)
337
def real_mesh_size(self, target_h):
    """!
    Translate a mesh size into its real Peano mesh size.

    Peano uses three-partitioning. That is, it will never be able to match
    given refinement instructions exactly. All it can do is approximate
    them: it keeps dividing the domain width by three until the result is
    at least as fine as requested.

    @param target_h Requested mesh width. Has to be positive, as the
           subdivision loop would otherwise never terminate.
    @return Tuple of (actual mesh size, number of mesh levels).

    Raises ValueError for a non-positive target_h (which previously would
    have caused an infinite loop).
    """
    if target_h <= 0.0:
        raise ValueError(f"mesh size target_h={target_h} has to be positive")
    h = self._domain_size[0]
    level = 0
    while h > target_h:
        h = h / 3.0
        level += 1
    return (h, level)
356
def __real_max_mesh_size(self):
    """!
    Return the max mesh size that the code really will use.

    See the explanation in the generated Readme.md file or in
    real_mesh_size() why this value is smaller than the requested mesh
    size.
    """
    # NOTE(review): the def header was lost in the rendered listing; it is
    # restored here per the file's own definition index (Project.py:357).
    return self.real_mesh_size(self._global_max_h)
368
def __real_min_mesh_size(self):
    """!
    Return the min mesh size that the code really will use.

    See the explanation in the generated Readme.md file or in
    real_mesh_size() why this value is smaller than the requested mesh
    size.
    """
    # NOTE(review): the def header was lost in the rendered listing; it is
    # restored here per the file's own definition index (Project.py:369).
    return self.real_mesh_size(self._global_min_h)
380
382 self._project.output.makefile.set_dimension(self._dimensions)
383 self._project.output.makefile.set_executable_name(self._executable_name)
384
def set_global_simulation_parameters(
    self,
    dimensions,
    offset,
    domain_size,
    min_end_time,
    max_end_time,
    first_plot_time_stamp,
    time_in_between_plots,
    periodic_BC=None,
    plotter_precision=5,
    initial_grid_coarse=False,
):
    """!
    Set the global simulation parameters.

    offset and domain_size should be lists with dimensions double entries.

    @param min_end_time Float. The simulation runs at least until this time.
    @param max_end_time Float. If you set it zero (or actually any value
           smaller than min_end_time), then the code will run until the cell
           that lags behind the most hits the min time. If you specify a
           valid max time however, you can stop the sim as soon as the most
           advanced cell exceeds this threshold.
    @param first_plot_time_stamp Float. Is irrelevant if
           time_in_between_plots equals zero.
    @param time_in_between_plots Float. Set to zero if you don't want to
           have any plots.
    @param periodic_BC Per-axis periodicity flags; defaults to
           [False, False, False]. Only the first dimensions entries are used.
    @param plotter_precision Number of significant digits used by the
           plotters. Has to be bigger than 0.
    @param initial_grid_coarse Bool. If true, make the initial grid coarse,
           i.e. according to max cell size, which is specified via max_h in
           the added particle species. Otherwise, make initial grid fine,
           i.e. according to the particle species' min_h.

    Raises Exception for a non-positive plotter_precision.
    """
    # Validate before mutating any state so a failed call leaves the
    # project untouched.
    if plotter_precision <= 0:
        raise Exception("Plotter precision has to be bigger than 0")
    if periodic_BC is None:
        # None sentinel replaces the old mutable default argument [False]*3.
        periodic_BC = [False, False, False]

    self._domain_offset = offset
    self._domain_size = domain_size
    self._dimensions = dimensions
    self._max_terminal_time = max_end_time
    self._min_terminal_time = min_end_time
    self._make_initial_grid_coarse = initial_grid_coarse
    self._first_plot_time_stamp = first_plot_time_stamp
    self._time_in_between_plots = time_in_between_plots
    self._plotter_precision = plotter_precision
    # Keep only the flags for the dimensions actually simulated.
    self._periodic_BC = [periodic_BC[d] for d in range(dimensions)]
def set_output_path(self, path):
    """!
    Set the directory that all output files are written to.

    A trailing slash is appended if the given path does not already end
    in one.
    """
    normalised_path = path if path.endswith("/") else path + "/"
    self._output_path = normalised_path
439
441 solverRepositoryDictionary = {
442 "PARTICLE_SPECIES": [x.name for x in self._particle_species],
443 "LOAD_BALANCER": self._load_balancer_name,
444 "LOAD_BALANCER_ARGUMENTS": self._load_balancer_arguments,
445 }
446
447 templatefile_prefix = os.path.dirname(__file__)
448 generated_solver_files = (
450 templatefile_prefix + "/output/GlobalState.template.h",
451 templatefile_prefix + "/output/GlobalState.template.cpp",
452 "GlobalState",
453 self._project.namespace + ["repositories"],
454 "repositories",
455 solverRepositoryDictionary,
456 True,
457 )
458 )
459
460 self._project.output.add(generated_solver_files)
461 self._project.output.makefile.add_cpp_file(
462 "repositories/GlobalState.cpp", generated=True
463 )
464
465 def add_particle_species(self, particle: Particle):
466 """
467
468 Add a new particle species (type) to the project. You get the container
469 back in which the particles are handled on the rank. Use this one where
470 another action set requires a particle set.
471
472 """
473 self._particle_species.append(particle)
474 self._particle_species_set.append(
476 )
477 return self._particle_species_set[-1]
478
479 def generate_Peano4_project(self, verbose=False):
480 """!
481
482 Build the Peano4 project
483
484 The main job of this routine is to add all the action sets et al that you
485 require to run this ExaHyPE2 application.
486
487 This routine generates a Peano project, i.e. the domain-specific ExaHyPE
488 view is translated into a Peano model. Once you have called this routine,
489 any changes to the ExaHyPE 2 configuration do not propagate into the Peano
490 setup anymore. If you alter the ExaHyPE setup, you have to call
491 generate_Peano4_project() again to get a new snapshot/version of the
492 Peano setup.
493
494
495 ## Initial grid
496
497 The initial grid will be a regular one, spanned through
498
499 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
500 action_set_create_regular_grid = peano4.toolbox.CreateRegularGrid(...)
501 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
502
503 According to the documentation of peano4.toolbox.CreateRegularGrid,
504 the action set will produce a mesh that is just finer than the mesh
505 width passed, so we meet the max mesh width, but do not create a
506 mesh that is significantly finer.
507
508 As we insert particles in SPH, we have therefore to make this initial
509 resolution three times coarser than what we allow, as we only insert
510 particles into the finest mesh.
511
512
513
514 ## Task graph compiler
515
516 The core contribution of the generation is the task graph compiler which
517 really is a mapping of algorithm steps per particle species onto grid
518 sweeps. The actual mapping is outsourced into the function represented
519 by self.task_graph_compiler. This way, users can switch the translator's
520 behaviour. This function returns a sequence of mesh traversals. On top
521 of that, we have the default traversals to create a grid, to plot it,
522 to initialise the setup, and to clean up after we're done.
523
524 Once we have our three default steps plus a sequence of algorithmic steps
525 per time step, we run through the following steps:
526
527 - Create a particle set around each species and add it to the project as
528 global variable. This is the global container administering these guys.
529 - Tell each algorithmic step to use this particle set. An exception could be
530 the grid creation. At this point, we don't have particles yet. We add
531 the data structure nevertheless. It ensures that we have all the data
532 in place, and it we also then can be sure that everything is properly
533 initialised. The actual particle set will be empty at this point of the
534 simulation.
535 - Ensure that plotting and initialisation use the update particle-grid
536 association properly. The plot has to update it, as previous steps
537 might have started a resort yet might not have finished it.
538 - Add all the algorithmic steps, including the default ones, to
539 the project.
540
541
542 ## Particle initialisation and proper sorting
543
544 The particle initialisation might end up with an invalid association of
545 particles to vertices. The graph compiler might make the first step of a
546 time step sequence sort if and only if the last one has altered the
547 particles' position. Consequently, we might end up with an initialisation
548 which yields an inconsistent data association. We therefore make it sort
549 the particles, but we need another grid sweep to finalise this sort in
550 case we have to do some global rearrangements. This is our rationale why
551 we realise the initialisation in two steps.
552
553 """
557
558 #
559 # Make plotting resort and filter the particles. For this, they need
560 # the species set. The initialisation steps and the actual time steps
561 # are added the resorting action sets by the graph compiler, i.e. we
562 # should and we may not add them here manually. It is only the plotting
563 # (and the grid construction) which are not automatically added the
564 # sorting steps. However, the grid construction does not hold any
565 # particles yet, so we don't have to deal with this algorithm phase.
566 #
567 for current_species_set in self._particle_species_set:
568 self.algorithm_step_plot.add_action_set(
570 current_species_set
571 )
572 )
573
574 self.algorithm_step_plot.add_action_set(
576 )
577
578 #
579 # Create cleanup action sets
580 #
581
582 for current_species_set in self._particle_species_set:
583 self.algorithm_step_cleanup.add_action_set(Cleanup(current_species_set))
584
585 #
586 # Create mesh traversals aka solver steps
587 #
588 initialisation_steps = self.initialisation_steps_task_graph_compiler(
589 species_sets=self._particle_species_set, verbose=verbose
590 )
591 solver_steps = self.algorithm_steps_task_graph_compiler(
592 species_sets=self._particle_species_set, verbose=verbose
593 )
594
595 #
596 # Make each solver step (incl predefined ones) use the particles and particle sets
597 #
598 for current_species_set in self._particle_species_set:
599 self._project.datamodel.add_global_object(
600 current_species_set.particle_model
601 )
602 self._project.datamodel.add_vertex(current_species_set)
603
604 self.algorithm_step_create_grid.use_vertex(current_species_set)
605 self.algorithm_step_initial_conditions.use_vertex(current_species_set)
606 self.algorithm_step_plot.use_vertex(current_species_set)
607 self.algorithm_step_cleanup.use_vertex(current_species_set)
608
609 #
610 # Add the cell statistics and the task markers everywhere
611 #
612 for current_species_set in self._particle_species_set:
613 cell_marker_for_statistics = (
614 DynamicMeshRefinementAnalysis.create_cell_marker(
615 current_species_set.name
616 )
617 )
618 cell_marker_for_tasks = (
620 current_species_set.name
621 )
622 )
623 vertex_marker_for_tasks = (
625 task_name=current_species_set.name,
626 full_qualified_enumerator_type="::swift2::TaskEnumerator",
627 enumerator_include=""" #include "swift2/TaskEnumerator.h" """,
628 )
629 )
630
631 # Enable cell statistics for this species
632 self._project.datamodel.add_cell(cell_marker_for_statistics)
633 self._project.datamodel.add_cell(cell_marker_for_tasks)
634 self._project.datamodel.add_vertex(vertex_marker_for_tasks)
635
636 # Add cell statistics to these action steps
637 self.algorithm_step_create_grid.use_cell(cell_marker_for_statistics)
638 self.algorithm_step_initial_conditions.use_cell(cell_marker_for_statistics)
639 self.algorithm_step_plot.use_cell(cell_marker_for_statistics)
640 self.algorithm_step_cleanup.use_cell(cell_marker_for_statistics)
641
642 self.algorithm_step_create_grid.use_cell(cell_marker_for_tasks)
643 self.algorithm_step_initial_conditions.use_cell(cell_marker_for_tasks)
644 self.algorithm_step_plot.use_cell(cell_marker_for_tasks)
645 self.algorithm_step_cleanup.use_cell(cell_marker_for_tasks)
646
647 self.algorithm_step_create_grid.use_vertex(vertex_marker_for_tasks)
648 self.algorithm_step_initial_conditions.use_vertex(vertex_marker_for_tasks)
649 self.algorithm_step_plot.use_vertex(vertex_marker_for_tasks)
650 self.algorithm_step_cleanup.use_vertex(vertex_marker_for_tasks)
651
652 for solverstep in solver_steps:
653 solverstep.use_vertex(current_species_set)
654 solverstep.use_vertex(vertex_marker_for_tasks)
655 solverstep.use_cell(cell_marker_for_statistics)
656 solverstep.use_cell(cell_marker_for_tasks)
657
658 for solverstep in initialisation_steps:
659 solverstep.use_vertex(current_species_set)
660 solverstep.use_vertex(vertex_marker_for_tasks)
661 solverstep.use_cell(cell_marker_for_statistics)
662 solverstep.use_cell(cell_marker_for_tasks)
663
664 action_set_plot_grid = peano4.toolbox.PlotGridInPeanoBlockFormat(
665 filename="grid",
666 time_stamp_evaluation="repositories::getMinTimeStamp()",
667 additional_includes="""
668#include "repositories/GlobalState.h"
669""",
670 )
671 action_set_plot_grid.descend_invocation_order = (
672 self.algorithm_step_plot.highest_descend_invocation_order() + 1
673 )
674 action_set_plot_grid.parallel = True
675 self.algorithm_step_plot.add_action_set(action_set_plot_grid)
676
677 init_gridsize = self._global_min_h
679 init_gridsize = self._global_max_h
680 action_set_create_regular_grid = peano4.toolbox.CreateRegularGrid(init_gridsize)
681 action_set_create_regular_grid.descend_invocation_order = (
682 self.algorithm_step_create_grid.lowest_descend_invocation_order() - 1
683 )
684 action_set_create_regular_grid.parallel = True
685 self.algorithm_step_create_grid.add_action_set(action_set_create_regular_grid)
686
687 self._project.solversteps.add_step(self.algorithm_step_create_grid)
688 self._project.solversteps.add_step(self.algorithm_step_initial_conditions)
689 self._project.solversteps.add_step(self.algorithm_step_plot)
690 self._project.solversteps.add_step(self.algorithm_step_cleanup)
691
692 for solverstep in solver_steps:
693 for action_set in self.additional_action_sets_per_solver_step:
694 solverstep.add_action_set(action_set)
695 self._project.solversteps.add_step(solverstep)
696
697 for initialisation_step in initialisation_steps:
698 self._project.solversteps.add_step(initialisation_step)
699
700 self._project.main = SWIFTMain(
701 self._project, initialisation_steps, solver_steps
702 )
703
705 self._project.output.makefile.parse_configure_script_outcome(
707 )
708 self.__export_constants()
709
710 self._project.output.makefile.set_mode(self._build_mode)
711
712 return self._project
713
715 self,
716 parallelisation=Parallelisation_DomainDecomposition,
717 storage=Storage_Scattered,
718 sorting=Sorting_MultiscaleSort,
719 kernel_optimisation=KernelOptimisation_NoOptimisation,
720 ):
721 """!
722 Specify the realisation of the project: Select
723 - which parallelisation method to use
724 (domain-decomposition, task-graph, multisweep-task-graph)
725 - which storage strategy to use
726 (scattered, continuous-per-vertex, global-continuous)
727 - which particle sorting strategy to use
728 (multiscale-sort, bucket-sort)
729 - which kernel optimisation to use
730 (no-optimisation, vectorise-all, vectorise-distance-checks-preamble)
731
732 Available options for all these are kept in ./ProjectRealisation.py.
733
734 Only call this function after adding particle species to the project via self.add_particle_species().
735 """
736
737 # have we added particle sets already?
738 if (
739 self._particle_species is None
740 or len(self._particle_species) == 0
741 or self._particle_species_set is None
742 or len(self._particle_species_set) == 0
743 ):
744 raise ValueError(
745 "No particle species recorded. Call this function only after adding particle sets to project."
746 )
747
748 # make sure selections are valid.
749 if parallelisation not in ParallelisationVariants:
750 raise ValueError(
751 f"parallelisation variant '{parallelisation}' unknown; available options are",
752 ParallelisationVariants,
753 )
754 if storage not in StorageVariants:
755 raise ValueError(
756 f"storage variant '{storage}' unknown; available options are",
757 StorageVariants,
758 )
759 if sorting not in SortingVariants:
760 raise ValueError(
761 f"sorting variant '{sorting}' unknown; available options are",
762 SortingVariants,
763 )
764 if kernel_optimisation not in AllKernelOptimisationVariants:
765 raise ValueError(
766 f"sorting variant '{kernel_optimisation}' unknown; available options are",
767 AllKernelOptimisationVariants,
768 )
769
770 # check that selection makes sense.
771 if (
772 storage == Storage_Scattered
773 and kernel_optimisation not in BasicKernelOptimisationVariants
774 ):
775 raise ValueError(
776 "Can't use kernel optimisation '{kernel_optimisation}' with scattered storage."
777 )
778
781
782 # pick correct task graph compiler given the selection
783 if parallelisation == Parallelisation_DomainDecomposition:
784 if storage == Storage_Scattered:
785 if sorting == Sorting_MultiscaleSort:
787 swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_multiscale_sort_scattered_memory
788 )
790 swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_multiscale_sort_scattered_memory
791 )
792 elif sorting == Sorting_BucketSort:
794 swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_bucket_sort_scattered_memory
795 )
797 swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_bucket_sort_scattered_memory
798 )
799 else:
800 raise NotImplementedError()
801
802 elif (
803 storage == Storage_ContinuousPerVertex
804 or storage == Storage_GlobalContinuous
805 ):
806 if sorting == Sorting_MultiscaleSort:
808 swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_multiscale_sort_coalesced_memory
809 )
811 swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_multiscale_sort_coalesced_memory
812 )
813 elif sorting == Sorting_BucketSort:
815 swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_bucket_sort_coalesced_memory
816 )
818 swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_bucket_sort_coalesced_memory
819 )
820 else:
821 raise NotImplementedError()
822
823 else:
824 raise NotImplementedError()
825
826 elif parallelisation == Parallelisation_MultisweepTaskGraph:
827 if storage == Storage_Scattered:
828 if sorting == Sorting_MultiscaleSort:
830 swift2.api.graphcompiler.particle_steps_onto_multisweep_task_graph_multiscale_sort_scattered_memory
831 )
833 swift2.api.graphcompiler.initialisation_steps_onto_multisweep_task_graph_multiscale_sort_scattered_memory
834 )
835 elif sorting == Sorting_BucketSort:
837 swift2.api.graphcompiler.particle_steps_onto_multisweep_task_graph_bucket_sort_scattered_memory
838 )
840 swift2.api.graphcompiler.initialisation_steps_onto_multisweep_task_graph_bucket_sort_scattered_memory
841 )
842 else:
843 raise NotImplementedError()
844
845 elif (
846 storage == Storage_ContinuousPerVertex
847 or storage == Storage_GlobalContinuous
848 ):
849 if sorting == Sorting_MultiscaleSort:
851 swift2.api.graphcompiler.particle_steps_onto_multisweep_task_graph_multiscale_sort_coalesced_memory
852 )
854 swift2.api.graphcompiler.initialisation_steps_onto_multisweep_task_graph_multiscale_sort_coalesced_memory
855 )
856 elif sorting == Sorting_BucketSort:
858 swift2.api.graphcompiler.particle_steps_onto_multisweep_task_graph_bucket_sort_coalesced_memory
859 )
861 swift2.api.graphcompiler.initialisation_steps_onto_multisweep_task_graph_bucket_sort_coalesced_memory
862 )
863 else:
864 raise NotImplementedError()
865 else:
866 raise NotImplementedError()
867
868 elif parallelisation == Parallelisation_TaskGraph:
869 if storage == Storage_Scattered:
870 if sorting == Sorting_MultiscaleSort:
872 swift2.api.graphcompiler.particle_steps_onto_task_graph_multiscale_sort_scattered_memory
873 )
875 swift2.api.graphcompiler.initialisation_steps_onto_task_graph_multiscale_sort_scattered_memory
876 )
877 elif sorting == Sorting_BucketSort:
879 swift2.api.graphcompiler.particle_steps_onto_task_graph_bucket_sort_scattered_memory
880 )
882 swift2.api.graphcompiler.initialisation_steps_onto_task_graph_bucket_sort_scattered_memory
883 )
884 else:
885 raise NotImplementedError()
886
887 elif (
888 storage == Storage_ContinuousPerVertex
889 or storage == Storage_GlobalContinuous
890 ):
891 if sorting == Sorting_MultiscaleSort:
893 swift2.api.graphcompiler.particle_steps_onto_task_graph_multiscale_sort_coalesced_memory
894 )
896 swift2.api.graphcompiler.initialisation_steps_onto_task_graph_multiscale_sort_coalesced_memory
897 )
898 elif sorting == Sorting_BucketSort:
900 swift2.api.graphcompiler.particle_steps_onto_task_graph_bucket_sort_coalesced_memory
901 )
903 swift2.api.graphcompiler.initialisation_steps_onto_task_graph_bucket_sort_coalesced_memory
904 )
905 else:
906 raise NotImplementedError()
907 else:
908 raise NotImplementedError()
909 else:
910 raise NotImplementedError()
911
912 # Make sure we didn't miss anything
914 raise ValueError("No task graph compiler selected?")
916 raise ValueError("No initialisation step task graph compiler selected?")
917
918 # Update particle sets and particles now
919 new_particlelist = []
920
921 for p in self._particle_species:
922 # lists are traversed as copies, not by reference, so we need to
923 # make a new list.
924 newp = copy.deepcopy(p)
925
926 if kernel_optimisation == KernelOptimisation_NoOptimisation:
927 pass
928 elif kernel_optimisation == KernelOptimisation_VectoriseAll:
929 newp.switch_namespace_of_all_particle_iterators(
930 "::swift2::kernels::coalesced::"
931 )
932 elif kernel_optimisation == KernelOptimisation_VectoriseDistanceChecks:
933 newp.switch_namespace_of_all_particle_iterators(
934 "::swift2::kernels::coalesced::prefixcheck::"
935 )
936 else:
937 raise NotImplementedError()
938
939 new_particlelist.append(newp)
940
941 self._particle_species = new_particlelist
942
943 new_particlesetlist = []
944
945 for ps in self._particle_species_set:
946 # lists are traversed as copies, not by reference, so we need to make a new list.
947 newps = copy.deepcopy(ps)
948
949 if storage == Storage_ContinuousPerVertex:
950 newps.generator = (
952 newps
953 )
954 )
955 elif storage == Storage_GlobalContinuous:
956 newps.generator = (
958 newps
959 )
960 )
961 elif storage == Storage_Scattered:
963 newps
964 )
965 else:
966 raise NotImplementedError()
967
968 new_particlesetlist.append(newps)
969
970 self._particle_species_set = new_particlesetlist
971
972 return
973
974 @abstractmethod
976 assert False, "should be implemented by subclass"
977 pass
Represents a Peano 4 project.
Definition Project.py:16
Map a particle set onto heap objects indexed by a list.
Map a particle set onto heap objects indexed by a list.
Update the parallel state of particles and keep stats of them.
Swift2 project.
Definition Project.py:27
set_realisation(self, parallelisation=Parallelisation_DomainDecomposition, storage=Storage_Scattered, sorting=Sorting_MultiscaleSort, kernel_optimisation=KernelOptimisation_NoOptimisation)
Specify the realisation of the project: Select.
Definition Project.py:720
generate_Peano4_project(self, verbose=False)
Build the Peano4 project.
Definition Project.py:479
__compute_global_min_h(self)
Run over all species specified and find out what their minimum (finest) h is.
Definition Project.py:151
__generate_global_state_files(self)
Definition Project.py:440
__init__(self, namespace, project_name, directory=".", executable="swift2")
Definition Project.py:46
real_mesh_size(self, target_h)
Translate a mesh size into its real Peano mesh size.
Definition Project.py:338
__export_constants(self)
We export SWIFT's constants.
Definition Project.py:168
initialisation_steps_task_graph_compiler
Definition Project.py:79
set_load_balancing(self, load_balancer_name, load_balancer_arguments)
load_balancer_name: string Should be full-qualified name of the load balancer.
Definition Project.py:84
set_output_path(self, path)
Definition Project.py:435
__compute_global_max_h(self)
Run over all species specified and find out what their maximum (coarsest) h is.
Definition Project.py:134
__real_min_mesh_size(self)
Return the min mesh size that the code really will use.
Definition Project.py:369
add_particle_species(self, Particle particle)
Add a new particle species (type) to the project.
Definition Project.py:465
set_global_simulation_parameters(self, dimensions, offset, domain_size, min_end_time, max_end_time, first_plot_time_stamp, time_in_between_plots, periodic_BC=[False, False, False], plotter_precision=5, initial_grid_coarse=False)
offset and size should be lists with dimensions double entries.
Definition Project.py:397
set_Peano4_installation(self, src_path, mode=peano4.output.CompileMode.Release)
src_path: string Path (relative or absolute) to the src directory of Peano.
Definition Project.py:110
__real_max_mesh_size(self)
Return the max mesh size that the code really will use.
Definition Project.py:357
create_vertex_marker(task_name, full_qualified_enumerator_type="tarch::multicore::TaskEnumerator", enumerator_include=""" #include "tarch/multicore/TaskEnumerator.h" """)
Create vertex marker.
create_cell_marker(task_name)
Create cell marker for tasking.