noh.py
import os
import argparse
import numpy as np

import peano4
import swift2
import swift2.sphtools as sphtools
import dastgen2

import shutil


Parallelisation_DomainDecomposition = "domain-decomposition"
Parallelisation_TaskTree = "task-tree"
Parallelisation_TaskGraph = "task-graph"
ParallelisationVariants = [
    Parallelisation_DomainDecomposition,
    Parallelisation_TaskTree,
    Parallelisation_TaskGraph,
]

Storage_Scattered = "scattered"
Storage_ContinuousPerVertex = "continuous-per-vertex"
Storage_GlobalContinuous = "global-continuous"
StorageVariants = [Storage_ContinuousPerVertex, Storage_GlobalContinuous]

Sorting_MultiscaleSort = "multiscale-sort"
Sorting_BucketSort = "bucket-sort"
SortingVariants = [Sorting_MultiscaleSort, Sorting_BucketSort]

KernelOptimisation_NoOptimisation = "no-optimisation"
KernelOptimisation_OuterGuards = "outer-guards"
KernelOptimisation_VectoriseAll = "vectorise-all"
KernelOptimisation_VectoriseDistanceChecks = "vectorise-distance-checks"

BasicKernelOptimisationVariants = [
    KernelOptimisation_NoOptimisation,
    KernelOptimisation_OuterGuards,
]
AllKernelOptimisationVariants = [
    KernelOptimisation_NoOptimisation,
    KernelOptimisation_OuterGuards,
    KernelOptimisation_VectoriseAll,
    KernelOptimisation_VectoriseDistanceChecks,
]

Choices = [
    parallelisation_variant + "." + storage + "." + sorting + "." + kernel_optimisation
    for parallelisation_variant in ParallelisationVariants
    for storage in [Storage_Scattered]
    for sorting in SortingVariants
    for kernel_optimisation in BasicKernelOptimisationVariants
] + [
    parallelisation_variant + "." + storage + "." + sorting + "." + kernel_optimisation
    for parallelisation_variant in ParallelisationVariants
    for storage in StorageVariants
    for sorting in SortingVariants
    for kernel_optimisation in AllKernelOptimisationVariants
]
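# Each realisation string is the dot-separated concatenation of one entry per
# variant list. For example, Choices[0] is
# "domain-decomposition.scattered.multiscale-sort.no-optimisation"; the script
# later splits it with args.realisation.split(".") to configure the project.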


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SPH benchmarking script")
    parser.add_argument(
        "-j",
        "--parallel-builds",
        dest="j",
        type=int,
        default=-1,
        help="Parallel builds",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Verbose",
    )
    parser.add_argument(
        "-c",
        "--clean",
        dest="clean_temporary_files",
        action="store_true",
        help="Remove temporary directories and output files before script starts",
    )
    parser.add_argument(
        "-np",
        "--particle-number",
        dest="particle_number",
        required=True,
        type=int,
        help="Particle number (1D)",
    )
    parser.add_argument(
        "-t",
        "--trees",
        dest="trees",
        default=None,
        type=int,
        help="Number of trees to be used",
    )
    parser.add_argument(
        "-et",
        "--end-time",
        dest="end_time",
        type=float,
        default=0.2,
        help="End of simulation",
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output_file",
        default="noh2D",
        help="Executable file name",
    )
    parser.add_argument(
        "-plot",
        "--plot-delta",
        dest="plot_delta",
        type=float,
        default=0.01,
        help="Plot delta (0 switches off)",
    )
    parser.add_argument(
        "-dt",
        "--timestep-size",
        dest="timestep_size",
        type=float,
        default=1e-4,
        help="Global timestep size (initial value)",
    )
    parser.add_argument(
        "-cfl",
        "--cfl-factor",
        dest="cfl_factor",
        type=float,
        default=0.1,
        help="Value of the CFL constant",
    )
    parser.add_argument(
        "-ppc",
        "--particles-per-cell",
        dest="particles_per_cell",
        type=int,
        default=200,
        help="Particles per cell (200 is default)",
    )
    parser.add_argument(
        "-pd",
        "--peano-dir",
        dest="peanodir",
        default="../../../..",
        help="Peano4 directory",
    )
    parser.add_argument(
        "-m",
        "--mode",
        dest="mode",
        choices=["release", "stats", "trace", "asserts", "debug"],
        default="release",
        help="Pick build type",
    )
    parser.add_argument(
        "-rea",
        "--realisation",
        dest="realisation",
        choices=Choices,
        default=Choices[0],
        help="Realisation to use",
    )
    parser.add_argument(
        "-ms",
        "--mantissa-size",
        dest="mantissa_size",
        type=int,
        default=0,
        help="Mantissa size for clang truncation (if supported by compiler). Default of 0 disables any compression",
    )
    parser.add_argument(
        "-k",
        "--kernel",
        dest="kernel",
        type=str,
        default="quartic_spline",
        choices=sphtools.sph_kernel_list,
        help="SPH kernel function to use.",
    )
    args = parser.parse_args()
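    # Example invocation (illustrative; only --particle-number is mandatory, the
    # remaining flags here simply repeat their defaults):
    #   python3 noh.py -np 100 -et 0.2 -dt 1e-4 -o noh2D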


    project_namespace = ["benchmarks", "swift2", "noh"]

    project = swift2.Project(
        namespace=project_namespace, project_name="Noh", executable=args.output_file
    )

    if (
        args.realisation.split(".")[0] == Parallelisation_DomainDecomposition
        and args.realisation.split(".")[1] == Storage_Scattered
        and args.realisation.split(".")[2] == Sorting_MultiscaleSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_multiscale_sort_sweeps_scattered_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_multiscale_sort_sweeps_scattered_memory
        )
    elif (
        args.realisation.split(".")[0] == Parallelisation_DomainDecomposition
        and args.realisation.split(".")[1] == Storage_Scattered
        and args.realisation.split(".")[2] == Sorting_BucketSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_bucket_sort_scattered_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_bucket_sort_scattered_memory
        )
    elif (
        args.realisation.split(".")[0] == Parallelisation_DomainDecomposition
        and (
            args.realisation.split(".")[1] == Storage_ContinuousPerVertex
            or args.realisation.split(".")[1] == Storage_GlobalContinuous
        )
        and args.realisation.split(".")[2] == Sorting_MultiscaleSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_insert_sort_sweeps_coalesced_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_insert_sort_sweeps_coalesced_memory
        )
    elif (
        args.realisation.split(".")[0] == Parallelisation_DomainDecomposition
        and (
            args.realisation.split(".")[1] == Storage_ContinuousPerVertex
            or args.realisation.split(".")[1] == Storage_GlobalContinuous
        )
        and args.realisation.split(".")[2] == Sorting_BucketSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_separate_mesh_traversals_sort_on_the_fly_coalesced_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_sort_on_the_fly_coalesced_memory
        )
    elif (
        args.realisation.split(".")[0] == Parallelisation_TaskTree
        and args.realisation.split(".")[1] == Storage_Scattered
        and args.realisation.split(".")[2] == Sorting_MultiscaleSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_task_tree_multiscale_sort_sweeps_scattered_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_multiscale_sort_sweeps_scattered_memory
        )
    elif (
        args.realisation.split(".")[0] == Parallelisation_TaskTree
        and args.realisation.split(".")[1] == Storage_Scattered
        and args.realisation.split(".")[2] == Sorting_BucketSort
    ):
        project.algorithm_steps_task_graph_compiler = (
            swift2.api.graphcompiler.particle_steps_onto_task_tree_bucket_sort_scattered_memory
        )
        project.initialisation_steps_task_graph_compiler = (
            swift2.api.graphcompiler.initialisation_steps_onto_separate_mesh_traversals_bucket_sort_scattered_memory
        )
    else:
        assert False, "realisation " + args.realisation + " not supported"

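    # For example (derived from the lists above), the default realisation
    # "domain-decomposition.scattered.multiscale-sort.no-optimisation" takes the
    # first branch and selects the multiscale-sort, scattered-memory graph
    # compilers for both the algorithm and the initialisation steps.
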
    # Initialise the SPH particle --------------------------------------------------
    # If multiple particle species are present, we can specify the parameters
    # independently for each species.

    name = "HydroPart"

    dimensions = 2
    hydro_dimensions = 2  # dimensions for the hydro

    particles_per_cell = args.particles_per_cell
    total_number_of_particles = args.particle_number**hydro_dimensions
    total_number_of_cells = total_number_of_particles / particles_per_cell
    cells_per_dimension = total_number_of_cells ** (1.0 / hydro_dimensions)
    max_cell_size = 1.0 / cells_per_dimension * 3.0
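    # Worked example (illustrative): with --particle-number 200 and the default
    # 200 particles per cell, 200^2 = 40000 particles fill 200 cells, i.e. about
    # 14.1 cells per dimension, so max_cell_size = 3/14.1 ~ 0.21 on the unit domain.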

    print(
        "{} particles in total fit into {} cells, i.e. {} cells per dimension. This gives h={}".format(
            total_number_of_particles,
            total_number_of_cells,
            cells_per_dimension,
            max_cell_size,
        )
    )

    if args.realisation.split(".")[3] == KernelOptimisation_NoOptimisation:
        particle_interaction_kernel_realisation = (
            swift2.particle.SPHLeapfrogFixedSearchRadius.ParticleKernelRealisation.NO_OPTIMISATION
        )
    elif args.realisation.split(".")[3] == KernelOptimisation_OuterGuards:
        particle_interaction_kernel_realisation = (
            swift2.particle.SPHLeapfrogFixedSearchRadius.ParticleKernelRealisation.USE_OUTER_GUARDS
        )
    elif args.realisation.split(".")[3] == KernelOptimisation_VectoriseDistanceChecks:
        particle_interaction_kernel_realisation = (
            swift2.particle.SPHLeapfrogFixedSearchRadius.ParticleKernelRealisation.VECTORISE_WITH_SEPARATE_DISTANCE_CHECKS
        )
    elif args.realisation.split(".")[3] == KernelOptimisation_VectoriseAll:
        particle_interaction_kernel_realisation = (
            swift2.particle.SPHLeapfrogFixedSearchRadius.ParticleKernelRealisation.VECTORISE_ALL
        )
    else:
        raise NotImplementedError()

    # Set up the particle species with a leapfrog SPH integrator and fixed search radius
    particle = swift2.particle.SPHLeapfrogFixedSearchRadius(
        name=name,
        dimensions_hydro=hydro_dimensions,
        cfl_factor=args.cfl_factor,
        initial_time_step_size=args.timestep_size,
        constant_time_step_size=True,
        swift_project_namespace=project_namespace,
        particles_per_cell=particles_per_cell,
        min_h=max_cell_size,
        max_h=max_cell_size,
        particle_interaction_kernel_realisation=particle_interaction_kernel_realisation,
    )


    # HYDRO_DIMENSION can be smaller than the Peano4 dimensions
    HYDRO_DIMENSIONS = hydro_dimensions

    # Particle number along 1 dimension
    HYDRO_PART_NUMBER = args.particle_number

    # Time integration parameters
    GLOBAL_TIME_STEP_SIZE = args.timestep_size
    CFL_CONSTANT = args.cfl_factor

    # EoS parameter ---------------------------------------------------------------
    gamma_hydro_list = [str(5 / 3), str(7 / 5), str(4 / 3), str(2 / 1)]
    gamma_hydro_list_symbols = [
        "HYDRO_GAMMA_5_3",
        "HYDRO_GAMMA_7_5",
        "HYDRO_GAMMA_4_3",
        "HYDRO_GAMMA_2_1",
    ]
    GAMMA = gamma_hydro_list[0]
    GAMMA_HYDRO_SYMBOL = gamma_hydro_list_symbols[0]

    # SPH kernel -------------------------------------------------------

    kernel = args.kernel
    KERNEL_SYMBOL = sphtools.sph_kernel_macro_name[kernel]
    KERNEL_SUPPORT_RAD = sphtools.sph_kernel_H_over_h[HYDRO_DIMENSIONS - 1][kernel]

    hydro_h_max = max_cell_size / 2
    particle.h_hydro_max = hydro_h_max

    # particle.h_hydro_min = 1e-6
    # particle.h_max_iterations = 10
    # particle.h_tolerance = 1e-4
    # particle.eta_factor = 1.2348
    # particle.beta_av = 3.0
    # particle.alpha_av = 0.8

    # IMPORTANT: Reduce the mantissa size for particle attributes -------------------
    # Native size is 53 bits (double precision).
    if args.mantissa_size > 0:
        particle.mass.valid_mantissa_bits = args.mantissa_size
        particle.velocity.valid_mantissa_bits = args.mantissa_size
        particle.acceleration.valid_mantissa_bits = args.mantissa_size
        particle.density.valid_mantissa_bits = args.mantissa_size
        particle.pressure.valid_mantissa_bits = args.mantissa_size
        particle.smoothL.valid_mantissa_bits = args.mantissa_size
        particle.u.valid_mantissa_bits = args.mantissa_size
        particle.uDot.valid_mantissa_bits = args.mantissa_size
        particle.f.valid_mantissa_bits = args.mantissa_size
        particle.wcount_dh.valid_mantissa_bits = args.mantissa_size
        particle.rho_dh.valid_mantissa_bits = args.mantissa_size
        particle.hleft.valid_mantissa_bits = args.mantissa_size
        particle.hright.valid_mantissa_bits = args.mantissa_size
        particle.smoothL_prev.valid_mantissa_bits = args.mantissa_size
        particle.wcount.valid_mantissa_bits = args.mantissa_size
        particle.hDot.valid_mantissa_bits = args.mantissa_size
        particle.residual.valid_mantissa_bits = args.mantissa_size
        particle.balsara.valid_mantissa_bits = args.mantissa_size
        particle.rot_v.valid_mantissa_bits = args.mantissa_size
        particle.div_v.valid_mantissa_bits = args.mantissa_size
        particle.v_sig_AV.valid_mantissa_bits = args.mantissa_size
        particle.soundSpeed.valid_mantissa_bits = args.mantissa_size
        particle.v_full.valid_mantissa_bits = args.mantissa_size
        particle.u_full.valid_mantissa_bits = args.mantissa_size
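    # For example (illustrative), -ms 23 stores these attributes with roughly
    # single-precision accuracy while keeping the double-precision value range.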

    particle.data.expose_all_attributes_in_header_file()

    # Add particle to the SWIFT project --------------------------------------------
    # Note that changes to the particle made after the particle_set has been added
    # to the project will not take effect.
    particle_set = project.add_particle_species(particle)

    if args.realisation.split(".")[1] == Storage_ContinuousPerVertex:
        particle_set.generator = (
                particle_set
            )
        )
        print("Benchmark: Use continuous per vertex data layout")
    elif args.realisation.split(".")[1] == Storage_GlobalContinuous:
        particle_set.generator = (
        )
        print("Benchmark: Use globally continuous data layout")
    else:
        particle_set.generator = (
                particle_set
            )
        )

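    # The storage variants above differ in the particle memory layout: "scattered"
    # keeps each particle as an individual heap object, while the continuous-per-vertex
    # and global-continuous variants presumably store particles in contiguous arrays;
    # only the continuous layouts are offered together with the vectorised kernel
    # optimisations (see the Choices list at the top of the file).
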
    if args.mode == "release":
        build_mode = peano4.output.CompileMode.Release
    if args.mode == "stats":
        build_mode = peano4.output.CompileMode.Stats
    if args.mode == "asserts":
        build_mode = peano4.output.CompileMode.Asserts
    if args.mode == "trace":
        build_mode = peano4.output.CompileMode.Trace
    if args.mode == "debug":
        build_mode = peano4.output.CompileMode.Debug

    # Set Peano4 simulation parameters ---------------------------------------------
    offset = [0, 0, 0]
    domain_size = [1, 1, 1]
    periodic_boundary_conditions = [False, False, False]

    project.set_global_simulation_parameters(
        dimensions=dimensions,
        offset=offset,
        domain_size=domain_size,
        min_end_time=args.end_time,
        max_end_time=0.0,
        first_plot_time_stamp=0.0,
        time_in_between_plots=args.plot_delta,
        periodic_BC=periodic_boundary_conditions,
        plotter_precision=8,
    )
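    # With the defaults above (end time 0.2, plot delta 0.01) roughly 20 snapshots
    # are written; a plot delta of 0 switches plotting off, as noted in the CLI help.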

    project.set_Peano4_installation(args.peanodir, build_mode)

    if args.trees != 0:
        project.set_load_balancing(
            "toolbox::loadbalancing::strategies::SpreadOutOnceGridStagnates",
            "new toolbox::loadbalancing::DefaultConfiguration()",
        )


    # Noh problem (2D) snippet --------------------------------------------------
    # The particle attributes are set via an initialisation snippet that imposes
    # the initial conditions specific to this problem, i.e. a velocity field that
    # converges towards the center of the domain and drives the collapse.

    ICs = swift2.scenario.NohProblemIC(dimensions=HYDRO_DIMENSIONS)
    initialisation_snippet = ICs.initialisation_snippet

    project.algorithm_step_initial_conditions.add_action_set(
            particle_set=particle_set,
            initialisation_call=initialisation_snippet,
            distance_between_particles_along_one_axis=1.0 / args.particle_number,
            additional_includes="""
            #include "swift2/kernels/legacy/equation_of_state.h"
            """,
        )
    )


        "particles",
        particle_set,
        "globaldata::{}::getSpecies().getMinTimeStamp()".format(particle.name),
    )
    particle_plotter.add_attribute_to_plot(particle.part_id, 1)
    particle_plotter.add_attribute_to_plot(particle.velocity, dimensions)
    particle_plotter.add_attribute_to_plot(particle.acceleration, dimensions)
    particle_plotter.add_attribute_to_plot(particle.mass, 1)
    particle_plotter.add_attribute_to_plot(particle.density, 1)
    particle_plotter.add_attribute_to_plot(particle.pressure, 1)
    particle_plotter.add_attribute_to_plot(particle.u, 1)
    particle_plotter.add_attribute_to_plot(particle.smoothL, 1)
    particle_plotter.add_attribute_to_plot(particle.is_boundary_part, 1)
    if args.mode == "asserts" or args.mode == "debug":
        particle_plotter.add_attribute_to_plot(particle.density_neighbourcount, 1)
        particle_plotter.add_attribute_to_plot(particle.force_neighbourcount, 1)
    project.algorithm_step_plot.add_action_set(particle_plotter)



    peano4_project = project.generate_Peano4_project(verbose=args.verbose)

    # Some sanity checks ----------------------------------------------------------
    if GAMMA not in gamma_hydro_list:
        print(f"Please check the value of GAMMA. You have chosen: {GAMMA}")

    print("-----------------------------------------------------------------------")
    print("SPH parameters summary:")
    print(f"Effective dimensions of the Simulation: {HYDRO_DIMENSIONS}")
    print(f"Global time-step size: {GLOBAL_TIME_STEP_SIZE}")
    print(f"Number of particles (1D): {HYDRO_PART_NUMBER}")
    print(f"Adiabatic index: {GAMMA}")
    print(f"Smoothing length free factor: {particle.eta_factor}")
    print("-----------------------------------------------------------------------")

    # Export Constants.h file ------------------------------------------------------
    peano4_project.constants.export_const_with_type(
        "HYDRO_DIMENSIONS", str(HYDRO_DIMENSIONS), "double"
    )
    peano4_project.constants.export_const_with_type(
        "HYDRO_PART_NUMBER", str(HYDRO_PART_NUMBER), "double"
    )
    peano4_project.constants.export_const_with_type(
        "GLOBAL_TIME_STEP_SIZE", str(GLOBAL_TIME_STEP_SIZE), "double"
    )
    peano4_project.constants.export_const_with_type("GAMMA", GAMMA, "double")

    # Export symbols for compilation (e.g. for SPH kernel) -----------------------
    peano4_project.output.makefile.add_CXX_flag(
        "-D" + "HYDRO_DIMENSION=" + str(HYDRO_DIMENSIONS)
    )
    peano4_project.output.makefile.add_CXX_flag("-D" + GAMMA_HYDRO_SYMBOL)
    peano4_project.output.makefile.add_CXX_flag("-D" + KERNEL_SYMBOL)
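    # With the defaults above this amounts to flags such as -DHYDRO_DIMENSION=2 and
    # -DHYDRO_GAMMA_5_3, plus the macro name of the chosen SPH kernel.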


    # If we start a project from scratch, we may want to remove some of the
    # previously generated code first.
    if args.clean_temporary_files:
        dirs = ["vertexdata/", "repositories/", "globaldata/", "observers/"]
        for dir in dirs:
            shutil.rmtree(dir)

    # It is safe (and recommended) to overwrite any existing main file, since this
    # is generated automatically by the Swift2 graph compiler.
    peano4_project.generate(
        overwrite=peano4.output.Overwrite.Always, throw_away_data_after_generation=True
    )
    peano4_project.build(make_clean_first=True, number_of_parallel_builds=args.j)

    if args.clean_temporary_files:
        # remove output files from previous runs
        output_files = [
            f
            for f in os.listdir(".")
            if f.endswith(".peano-patch-file")
            or f.endswith(".vtu")
            or f.endswith(".pvd")
        ]
        for f in output_files:
            os.remove(f)
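    # Once the build completes, the executable carries the name passed via --output
    # (noh2D by default) and can be launched directly.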