Project.py
# This file is part of the Peano project. For conditions of distribution and
# use, please see the copyright notice at www.peano-framework.org

import gc
import os
import re
import sys
import subprocess
import multiprocessing

import peano4.output
import peano4.datamodel
import peano4.solversteps
import peano4.runner

class Project(object):
    """!

    Represents a Peano 4 project.

    namespace: Sequence of strings representing the (nested) namespace. Pass in
      [ "examples", "algebraicmg" ], for example, if you want to write a solver
      that is embedded into the namespace examples::algebraicmg.


    ## Global project properties

    If you want to alter some global constants, you should manipulate
    the constants attribute. It will eventually feed into the generated
    file Constants.h. See peano4.output.Constants for further info.

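    ## Usage sketch

    A minimal end-to-end workflow might look as follows. This is a sketch:
    the namespace, project name and executable name are placeholders, and
    the datamodel/solversteps setup is elided.

    ~~~
    import peano4

    project = peano4.Project( namespace=["examples", "algebraicmg"],
                              project_name="MyProject",
                              executable="myexecutable" )
    # ... populate project.datamodel and project.solversteps ...
    project.generate()
    project.build()
    project.run( "myexecutable" )
    ~~~
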
32 """
33
34 def __init__(self, namespace, project_name, directory = ".", subdirectory = "", executable = ""):
35 """!
36 project_name Simple string.
37 """
        if sys.version_info.major < 3:
            print("Warning: should be invoked through python3, i.e. with a newer Python version")

        if subdirectory and not os.path.exists(subdirectory):
            os.mkdir(subdirectory)

        self.rootnamespace = namespace.copy()
        self.namespace = namespace
        self.namespaces = []
        self.subdirectories = []
        self.project_name = project_name
        self.directory = directory
        self._subdirectory = subdirectory
        if subdirectory:
            self.namespace += [subdirectory]
        if self.namespace:
            if not self.namespace in self.namespaces:
                self.namespaces.append(self.namespace)
        if not self.subdirectory in self.subdirectories:
            self.subdirectories.append(self.subdirectory)

        #
        # Create default output model, i.e. those parts that have to be there
        # always
        #
        # NOTE: the exact constructor arguments of the default objects below
        # are an assumption; see peano4.output, peano4.datamodel,
        # peano4.solversteps and peano4.runner for the authoritative signatures
        self.output = peano4.output.Output()

        #
        # Empty model by default
        #
        self.datamodel = peano4.datamodel.Model(namespace)

        self.solversteps = peano4.solversteps.Steps(self)

        self.main = peano4.runner.DefaultSequence(self)

        self.is_generated = False
        self.is_built = False
        self.build_was_successful = False

        self.constants = peano4.output.Constants(self)
        self.subconstants = []

        if executable:
            self.output.makefile.set_executable_name(executable)

        self.is_extended = False

    def __str__(self):
        return "(#steps=" + str(self.solversteps) + ",model=" + str(self.datamodel) + ")"

    def set_fenv_handler(self, args):
        """!
        Forwards args into the floating-point environment (fenv) setup of the
        generated main file, via the FENV_ARGS key of the main's dictionary.
        """
        self.main.d["FENV_ARGS"] = args

    def cleanup(self):
        """!
        This routine has to be called after you've generated your code.
        """
        self.output.clear()
        self.datamodel.clear()
        self.solversteps.clear()

        # NOTE: reconstructed line (lost in the listing); resetting the main
        # to a fresh default sequence mirrors the constructor and is an assumption
        self.main = peano4.runner.DefaultSequence(self)

        self.is_generated = False
        self.is_built = False

        # NOTE: reconstructed line; resetting the constants is an assumption
        self.constants = peano4.output.Constants(self)
        self.subconstants = []

    def generate(self,
                 overwrite=peano4.output.Overwrite.Default,
                 throw_away_data_after_generation=False):
        """!
        Generate all code. If you add stuff to your project after a
        build, you have to (re-)generate the code. If you compile only
        once, you don't have to invoke this routine explicitly. It is
        lazily called by the other project routines - at the latest before
        you run the code.

        It is important that you reset the output after each generate
        call before you change parameter settings and call generate
        again. To do so, invoke cleanup(). If you forget this, two
        subsequent generate calls enrich the output twice.

        throw_away_data_after_generation: Bool
          The Peano 4 memory footprint can become quite substantial,
          effectively reducing the translation capabilities (as compilers
          tend to require a lot of memory, too). So pass in True if you
          want the script to throw away the data structures (and run a
          garbage collection) after all files have been generated. Please
          note that this invalidates both this object and any other
          object that you've used to generate the present one (such as
          ExaHyPE). It really strips everything down to the stuff you
          absolutely need to translate and run the code.
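
        A typical generate-modify-regenerate cycle looks as follows (a
        sketch; remember that cleanup() is mandatory between the two
        generate calls):

        ~~~
        project.generate()
        project.cleanup()
        # ... alter parameter settings ...
        project.generate()
        ~~~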
134 """
135 print( "generate all code ..." )
136 self.is_generated = True
137 self.is_built = False
138 if len(self.output.artefacts)>0:
139 print( "some artefacts have already been added to repository ... assume this is intentional (by higher abstraction layer, e.g.)")
140
141 self.output.readme.add_package_description( self.constants.readme_entry() )
142
143 self.datamodel.construct_output(self.output)
144 self.solversteps.construct_output(self.output)
145
146 self.main.construct_output(self.output,self.project_name + "-main")
147 self.output.generate(overwrite, self.directory, self.subdirectories)
148 if self.subconstants:
149 for i in range(len(self.subconstants)):
150 self.subconstants[i].generate(overwrite, self.directory, self.subdirectories[i])
151 else:
152 self.constants.generate(overwrite, self.directory)
153
154 print("generation complete")
155
156 if throw_away_data_after_generation:
157 self.datamodel = None
158 self.solversteps = None
159 self.output = None
160 gc.collect()
161 print("threw away all data and ran garbage collection")
162
    def build(self,
              make=True,
              make_clean_first=True,
              throw_away_data_after_build=False,
              number_of_parallel_builds=-1):
        """!
        Invokes the underlying make build mechanism on the project.

        number_of_parallel_builds: int
          This is mapped onto make -jnumber_of_parallel_builds, i.e., it
          determines how many parallel make instances the code spawns.
          The generated code is usually quite lengthy, so compile times
          can be high. If you pass a value less than or equal to zero,
          the core count is used as a guideline.

        throw_away_data_after_build: Bool
          See generate().
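
        A typical invocation (a sketch; the concurrency level here is
        arbitrary):

        ~~~
        project.build( make_clean_first=True, number_of_parallel_builds=8 )
        ~~~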
180 """
181 if number_of_parallel_builds <= 0:
182 number_of_parallel_builds = multiprocessing.cpu_count()
183
184 if not self.is_generated:
185 self.generate()
186
187 if make:
188 if make_clean_first:
189 print("clean up project ... in {}".format(self.directory))
190 try:
191 if make_clean_first:
192 subprocess.check_call(["make", "clean"], cwd=self.directory)
193 self.is_built = False
194 print("clean complete")
195 except Exception as e:
196 print("clean failed (" + str(e) + ") - continue anyway")
197
198 if not self.is_built:
199 print("start to compile with concurrency level of " + str(number_of_parallel_builds) + " ...")
200 try:
201 process = subprocess.run(["make", "-j"+str(number_of_parallel_builds)], cwd=self.directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
202 # Extract warnings and errors from error message
203 error_message = process.stderr.decode().strip()
204 warnings = re.findall(r"(?i)warning:.*", error_message)
205 errors = re.findall(r"(?i)error:.*", error_message)
206 # Print warnings into the terminal
207 if warnings:
208 print("\nWarnings ({}):".format(len(warnings)))
209 print("\n".join(warnings))
210 if errors:
211 print("\nErrors ({}):".format(len(errors)))
212 error_message = "\n".join(errors)
213 # Check the return code for linker errors
214 # Raise exception with errors
215 if process.returncode != 0:
216 linker_error_message = process.stderr.decode().strip() + "\n" + error_message
217 raise Exception(linker_error_message)
218 print("compile completed successfully")
219 self.is_built = True
220 self.build_was_successful = True
221 except Exception as e:
222 self.is_built = True
223 self.build_was_successful = False
224 print(str(e))
225 print("compile was not successful")
226 sys.exit(1)
227 else:
228 print("cannot build as code generation has not been successful")
229
230 if throw_away_data_after_build:
231 self.cleanup()
232 self.datamodel = None
233 self.solversteps = None
234 self.output = None
235 gc.collect()
236 print("threw away all data and ran garbage collection")
237
    def run(self, executable, args=[], prefix=None, pipefile=None, rebuild_if_required=True):
        """!
        Runs the code. args should be a list of strings or the empty list.
        prefix is a list, too. A typical invocation looks like
          project.run( "myexecutable", ["16.0", "32.0"], ["/opt/mpi/mpirun", "-n", "1"] )
        The operation returns True if the run was successful.

        pipefile: string or None
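
          If a pipefile is given, the run's stdout is redirected into that
          file instead of being echoed to the terminal. A sketch (the file
          name is a placeholder):

          ~~~
          project.run( "myexecutable", pipefile="run.log" )
          ~~~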
246 """
247 success = False
248 if rebuild_if_required and not self.is_built and not self.build_was_successful:
249 self.build()
250
251 if not rebuild_if_required or (self.is_built and self.build_was_successful):
252 print( "run executable " + str(executable))
253
254 invocation = []
255 if prefix!=None:
256 invocation += prefix
257 invocation += ["./" + executable]
258 invocation += args
259
260 try:
261 result = None
262 if pipefile==None:
263 result = subprocess.run( invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
264 else:
265 result = subprocess.run( invocation, stdout=open( pipefile, "w" ), stderr=subprocess.PIPE )
266 if result.returncode==0:
267 if pipefile==None:
268 print( result.stdout.decode("utf-8") )
269 if result.stderr!=None:
270 print( result.stderr.decode("utf-8") )
271 print( "run completed without error code, but check for warnings and numerical assertions/errors" )
272 else:
273 print( result.stderr.decode("utf-8") )
274 print( "run failed" )
275 success = True
276 except Exception as e:
277 print( "run of application was not successful: " + str(e) )
278 print( "invocation: " + str(invocation) )
279 else:
280 print( "cannot run as code compilation has not been successful" )
281 return success

    def add_subproject(self,
                       subproject: 'Project',
                       master=False):  #, subnamespace: str #def merge
        """!
        Adds another Peano 4 project as a subproject of this one.
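
        A sketch of the intended use (the project names are placeholders;
        note that the subproject must have been constructed with a non-empty
        subdirectory):

        ~~~
        main_project.add_subproject( other_project )
        main_project.generate()
        ~~~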
289 """
290 """
291 assert(
292 lhs.namespace == rhs.namespace
293 ), "namespaces of the projects being merged don't match"
294
295 assert(
296 lhs.directory == rhs.directory
297 ), "directories of the projects being merged don't match"
298
299 assert(
300 lhs.output.makefile.d["CXX"] == lhs.output.makefile.d["CXX"]
301 ), "CXX compilers of the projects being merged don't match"
302
303 assert(
304 lhs.output.makefile.d["FC"] == lhs.output.makefile.d["FC"]
305 ), "FC compilers of the projects being merged don't match"
306
307 assert(
308 lhs.output.makefile.d["DIM"] == lhs.output.makefile.d["DIM"]
309 ), "dimensions of the projects being merged don't match"
310
311 assert(
312 lhs.output.makefile.d["MODE"] == lhs.output.makefile.d["MODE"]
313 ), "compile modes of the projects being merged don't match"
314 """
        # assert(
        #     lhs._domain_offset == rhs._domain_offset and
        #     lhs._domain_size == rhs._domain_size and
        #     lhs._dimensions == rhs._dimensions and
        #     lhs._plotter_precision == rhs._plotter_precision
        # ), "global simulation parameters of the projects being merged don't match"

        # an internal utility function to merge two dictionaries
        # def _merge(lhs_dict: dict, rhs_dict: dict) -> dict:
        #     dict_merged = lhs_dict.copy()
        #     for key, value in rhs_dict.items():
        #         if key in dict_merged:
        #             dict_merged[key] += value
        #         else:
        #             dict_merged[key] = value
        #     return dict_merged

        assert (subproject._subdirectory != ""), "subdirectory of a subproject mustn't be empty"

        self.namespace = subproject.namespace
        self.namespaces.append(subproject.namespace)
        self.datamodel.namespace = subproject.datamodel.namespace
        self.datamodel._subdirectory = subproject.datamodel._subdirectory
        if not subproject.subdirectory in self.subdirectories:
            self.subdirectories.append(subproject.subdirectory)
        if not self.datamodel.namespace in self.datamodel.namespaces:
            self.datamodel.namespaces.append(self.datamodel.namespace)
        if not self.datamodel.subdirectory in self.datamodel.subdirectories:
            self.datamodel.subdirectories.append(self.datamodel.subdirectory)

        #
        # merge all DaStGen2 and ParticleSet attributes of the datamodel
        #
        for data in subproject.datamodel.cell_data:
            self.datamodel.add_cell(data)
        for data in subproject.datamodel.face_data:
            self.datamodel.add_face(data)
        for data in subproject.datamodel.vertex_data:
            self.datamodel.add_vertex(data)
        for data in subproject.datamodel.global_data:
            self.datamodel.add_global(data)

        #
        # merge all steps of solversteps
        #

        # make the steps currently contained in this Peano 4 project aware of
        # the incoming subproject's data
        for step in self.solversteps._steps:
            added_step = subproject.solversteps._steps[0]
            step.cell_data.extend(added_step.cell_data)
            step.face_data.extend(added_step.face_data)
            step.vertex_data.extend(added_step.vertex_data)

        # make the incoming subproject's steps aware of this Peano 4 project's data
        if len(self.solversteps._steps):
            for added_step in subproject.solversteps._steps:
                step = self.solversteps._steps[0]
                added_step.cell_data.clear()
                added_step.cell_data.extend(step.cell_data)
                added_step.face_data.clear()
                added_step.face_data.extend(step.face_data)
                added_step.vertex_data.clear()
                added_step.vertex_data.extend(step.vertex_data)

        # finally, append the incoming subproject's steps to this Peano 4 project's steps
        for step in subproject.solversteps._steps:
            self.solversteps._steps.append(step)

        #
        # merge all keys and strings of output.makefile
        #

        # ... for compilers
        if self.output.makefile.d["CXX"]:
            assert (
                self.output.makefile.d["CXX"] == subproject.output.makefile.d["CXX"]
            ), "the CXX compiler of the subproject being added doesn't match the one of the main project"
        else:
            self.output.makefile.d["CXX"] = subproject.output.makefile.d["CXX"]

        if self.output.makefile.d["FC"]:
            assert (
                self.output.makefile.d["FC"] == subproject.output.makefile.d["FC"]
            ), "the FC compiler of the subproject being added doesn't match the one of the main project"
        else:
            self.output.makefile.d["FC"] = subproject.output.makefile.d["FC"]

        for flag in subproject.output.makefile.d["CXXFLAGS"].split():
            self.output.makefile.add_CXX_flag(flag)

        for flag in subproject.output.makefile.d["FCFLAGS"].split():
            self.output.makefile.add_Fortran_flag(flag)

        for flag in subproject.output.makefile.d["LDFLAGS"].split():
            self.output.makefile.add_linker_flag(flag)

        self.output.makefile.d["LIBS"] += subproject.output.makefile.d["LIBS"]
        self.output.makefile.d["GENERATED_INCLUDE_DIRS"] |= subproject.output.makefile.d["GENERATED_INCLUDE_DIRS"]
        self.output.makefile.d["MODE"] = subproject.output.makefile.d["MODE"]

        for module in subproject.output.makefile.d["FORTRAN_MODULES"]:
            self.output.makefile.add_Fortran_module(module)

        assert (
            self.output.makefile.d["EXECUTABLENAME"]
        ), "the name of the main project is empty"
        if not self.output.makefile.d["DIM"]:
            self.output.makefile.set_dimension(subproject.output.makefile.d["DIM"])
        else:
            assert (
                self.output.makefile.d["DIM"] == subproject.output.makefile.d["DIM"]
            ), "the dimension of the added subproject doesn't match the one of the main project"

        # ... for filepaths
        for filename in subproject.output.makefile.hfiles:
            self.output.makefile.add_h_file(filename, False)
        for filename in subproject.output.makefile.cppfiles:
            self.output.makefile.add_cpp_file(filename, False)
        for filename in subproject.output.makefile.fortranfiles:
            self.output.makefile.add_Fortran_file(filename, False)

        for filename in subproject.output.makefile.generated_hfiles:
            self.output.makefile.add_h_file(filename, True)
        for filename in subproject.output.makefile.generated_cppfiles:
            self.output.makefile.add_cpp_file(filename, True)
        for filename in subproject.output.makefile.generated_fortranfiles:
            self.output.makefile.add_Fortran_file(filename, True)

        #
        # merge all strings and artefacts of the readme
        #
        for i in subproject.output.readme._entries:
            self.output.readme.add_entry(i)

        for i in subproject.output.readme._package_descriptions:
            self.output.readme.add_package_description(i)

        for i in subproject.output.artefacts:
            self.output.add(i)

        #
        # the first added subproject provides the template for the main file;
        # the mains of all subsequently added subprojects are merged into it
        #
        if not self.is_extended:
            self.main = subproject.main
            self.rootnamespace = subproject.rootnamespace
            self._subdirectory = subproject._subdirectory
            self.is_extended = True
        self.main.merge(subproject.main)

        # self.main.project = self
        # self.main.overwrite = subproject.main.overwrite
        # self.main.d.update(subproject.main.d)

        #
        # merge all constants
        #
        self.subconstants.append(subproject.constants)

    @property
    def subdirectory(self):
        """!
        The subdirectory of the project with a trailing slash, or an empty
        string if no subdirectory has been set.
        """
        subdirectory = self._subdirectory
        if subdirectory:
            subdirectory += "/"
        return subdirectory