Peano
Loading...
Searching...
No Matches
PosterioriLimiting.py
Go to the documentation of this file.
1# This file is part of the ExaHyPE2 project. For conditions of distribution and
2# use, please see the copyright notice at www.peano-framework.org
3import os
4from abc import abstractmethod
5
6import jinja2
7
8import dastgen2
9import peano4.output
10import exahype2.grid
12
13from exahype2.solvers.PDETerms import PDETerms
14from exahype2.solvers.aderdg import Polynomials
15from exahype2.solvers.aderdg.Architectures import Architectures
16from exahype2.solvers.aderdg.ADERDG import get_face_overlap_merge_implementation
17
18from .actionsets import (SaveNewCellData, SpreadLimiterStatus,
19 CopyAndConvertPatch, VerifyTroubledness)
20
21class PosterioriLimiting(object):
22 # Initialisation stores the key information about the solver and initialises a number of variables for later usage
24 self,
25 name,
26 regular_solver,
27 limiting_solver,
28 number_of_dmp_observables = 0,
29 dmp_relaxation_parameter = 0.0,
30 dmp_differences_scaling = 0.0,
31 physical_admissibility_criterion = PDETerms.None_Implementation
32 ):
33 self._name = name
34
35 # Respectively solver to be used in stable regions and solver to be used in unstable regions
36 self._regular_solver = regular_solver
37 self._limiter_solver = limiting_solver
38
39 """
40 All of the following are mostly used for limiting, they are set of but some classes that inherit from this
41 may turn them on.
42 Hold previous time step: if this is specified as true, the previous time step will be stored persistently.
43 The old value gets overwritten with the new value at the beginning of the predictor step.
44 This allows rollback of the solution to a previous state if for some reason the computations
45 yield incorrect results.
46 Used for limiting, where troubled cells are rolled back and recomputed with a more robust
47 scheme and neighbouring cells need their old solution to project values with.
48 PAC: if not none, a user-defined physical admissibility criterion should be checked at the end of
49 the correction step which verifies whether or not the produced solution is physically sensible.
50 number_of_pac_observables: number of variables which the physical admisibility criterion should consider
51 number_of_dmp_observables: if greater than zero, a discrete maximum principle should be checked at the
52 end of the corrector step. This essentially verifies whether shocks have formed.
53 """
54 self._number_of_dmp_observables = number_of_dmp_observables
55 self._physical_admissibility_criterion = physical_admissibility_criterion
56
57 self._dmp_relaxation_parameter = dmp_relaxation_parameter
58 self._dmp_differences_scaling = dmp_differences_scaling
59
60 self._halo_size = int(limiting_solver._patch_overlap_old.dim[0]/2)
61
62 """
63 These will be used by children of this class to fill them with the declaration and definition of the
64 above mandatory and optional functions.
65 """
68
71
72 """
73 Used by child classes, will contain code to be executed at the start and end respectively of the
74 time step. Typically specifies some timestep variable and advances the overall timestamp of the
75 entire solver.
76 """
79
81
82 """
83 used if the user wants to add some function to postprocessing, meaning
84 it should be added after every other computation in ADER-DG is done.
85 """
87
88 self._regular_solver._split_sweeps = True
89
90 # create initial structures and action_sets, these are re-generated essentially anytime something about the solver is modified
93
94 """
95 Creates all the structures which are attached on the peano grid either in a cell or on the faces.
96 These are defined as peano patches which have given sizes to contain persistent data as well as
97 conditions for when they should be merged, sent, received, stored, loaded, etc.
98 """
99
100 @abstractmethod
102 regular_patch_size = self._regular_solver.order+1
103
104 """
105 This will serve as a face marker to communicate with neighbouring cells whether
106 a given cell is troubled, is neighbouring a troubled cell, or is fine.
107 Cells that are troubled or neighbouring a troubled cell must rollback their solution
108 to the previous one and perform one timestep using a more robust FV scheme.
109 Therefore these and their neighbours must convert the DG representation of their
110 previous solution to an FV representation and project it to their faces so that these
111 can be exchanged with their neighbours.
112 """
113 Variants = ["REGULAR","REGULAR_TO_LIMITER","LIMITER_TO_REGULAR","TROUBLED"]
114 self._cell_label = exahype2.grid.create_cell_label(self._name)
115 self._face_label = exahype2.grid.create_face_label(self._name)
116 self._cell_label.data.add_attribute(dastgen2.attributes.Enumeration( name="Troubled_Marker", variants=Variants ) )
117 self._face_label.data.add_attribute(dastgen2.attributes.Enumeration( name="Troubled_Marker", variants=Variants ) )
118
119 self._face_label.peano4_mpi_and_storage_aspect.merge_implementation += "\n _Troubled_Marker = std::max(_Troubled_Marker, neighbour.getTroubled_Marker());"
120
121
122 # This will contain the projection of the face-local minima and maxima
124 (2, 2, 1),
126 self._unknown_identifier() + "_min_and_max"
127 )
128 # if self._hold_face_data_on_heap_with_smart_pointer:
129 # self._min_and_max_projection.generator = peano4.datamodel.PatchToDoubleArrayWithSmartPointer(
130 # self._min_and_max_projection,
131 # self._corrector_computation_precision
132 # )
133
134 self._min_and_max_projection.generator.merge_method_definition = get_face_overlap_merge_implementation(self._min_and_max_projection)
135
136 self._min_and_max_projection.generator.load_store_compute_flag = "::peano4::grid::constructLoadStoreComputeFlag({},{},{})".format(
140 )
141
142 self._min_and_max_projection.generator.send_condition = (
143 "repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::RegularSolver"
144 " and " + "repositories::" + self._regular_solver.get_name_of_global_instance() + ".isFirstGridSweepOfTimeStep()"
145 )
146 self._min_and_max_projection.generator.receive_and_merge_condition = (
147 "repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::RegularSolver"
148 " and " + "repositories::" + self._regular_solver.get_name_of_global_instance() + ".isLastGridSweepOfTimeStep()"
149 )
150
151 self._min_and_max_projection.generator.includes += """
152#include "peano4/utils/Loop.h"
153#include "repositories/SolverRepository.h"
154"""
155
156 #TODO: is this the best way to do this?
157 self._limiter_solver._compute_time_step_size = """
158 double timeStepSize = repositories::""" + self._regular_solver.get_name_of_global_instance() + """.getAdmissibleTimeStepSize();
159"""
160
161
162 """
163 Creates the action sets, these are actions tied to the grid that are executed at given points of
164 grid generation, construction or steps of the ader-dg solver. Peano handles grid structures that
165 contain data as well as operations that are executed on that data. These are the latter.
166 The guard here are the conditions for the execution of the operation, as in they are only executed
167 if the guard evaluates to true.
168 """
169
170 @abstractmethod
173 guard = ("not marker.willBeRefined() and not marker.hasBeenRefined() and" +
174 " repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::RegularSolver" +
175 " and " + "repositories::" + self._regular_solver.get_name_of_global_instance() + ".isFirstGridSweepOfTimeStep()"),
176 use_DMP=self._number_of_dmp_observables>0,
177 reset_troubled_markers=True
178 )
179
181 guard = ("not marker.willBeRefined() and not marker.hasBeenRefined() and" +
182 " repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::RegularSolver" +
183 " and " + "repositories::" + self._regular_solver.get_name_of_global_instance() + ".isLastGridSweepOfTimeStep()"),
184 use_PAC=self._physical_admissibility_criterion!=PDETerms.None_Implementation,
185 use_DMP=self._number_of_dmp_observables>0
186 )
187
189 guard = ("not marker.willBeRefined() and not marker.hasBeenRefined() and" +
190 " repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::LimiterStatusSpreadingToNeighbours"
191 ))
192
194 regularToLimiterGuard = ("not marker.willBeRefined() and not marker.hasBeenRefined() and" +
195 " repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::LimiterStatusSpreadingToSecondNeighbours"),
196 limiterToRegularGuard = ( "not marker.willBeRefined() and not marker.hasBeenRefined() and"
197 + " repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::LimiterSolver"
198 + " and " + "repositories::" + self._limiter_solver.get_name_of_global_instance() + ".isLastGridSweepOfTimeStep()"
199 + " and fineGridCell" + self._name + "CellLabel.getTroubled_Marker()>=celldata::" + self._name + "CellLabel::Troubled_Marker::LIMITER_TO_REGULAR")
200 )
201
202 """
203 saving cell needs to happen before the cell is used for any computations
204 checking troubledness needs to happen after any updates have been finalized
205 spreading limiter status is inconsequential, it can happen at any time since it has
206 its own dedicated timestep
207 copying and converting the data from regular to limiter can happen whenever as it
208 happens in a dedicated timestep, it needs to be followed up by a projection of
209 said data, which in the case of an fv limiter is done automatically
210 copying from limiter to regular needs to happen after the limiter is finished
211 computing the new solution within a given cell
212 """
213 self._action_set_save_new_cell_data.descend_invocation_order = 0
214 self._action_set_check_troubledness.descend_invocation_order = 1000
215 self._action_set_spread_limiter_status.descend_invocation_order = 1
216 self._action_set_copy_convert_and_project_data.descend_invocation_order = 1000
217
218 self._limiter_solver._action_set_update_cell.guard += (
219 " and repositories::" + self.get_name_of_global_instance() + ".getSolverState()==" + self._name + "::SolverState::LimiterSolver"
220 + " and fineGridCell" + self._name + "CellLabel.getTroubled_Marker()>=celldata::" + self._name + "CellLabel::Troubled_Marker::LIMITER_TO_REGULAR")
221
222 def create_readme_descriptor(self, domain_offset, domain_size):
223 return (
224 """ ExaHyPE 2 Limiting solver"""
225 )
226
    def get_user_action_set_includes(self):
        """Return the includes users have registered for the generated action sets."""
        return self.user_action_set_includes
229
    def get_user_solver_includes(self):
        """Return the includes users have registered for the generated solver classes."""
        return self.user_solver_includes
232
233 def add_user_action_set_includes(self, value):
234 """
235 Add further includes to this property, if your action sets require some additional
236 routines from other header files.
237 """
238 self.user_action_set_includes += value
239
240
241 def _store_cell_data_default_guard(self):
242 return (
243 "not marker.willBeRefined() "
244 + "and repositories::"
245 + self.get_name_of_global_instance()
246 + ".getSolverState()!="
247 + self._name
248 + "::SolverState::GridConstruction"
249 )
250
251 def _provide_cell_data_to_compute_kernels_default_guard(self):
252 return (
253 "not marker.willBeRefined() "
254 + "and repositories::"
255 + self.get_name_of_global_instance()
256 + ".getSolverState()!="
257 + self._name
258 + "::SolverState::GridConstruction"
259 )
260
261 def _provide_face_data_to_compute_kernels_default_guard(self):
262 return (
263 "not marker.willBeRefined() "
264 + "and repositories::"
265 + self.get_name_of_global_instance()
266 + ".getSolverState()!="
267 + self._name
268 + "::SolverState::GridConstruction"
269 )
270
271 def _load_cell_data_default_guard(self):
272 return (
273 "not marker.hasBeenRefined() "
274 + "and repositories::"
275 + self.get_name_of_global_instance()
276 + ".getSolverState()!="
277 + self._name
278 + "::SolverState::GridConstruction "
279 + "and repositories::"
280 + self.get_name_of_global_instance()
281 + ".getSolverState()!="
282 + self._name
283 + "::SolverState::GridInitialisation"
284 )
285
286 def _store_face_data_default_guard(self):
287 return (
288 "not marker.willBeRefined() "
289 + "and repositories::"
290 + self.get_name_of_global_instance()
291 + ".getSolverState()!="
292 + self._name
293 + "::SolverState::GridConstruction"
294 )
295
296 def _load_face_data_default_guard(self):
297 return (
298 "not marker.hasBeenRefined() "
299 + "and repositories::"
300 + self.get_name_of_global_instance()
301 + ".getSolverState()!="
302 + self._name
303 + "::SolverState::GridConstruction "
304 + "and repositories::"
305 + self.get_name_of_global_instance()
306 + ".getSolverState()!="
307 + self._name
308 + "::SolverState::GridInitialisation"
309 )
310
311 def _store_boundary_data_default_guard(self):
312 return (
313 self._store_face_data_default_guard()
314 + " and not repositories::"
315 + self.get_name_of_global_instance()
316 + ".PeriodicBC[marker.getSelectedFaceNumber()%Dimensions]"
317 + " and not marker.hasBeenRefined() and fineGridFace"
318 + self._name
319 + "FaceLabel.getBoundary()"
320 )
321
322 def _clear_face_data_default_guard(self):
323 return (
324 "not marker.willBeRefined()"
325 + "and repositories::"
326 + self.get_name_of_global_instance()
327 + ".getSolverState()!="
328 + self._name
329 + "::SolverState::GridConstruction "
330 )
331
332 def _interpolate_face_data_default_guard(self):
333 return (
334 "repositories::"
335 + self.get_name_of_global_instance()
336 + ".getSolverState()!="
337 + self._name
338 + "::SolverState::GridInitialisation"
339 )
340
    def _restrict_face_data_default_guard(self):
        """Guard for restricting face data: always active."""
        return "true"
343
344 def _unknown_identifier(self):
345 return self._name + "Q"
346
347 def get_name_of_global_instance(self):
348 return "instanceOf" + self._name
349
    def _get_default_includes(self):
        """Return the C++ include directives every generated solver file needs."""
        return """
#include "tarch/la/Vector.h"

#include "peano4/utils/Globals.h"
#include "peano4/utils/Loop.h"

#include "repositories/SolverRepository.h"
"""
359
360 """
361 The following functions will be called by the peano4 project,
362 they tell peano which of our generated data it should attach
363 to each grid element and use.
364 """
365
366 def __str__(self):
367 result = (
368 """
369Name: """
370 + self._name
371 + """
372Type: """
373 + self.__class__.__name__
374 )
375 return result
376
    def add_to_Peano4_datamodel(self, datamodel, verbose):
        """Register this solver's grid data (cell/face labels and, if DMP is active, the min/max face projection) with Peano's data model."""
        datamodel.add_cell(self._cell_label)
        datamodel.add_face(self._face_label)
        if self._number_of_dmp_observables>0:
            datamodel.add_face(self._min_and_max_projection)
382
    def add_use_data_statements_to_Peano4_solver_step(self, step):
        """Declare which of this solver's data the given traversal step uses (mirrors add_to_Peano4_datamodel)."""
        step.use_cell(self._cell_label)
        step.use_face(self._face_label)
        if self._number_of_dmp_observables>0:
            step.use_face(self._min_and_max_projection)
388
    def add_actions_to_init_grid(self, step):
        """Intentionally empty -- presumably the coupled sub-solvers register their own initialisation actions; confirm."""
        pass
391
    def add_actions_to_create_grid(self, step, evaluate_refinement_criterion):
        """Intentionally empty -- presumably grid creation is driven by the coupled sub-solvers; confirm."""
        pass
394
    def add_actions_to_plot_solution(self, step, output_path):
        """Intentionally empty -- presumably plotting is handled by the coupled sub-solvers; confirm."""
        pass
397
    # these actions are executed during each time step
    def add_actions_to_perform_time_step(self, step):
        """Attach the four limiting action sets to the time-step traversal; their relative order is set via descend_invocation_order in create_action_sets."""
        step.add_action_set(self._action_set_save_new_cell_data)
        step.add_action_set(self._action_set_check_troubledness)
        step.add_action_set(self._action_set_spread_limiter_status)
        step.add_action_set(self._action_set_copy_convert_and_project_data)
404
    def set_plot_description(self, description):
        """Set the human-readable description attached to solution plots."""
        self.plot_description = description
407
    def _generate_kernels(self, namespace, output, subdirectory, dimensions):
        """
        Generate the limiter's C++ kernel files (quadrature tables and limiter kernels)
        into <subdirectory>/kernels/<solver name>.
        """
        d = {}
        self._init_dictionary_with_default_parameters(d)
        self.add_entries_to_text_replacement_dictionary(d)
        d["DIMENSIONS"] = dimensions
        # In 2d the third dimension degenerates: one dof, no ghost layer.
        d["NUMBER_OF_DOF_3D"] = 1 if dimensions == 2 else d["NUMBER_OF_DOF"]
        d["NUMBER_OF_DOF_LIMITER_3D"] = 1 if dimensions == 2 else d["NUMBER_OF_DOF_LIMITER"]
        d["GHOST_LAYER_WIDTH_3D"] = 0 if dimensions == 2 else d["GHOST_LAYER_WIDTH"]
        d["FULL_QUALIFIED_SOLVER_NAME"] ="::".join(namespace) + "::" + self._name

        kernels_namespace = namespace + ["kernels", self._name]
        kernels_output_path = subdirectory + "kernels/" + self._name
        # The kernel templates ship in a "kernels" directory next to this file.
        kernels_templates_prefix = os.path.dirname(os.path.realpath(__file__)) + "/kernels/"

        if not os.path.exists(kernels_output_path):
            os.makedirs(kernels_output_path)

        exahype2.solvers.limiting.kernels.Quadrature(d).generate_kernels(
            kernels_namespace, output, subdirectory, kernels_templates_prefix, kernels_output_path
        )

        exahype2.solvers.limiting.kernels.Limiter(d).generate_kernels(
            kernels_namespace, output, subdirectory, kernels_templates_prefix, kernels_output_path
        )
432
433 """
434 This tells peano to add the solver files to the project. It generates any files that are not
435 specifically cell or face data or grid actions such as the actionsets.
436 Currently it generates solver implementation and header files.
437 """
    def add_implementation_files_to_project(self, namespace, output, dimensions, subdirectory=""):
        """
        The ExaHyPE2 project will call this operation when it sets
        up the overall environment.

        This routine is typically not invoked by a user.

        output: peano4.output.Output
        """

        if(subdirectory):
            subdirectory += "/"

        HeaderDictionary = {}
        self._init_dictionary_with_default_parameters(HeaderDictionary)

        # Abstract<name>.h/.cpp are rendered from the "Abstract" template pair.
        generated_abstract_files = (
            peano4.output.Jinja2TemplatedHeaderImplementationFilePair(
                os.path.dirname(os.path.realpath(__file__)) + "/PosterioriLimitingAbstract.template.h",
                os.path.dirname(os.path.realpath(__file__)) + "/PosterioriLimitingAbstract.template.cpp",
                "Abstract" + self._name,
                namespace,
                subdirectory + ".",
                HeaderDictionary,
                True,  # NOTE(review): differs from the solver pair below (False there) -- confirm flag semantics against Jinja2TemplatedHeaderImplementationFilePair
                True
            )
        )

        # <name>.h/.cpp hold the user-visible solver class.
        generated_solver_files = (
            peano4.output.Jinja2TemplatedHeaderImplementationFilePair(
                os.path.dirname(os.path.realpath(__file__)) + "/PosterioriLimiting.template.h",
                os.path.dirname(os.path.realpath(__file__)) + "/PosterioriLimiting.template.cpp",
                self._name,
                namespace,
                subdirectory + ".",
                HeaderDictionary,
                False,
                True
            )
        )

        # Register the generated sources with the makefile so they are compiled.
        output.add(generated_abstract_files)
        output.makefile.add_cpp_file(subdirectory + "Abstract" + self._name + ".cpp", generated=True)
        output.makefile.add_h_file(subdirectory + "Abstract" + self._name + ".h", generated=True)

        output.add(generated_solver_files)
        output.makefile.add_cpp_file(subdirectory + self._name + ".cpp", generated=True)
        output.makefile.add_h_file(subdirectory + self._name + ".h", generated=True)

        self._generate_kernels(namespace, output, subdirectory, dimensions)
489
    @abstractmethod
    def add_entries_to_text_replacement_dictionary(self, d):
        """Hook for subclasses to add or override entries in the template replacement dictionary ``d``."""
        pass
493
494 """
495 Generates a dictionary of "words" that will later be used in various templates to fill these out by adding
496 information from the solver or which was specified by the user.
497 """
    def _init_dictionary_with_default_parameters(self, d):
        """
        Fill ``d`` with the default template replacement entries derived from this solver
        and its two coupled sub-solvers.
        """
        # Global C++ instance and class names of the three solvers involved.
        d["SOLVER_INSTANCE"] = self.get_name_of_global_instance()
        d["REGULAR_SOLVER_INSTANCE"] = self._regular_solver.get_name_of_global_instance()
        d["LIMITER_SOLVER_INSTANCE"] = self._limiter_solver.get_name_of_global_instance()

        d["SOLVER_NAME"] = self._name
        d["REGULAR_SOLVER_NAME"] = self._regular_solver._name
        d["LIMITER_SOLVER_NAME"] = self._limiter_solver._name

        d["UNKNOWN_IDENTIFIER"] = self._unknown_identifier()
        d["REGULAR_SOLVER_UNKNOWN_IDENTIFIER"] = self._regular_solver._unknown_identifier()
        d["LIMITER_SOLVER_UNKNOWN_IDENTIFIER"] = self._limiter_solver._unknown_identifier()

        # Vectorisation/padding parameters are taken from the regular solver's architecture.
        d["ALIGNMENT"] = Architectures.get_alignment(self._regular_solver._architecture)
        d["SIMD_WIDTH"] = Architectures.get_simd_width(self._regular_solver._architecture)
        d["ORDER"] = self._regular_solver._order
        d["NUMBER_OF_DOF"] = self._regular_solver._order + 1
        d["NUMBER_OF_DOF_PADDED"] = Architectures.get_size_with_padding(self._regular_solver._architecture, self._regular_solver._order + 1)
        # Limiter patch size defaults to 2N+1 subcells when the limiter does not prescribe one.
        d["NUMBER_OF_DOF_LIMITER"] = self._limiter_solver.patch_size if self._limiter_solver.patch_size >= 0 else 2 * self._regular_solver._order + 1
        d["NUMBER_OF_DOF_LIMITER_PADDED"] = Architectures.get_size_with_padding(self._regular_solver._architecture, d["NUMBER_OF_DOF_LIMITER"])
        d["NUMBER_OF_UNKNOWNS"] = self._regular_solver._unknowns
        d["NUMBER_OF_UNKNOWNS_PADDED"] = Architectures.get_size_with_padding(self._regular_solver._architecture, self._regular_solver._unknowns)
        d["NUMBER_OF_AUXILIARY_VARIABLES"] = self._regular_solver._auxiliary_variables
        d["NUMBER_OF_AUXILIARY_VARIABLES_PADDED"] = Architectures.get_size_with_padding(self._regular_solver._architecture, self._regular_solver._auxiliary_variables)
        d["NUMBER_OF_DATA"] = self._regular_solver._unknowns + self._regular_solver._auxiliary_variables
        d["NUMBER_OF_DATA_PADDED"] = Architectures.get_size_with_padding(self._regular_solver._architecture, self._regular_solver._unknowns + self._regular_solver._auxiliary_variables)
        d["NUMBER_OF_OBSERVABLES"] = self._number_of_dmp_observables
        d["GHOST_LAYER_WIDTH"] = self._halo_size

        # NOTE(review): the next entries access public names (unknowns, order, ...) while the
        # entries above use underscore-prefixed ones -- presumably properties on the regular
        # solver; confirm.
        d["NUMBER_OF_DOFS_PER_CELL_2D"] = (
            (self._regular_solver.unknowns + self._regular_solver.auxiliary_variables)
            * (self._regular_solver.order+1) * (self._regular_solver.order+1) )
        d["NUMBER_OF_DOFS_PER_CELL_3D"] = (
            (self._regular_solver.unknowns + self._regular_solver.auxiliary_variables)
            * (self._regular_solver.order+1) * (self._regular_solver.order+1) * (self._regular_solver.order+1) )

        d["USE_GAUSS_LOBATTO"] = ( "true" if self._regular_solver._polynomials is Polynomials.Gauss_Lobatto else "false" )
        d["POLYNOMIAL_TYPE"] = ( "lobatto" if self._regular_solver._polynomials is Polynomials.Gauss_Lobatto else "legendre" )

        d["SOLVER_INCLUDES"] = self.get_user_solver_includes()

        # User-supplied snippets are themselves Jinja templates; render them against d.
        # DebugUndefined leaves unresolved placeholders visible instead of failing.
        d["SOLVER_USER_DECLARATIONS"] = jinja2.Template(
            self._solver_user_declarations, undefined=jinja2.DebugUndefined
        ).render(**d)
        d["SOLVER_USER_DEFINITIONS"] = jinja2.Template(
            self._solver_user_definitions, undefined=jinja2.DebugUndefined
        ).render(**d)

        d["START_TIME_STEP_IMPLEMENTATION"] = jinja2.Template(
            self._start_time_step_implementation, undefined=jinja2.DebugUndefined
        ).render(**d)
        d["FINISH_TIME_STEP_IMPLEMENTATION"] = jinja2.Template(
            self._finish_time_step_implementation, undefined=jinja2.DebugUndefined
        ).render(**d)

        d["CONSTRUCTOR_IMPLEMENTATION"] = jinja2.Template(
            self._constructor_implementation, undefined=jinja2.DebugUndefined
        ).render(**d)

        # Limiting (DMP/PAC) configuration.
        d["DMP_DIFFERENCES_SCALING"] = self._dmp_differences_scaling
        d["DMP_RELAXATION_PARAMETER"] = self._dmp_relaxation_parameter
        d["NUMBER_OF_DMP_OBSERVABLES"] = self._number_of_dmp_observables
        d["ADMISSIBILITY_IMPLEMENTATION"] = self._physical_admissibility_criterion

        d["QUADRATURE_POINTS"] = ", ".join(self._regular_solver._basis.quadrature_points())

        d["USE_LIBXSMM"] = self._regular_solver._use_libxsmm
        d["ARCHITECTURE"] = self._regular_solver._architecture
Wrapper around C++ enumerations which is not a datatype supported natively by MPI.
Definition Enumeration.py:6
__init__(self, name, regular_solver, limiting_solver, number_of_dmp_observables=0, dmp_relaxation_parameter=0.0, dmp_differences_scaling=0.0, physical_admissibility_criterion=PDETerms.None_Implementation)