# (Doxygen page-navigation residue removed — not part of the source file.)
# This file is part of the ExaHyPE2 project. For conditions of distribution and
# use, please see the copyright notice at www.peano-framework.org
import jinja2

from enum import Enum

class SolverVariant(Enum):
    """
    Selects how the generated compute kernel invokes the user's PDE terms:
    via virtual functions on the solver object, via stateless (static) calls,
    the multicore variant thereof, or the accelerator (GPU offloading) variant.
    """

    WithVirtualFunctions = 0
    Stateless = 1
    Multicore = 2
    Accelerator = 3

class KernelVariant(Enum):
    """
    Selects the loop/traversal scheme (patch-wise, batched, task-graph, or
    volume-wise) combined with the temporary-data memory layout (AoS, AoSoA,
    or SoA). The tens digit encodes the traversal, the ones digit the layout.
    """

    PatchWiseAoS = 10
    PatchWiseAoSoA = 11
    PatchWiseSoA = 12
    BatchedAoS = 20
    BatchedAoSoA = 21
    BatchedSoA = 22
    TaskGraphAoS = 30
    TaskGraphAoSoA = 31
    TaskGraphSoA = 32
    VolumeWiseAoS = 40
    VolumeWiseAoSoA = 41
    VolumeWiseSoA = 42

def create_compute_Riemann_kernel(
    flux_implementation,
    ncp_implementation,
    eigenvalues_implementation,
    riemann_solver_implementation,
    source_term_implementation,
    compute_max_eigenvalue_of_next_time_step,
    solver_variant: SolverVariant,
    kernel_variant: KernelVariant,
):
    """
    Return only the unqualified function call, i.e., without any namespaces.
    So by setting the right namespace as prefix, you can direct it to particular
    implementations.

    The *_implementation arguments are the usual ExaHyPE2 markers ("<none>",
    "<user-defined>", or literal C++ code); here they are only used to toggle
    the boolean template arguments of the generated kernel call.
    """
    # Map each traversal scheme onto the C++ compute kernel serving it. The
    # data layout does not change the kernel name, only the enumerator below.
    KernelCalls = {
        KernelVariant.PatchWiseAoS: "timeStepWithRiemannPatchwiseHeap",
        KernelVariant.PatchWiseAoSoA: "timeStepWithRiemannPatchwiseHeap",
        KernelVariant.PatchWiseSoA: "timeStepWithRiemannPatchwiseHeap",
        KernelVariant.BatchedAoS: "timeStepWithRiemannBatchedHeap",
        KernelVariant.BatchedAoSoA: "timeStepWithRiemannBatchedHeap",
        KernelVariant.BatchedSoA: "timeStepWithRiemannBatchedHeap",
        KernelVariant.TaskGraphAoS: "timeStepWithRiemannTaskgraphHeap",
        KernelVariant.TaskGraphAoSoA: "timeStepWithRiemannTaskgraphHeap",
        KernelVariant.TaskGraphSoA: "timeStepWithRiemannTaskgraphHeap",
        KernelVariant.VolumeWiseAoS: "timeStepWithRiemannVolumewise",
        KernelVariant.VolumeWiseAoSoA: "timeStepWithRiemannVolumewise",
        KernelVariant.VolumeWiseSoA: "timeStepWithRiemannVolumewise",
    }

    # Temporary-data enumerator matching the memory layout of each variant.
    EnumeratorTemplateTypes = {
        KernelVariant.PatchWiseAoS: "::exahype2::enumerator::AoSLexicographicEnumerator",
        KernelVariant.PatchWiseAoSoA: "::exahype2::enumerator::AoSoALexicographicEnumerator",
        KernelVariant.PatchWiseSoA: "::exahype2::enumerator::SoALexicographicEnumerator",
        KernelVariant.BatchedAoS: "::exahype2::enumerator::AoSLexicographicEnumerator",
        KernelVariant.BatchedAoSoA: "::exahype2::enumerator::AoSoALexicographicEnumerator",
        KernelVariant.BatchedSoA: "::exahype2::enumerator::SoALexicographicEnumerator",
        KernelVariant.TaskGraphAoS: "::exahype2::enumerator::AoSLexicographicEnumerator",
        KernelVariant.TaskGraphAoSoA: "::exahype2::enumerator::AoSoALexicographicEnumerator",
        KernelVariant.TaskGraphSoA: "::exahype2::enumerator::SoALexicographicEnumerator",
        KernelVariant.VolumeWiseAoS: "::exahype2::enumerator::AoSLexicographicEnumerator",
        KernelVariant.VolumeWiseAoSoA: "::exahype2::enumerator::AoSoALexicographicEnumerator",
        KernelVariant.VolumeWiseSoA: "::exahype2::enumerator::SoALexicographicEnumerator",
    }

    template = KernelCalls[kernel_variant]

    if solver_variant == SolverVariant.WithVirtualFunctions:
        # Route every PDE term through a functor that forwards to the solver
        # instance's virtual functions.
        template += """Functors<
  {{NUMBER_OF_VOLUMES_PER_AXIS}},
  {{HALO_SIZE}},
  {{NUMBER_OF_UNKNOWNS}},
  {{NUMBER_OF_AUXILIARY_VARIABLES}},
  {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
  {{TEMP_DATA_ENUMERATOR}}
>(patchData,
  [&](
    const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
    const tarch::la::Vector<Dimensions, double>& x,
    const tarch::la::Vector<Dimensions, double>& h,
    double t,
    double dt,
    int normal,
    double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
  )->void {
    {% if FLUX_IMPLEMENTATION!="<none>" %}
    repositories::{{SOLVER_INSTANCE}}.flux(Q, x, h, t, dt, normal, F);
    {% endif %}
  },
  [&](
    const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
    const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
    const tarch::la::Vector<Dimensions, double>& x,
    const tarch::la::Vector<Dimensions, double>& h,
    double t,
    double dt,
    int normal,
    double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
  )->void {
    {% if NCP_IMPLEMENTATION!="<none>" %}
    repositories::{{SOLVER_INSTANCE}}.nonconservativeProduct(Q, deltaQ, x, h, t, dt, normal, BTimesDeltaQ);
    {% endif %}
  },
  [&](
    const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
    const tarch::la::Vector<Dimensions, double>& x,
    const tarch::la::Vector<Dimensions, double>& h,
    double t,
    double dt,
    double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
  )->void {
    {% if SOURCE_TERM_IMPLEMENTATION!="<none>" %}
    repositories::{{SOLVER_INSTANCE}}.sourceTerm(Q, x, h, t, dt, S);
    {% endif %}
  },
  [&](
    const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
    const tarch::la::Vector<Dimensions, double>& x,
    const tarch::la::Vector<Dimensions, double>& h,
    double t,
    double dt,
    int normal,
    double* __restrict__ L
  )->void {
    {% if EIGENVALUES_IMPLEMENTATION!="<none>" %}
    repositories::{{SOLVER_INSTANCE}}.eigenvalues(Q, x, h, t, dt, normal, L);
    {% endif %}
  },
  [&](
    const double* __restrict__ QR,
    const double* __restrict__ QL,
    const double* __restrict__ FR,
    const double* __restrict__ FL,
    const double* __restrict__ LR,
    const double* __restrict__ LL,
    const tarch::la::Vector<Dimensions, double>& xR,
    const tarch::la::Vector<Dimensions, double>& xL,
    const tarch::la::Vector<Dimensions, double>& h,
    double t,
    double dt,
    int normal,
    double* __restrict__ APDQ,
    double* __restrict__ AMDQ
  )->double {
    {% if RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
    return repositories::{{SOLVER_INSTANCE}}.solveRiemannProblem(QR, QL, FR, FL, LR, LL, xR, xL, h, t, dt, normal, APDQ, AMDQ);
    {% endif %}
  }
);
"""
    elif solver_variant == SolverVariant.Stateless:
        template += """Stateless<
  {{SOLVER_NAME}},
  {{NUMBER_OF_VOLUMES_PER_AXIS}},
  {{HALO_SIZE}},
  {{NUMBER_OF_UNKNOWNS}},
  {{NUMBER_OF_AUXILIARY_VARIABLES}},
  {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
  {{TEMP_DATA_ENUMERATOR}}
>(patchData);
"""
    elif solver_variant == SolverVariant.Multicore:
        template += """Stateless<
  {{SOLVER_NAME}},
  {{NUMBER_OF_VOLUMES_PER_AXIS}},
  {{HALO_SIZE}},
  {{NUMBER_OF_UNKNOWNS}},
  {{NUMBER_OF_AUXILIARY_VARIABLES}},
  {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
  {{TEMP_DATA_ENUMERATOR}}
>(patchData, peano4::utils::LoopPlacement::SpreadOut);
"""
    elif solver_variant == SolverVariant.Accelerator:
        template += """Stateless<
  {{SOLVER_NAME}},
  {{NUMBER_OF_VOLUMES_PER_AXIS}},
  {{HALO_SIZE}},
  {{NUMBER_OF_UNKNOWNS}},
  {{NUMBER_OF_AUXILIARY_VARIABLES}},
  {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
  {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
  {{TEMP_DATA_ENUMERATOR}}
>(targetDevice, patchData);
"""
    else:
        # raise (not assert): assertions are stripped under "python -O".
        raise ValueError(
            "Not supported combination: {} x {}".format(solver_variant, kernel_variant)
        )

    # DebugUndefined leaves unknown {{...}} placeholders in place, so outer
    # template passes can still substitute them later.
    result = jinja2.Template(template, undefined=jinja2.DebugUndefined)
    d = {}
    d["FLUX_IMPLEMENTATION"] = flux_implementation
    d["NCP_IMPLEMENTATION"] = ncp_implementation
    d["EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
    d["RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
    d["SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
    d["COMPUTE_MAX_EIGENVALUE"] = compute_max_eigenvalue_of_next_time_step
    d["TEMP_DATA_ENUMERATOR"] = EnumeratorTemplateTypes[kernel_variant]
    return result.render(**d)

def create_abstract_solver_declarations(
    flux_implementation,
    ncp_implementation,
    eigenvalues_implementation,
    riemann_solver_implementation,
    source_term_implementation,
    pde_terms_without_state,
):
    """
    Generate the member declarations for the abstract solver class.

    For every PDE term that is not "<none>", a virtual member function is
    declared; it is pure virtual ("= 0") when the term is "<user-defined>"
    and "final" otherwise. If pde_terms_without_state is set, an additional
    static, GPU-callable overload (tagged with Offloadable) is declared for
    every term that is not "<user-defined>".
    """
    Template = jinja2.Template(
        """
  public:
    {% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
    #if defined(GPUOffloadingOMP)
    #pragma omp declare target
    #endif
    static GPUCallableMethod void eigenvalues(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ L,
      Offloadable
    );
    #if defined(GPUOffloadingOMP)
    #pragma omp end declare target
    #endif
    {% endif %}

    {% if EIGENVALUES_IMPLEMENTATION!="<none>" %}
    virtual void eigenvalues(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ L
    ) {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final{% endif %};
    {% endif %}

    {% if FLUX_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
    #if defined(GPUOffloadingOMP)
    #pragma omp declare target
    #endif
    static GPUCallableMethod void flux(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
      Offloadable
    );
    #if defined(GPUOffloadingOMP)
    #pragma omp end declare target
    #endif
    {% endif %}

    {% if FLUX_IMPLEMENTATION!="<none>" %}
    virtual void flux(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
    ) {% if FLUX_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
    {% endif %}

    {% if NCP_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
    #if defined(GPUOffloadingOMP)
    #pragma omp declare target
    #endif
    static GPUCallableMethod void nonconservativeProduct(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
      Offloadable
    );
    #if defined(GPUOffloadingOMP)
    #pragma omp end declare target
    #endif
    {% endif %}

    {% if NCP_IMPLEMENTATION!="<none>" %}
    virtual void nonconservativeProduct(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
    ) {% if NCP_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
    {% endif %}

    {% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
    #if defined(GPUOffloadingOMP)
    #pragma omp declare target
    #endif
    static GPUCallableMethod void sourceTerm(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
      Offloadable
    );
    #if defined(GPUOffloadingOMP)
    #pragma omp end declare target
    #endif
    {% endif %}

    {% if SOURCE_TERM_IMPLEMENTATION!="<none>" %}
    virtual void sourceTerm(
      const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const tarch::la::Vector<Dimensions, double>& x,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
    ) {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
    {% endif %}

    {% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
    /**
     * @param QR the right state vector
     * @param QL the left state vector
     * @param FR the right flux vector
     * @param FL the left flux vector
     * @param LR the eigenvalues of the right state vector
     * @param LL the eigenvalues of the left state vector
     * @param xR coordinates of the right state vector
     * @param xL coordinates of the left state vector
     * @param h volume dimensions
     * @param t current time step
     * @param dt previous time step width
     * @param normal dimension currently being solved for
     * @param APDQ right going update, has a positive contribution on right state
     * @param AMDQ left going update, has a negative contribution on left state
     */
    #if defined(GPUOffloadingOMP)
    #pragma omp declare target
    #endif
    static GPUCallableMethod double solveRiemannProblem(
      const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
      const tarch::la::Vector<Dimensions, double>& xR,
      const tarch::la::Vector<Dimensions, double>& xL,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
      double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
      Offloadable
    );
    #if defined(GPUOffloadingOMP)
    #pragma omp end declare target
    #endif
    {% endif %}

    {% if RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
    /**
     * @param QR the right state vector
     * @param QL the left state vector
     * @param FR the right flux vector
     * @param FL the left flux vector
     * @param LR the eigenvalues of the right state vector
     * @param LL the eigenvalues of the left state vector
     * @param xR coordinates of the right state vector
     * @param xL coordinates of the left state vector
     * @param h volume dimensions
     * @param t current time step
     * @param dt previous time step width
     * @param normal dimension currently being solved for
     * @param APDQ right going update, has a positive contribution on right state
     * @param AMDQ left going update, has a negative contribution on left state
     */
    virtual double solveRiemannProblem(
      const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
      const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
      const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
      const tarch::la::Vector<Dimensions, double>& xR,
      const tarch::la::Vector<Dimensions, double>& xL,
      const tarch::la::Vector<Dimensions, double>& h,
      double t,
      double dt,
      int normal,
      double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
      double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
    ) {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
    {% endif %}
""",
        undefined=jinja2.DebugUndefined,
    )

    d = {}
    d["FLUX_IMPLEMENTATION"] = flux_implementation
    d["NCP_IMPLEMENTATION"] = ncp_implementation
    d["EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
    d["RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
    d["SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
    d["STATELESS_PDE_TERMS"] = pde_terms_without_state
    return Template.render(**d)

def create_abstract_solver_definitions(
    flux_implementation,
    ncp_implementation,
    eigenvalues_implementation,
    riemann_solver_implementation,
    source_term_implementation,
    pde_terms_without_state,
):
    """
    Generate the out-of-class definitions for the abstract solver.

    For every PDE term whose implementation is neither "<user-defined>" nor
    "<none>", the supplied C++ snippet is pasted into the member function
    body. If pde_terms_without_state is set, a static, GPU-callable overload
    (tagged with Offloadable) is generated as well. A "<empty>" source term
    is translated into zero-filling S.
    """
    Template = jinja2.Template(
        """
{% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and EIGENVALUES_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ L,
  Offloadable
) {
  {{EIGENVALUES_IMPLEMENTATION}}
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and EIGENVALUES_IMPLEMENTATION!="<none>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ L
) {
  {{EIGENVALUES_IMPLEMENTATION}}
}
{% endif %}

{% if FLUX_IMPLEMENTATION!="<user-defined>" and FLUX_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
  Offloadable
) {
  {{FLUX_IMPLEMENTATION}}
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if FLUX_IMPLEMENTATION!="<user-defined>" and FLUX_IMPLEMENTATION!="<none>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
) {
  {{FLUX_IMPLEMENTATION}}
}
{% endif %}

{% if NCP_IMPLEMENTATION!="<user-defined>" and NCP_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  {{NCP_IMPLEMENTATION}}
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if NCP_IMPLEMENTATION!="<user-defined>" and NCP_IMPLEMENTATION!="<none>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
) {
  {{NCP_IMPLEMENTATION}}
}
{% endif %}

{% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and SOURCE_TERM_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  {{SOURCE_TERM_IMPLEMENTATION}}
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and SOURCE_TERM_IMPLEMENTATION!="<none>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
) {
  {% if SOURCE_TERM_IMPLEMENTATION!="<empty>" %}
  {{SOURCE_TERM_IMPLEMENTATION}}
  {% else %}
  std::fill_n(S, {{NUMBER_OF_UNKNOWNS}}, 0.0);
  {% endif %}
}
{% endif %}

{% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and RIEMANN_SOLVER_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
  [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  {{RIEMANN_SOLVER_IMPLEMENTATION}}
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
  [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
) {
  {{RIEMANN_SOLVER_IMPLEMENTATION}}
}
{% endif %}
""",
        undefined=jinja2.DebugUndefined,
    )

    d = {}
    d["FLUX_IMPLEMENTATION"] = flux_implementation
    d["NCP_IMPLEMENTATION"] = ncp_implementation
    d["EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
    d["RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
    d["SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
    d["STATELESS_PDE_TERMS"] = pde_terms_without_state
    return Template.render(**d)

672 flux_implementation,
673 ncp_implementation,
674 eigenvalues_implementation,
675 riemann_solver_implementation,
676 source_term_implementation,
677 pde_terms_without_state,
678):
679 Template = jinja2.Template(
680 """
681 public:
682 {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
683 #if defined(GPUOffloadingOMP)
684 #pragma omp declare target
685 #endif
686 static GPUCallableMethod void eigenvalues(
687 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
688 const tarch::la::Vector<Dimensions, double>& x,
689 const tarch::la::Vector<Dimensions, double>& h,
690 double t,
691 double dt,
692 int normal,
693 double* __restrict__ L,
694 Offloadable
695 )
696 //#if defined(GPUOffloadingSYCL)
697 //{}
698 //#else
699 ;
700 //#endif
701 #if defined(GPUOffloadingOMP)
702 #pragma omp end declare target
703 #endif
704 {% endif %}
705
706 {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %}
707 virtual void eigenvalues(
708 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
709 const tarch::la::Vector<Dimensions, double>& x,
710 const tarch::la::Vector<Dimensions, double>& h,
711 double t,
712 double dt,
713 int normal,
714 double* __restrict__ L
715 ) override;
716 {% endif %}
717
718 {% if FLUX_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
719 #if defined(GPUOffloadingOMP)
720 #pragma omp declare target
721 #endif
722 static GPUCallableMethod void flux(
723 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
724 const tarch::la::Vector<Dimensions, double>& x,
725 const tarch::la::Vector<Dimensions, double>& h,
726 double t,
727 double dt,
728 int normal,
729 double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
730 Offloadable
731 );
732 #if defined(GPUOffloadingOMP)
733 #pragma omp end declare target
734 #endif
735 {% endif %}
736
737 {% if FLUX_IMPLEMENTATION=="<user-defined>" %}
738 virtual void flux(
739 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
740 const tarch::la::Vector<Dimensions, double>& x,
741 const tarch::la::Vector<Dimensions, double>& h,
742 double t,
743 double dt,
744 int normal,
745 double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
746 ) override;
747 {% endif %}
748
749 {% if NCP_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
750 #if defined(GPUOffloadingOMP)
751 #pragma omp declare target
752 #endif
753 static GPUCallableMethod void nonconservativeProduct(
754 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
755 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
756 const tarch::la::Vector<Dimensions, double>& x,
757 const tarch::la::Vector<Dimensions, double>& h,
758 double t,
759 double dt,
760 int normal,
761 double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
762 Offloadable
763 );
764 #if defined(GPUOffloadingOMP)
765 #pragma omp end declare target
766 #endif
767 {% endif %}
768
769 {% if NCP_IMPLEMENTATION=="<user-defined>" %}
770 virtual void nonconservativeProduct(
771 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
772 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
773 const tarch::la::Vector<Dimensions, double>& x,
774 const tarch::la::Vector<Dimensions, double>& h,
775 double t,
776 double dt,
777 int normal,
778 double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
779 ) override;
780 {% endif %}
781
782 {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
783 #if defined(GPUOffloadingOMP)
784 #pragma omp declare target
785 #endif
786 static GPUCallableMethod void sourceTerm(
787 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
788 const tarch::la::Vector<Dimensions, double>& x,
789 const tarch::la::Vector<Dimensions, double>& h,
790 double t,
791 double dt,
792 double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
793 Offloadable
794 );
795 #if defined(GPUOffloadingOMP)
796 #pragma omp end declare target
797 #endif
798 {% endif %}
799
800 {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %}
801 virtual void sourceTerm(
802 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
803 const tarch::la::Vector<Dimensions, double>& x,
804 const tarch::la::Vector<Dimensions, double>& h,
805 double t,
806 double dt,
807 double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
808 ) override;
809 {% endif %}
810
811 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
812 /**
813 * @param QR the right state vector
814 * @param QL the left state vector
815 * @param FR the right flux vector
816 * @param FL the left flux vector
817 * @param LR the eigenvalues of the right state vector
818 * @param LL the eigenvalues of the left state vector
819 * @param xR coordinates of the right state vector
820 * @param xL coordinates of the left state vector
821 * @param h volume dimensions
822 * @param t current time step
823 * @param dt previous time step width
824 * @param normal dimension currently being solved for
825 * @param APDQ right going update, has a positive contribution on right state
826 * @param AMDQ left going update, has a negative contribution on left state
827 */
828 #if defined(GPUOffloadingOMP)
829 #pragma omp declare target
830 #endif
831 static GPUCallableMethod double solveRiemannProblem(
832 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
833 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
834 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
835 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
836 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
837 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
838 const tarch::la::Vector<Dimensions, double>& xR,
839 const tarch::la::Vector<Dimensions, double>& xL,
840 const tarch::la::Vector<Dimensions, double>& h,
841 double t,
842 double dt,
843 int normal,
844 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
845 double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
846 Offloadable
847 );
848 #if defined(GPUOffloadingOMP)
849 #pragma omp end declare target
850 #endif
851 {% endif %}
852
853 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %}
854 /**
855 * @param QR the right state vector
856 * @param QL the left state vector
857 * @param FR the right flux vector
858 * @param FL the left flux vector
859 * @param LR the eigenvalues of the right state vector
860 * @param LL the eigenvalues of the left state vector
861 * @param xR coordinates of the right state vector
862 * @param xL coordinates of the left state vector
863 * @param h volume dimensions
864 * @param t current time step
865 * @param dt previous time step width
866 * @param normal dimension currently being solved for
867 * @param APDQ right going update, has a positive contribution on right state
868 * @param AMDQ left going update, has a negative contribution on left state
869 */
870 virtual double solveRiemannProblem(
871 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
872 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
873 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
874 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
875 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
876 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
877 const tarch::la::Vector<Dimensions, double>& xR,
878 const tarch::la::Vector<Dimensions, double>& xL,
879 const tarch::la::Vector<Dimensions, double>& h,
880 double t,
881 double dt,
882 int normal,
883 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
884 double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
885 ) override;
886 {% endif %}
887""",
888 undefined=jinja2.DebugUndefined,
889 )
890
891 d = {}
892 d["FLUX_IMPLEMENTATION"] = flux_implementation
893 d["NCP_IMPLEMENTATION"] = ncp_implementation
894 d["EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
895 d["RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
896 d["SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
897 d["STATELESS_PDE_TERMS"] = pde_terms_without_state
898 return Template.render(**d)
899
900
def create_solver_definitions(
    flux_implementation,
    ncp_implementation,
    eigenvalues_implementation,
    riemann_solver_implementation,
    source_term_implementation,
    pde_terms_without_state,
):
    """
    Create the implementation stubs (definitions) for the user-defined PDE
    terms of the generated solver class.

    For every PDE term (eigenvalues, flux, non-conservative product, source
    term, Riemann solver) whose implementation is marked as "<user-defined>",
    an empty C++ implementation skeleton with a @todo marker is emitted. If
    pde_terms_without_state is set, an additional static, GPU-offloadable
    variant (guarded by GPUOffloadingOMP declare-target pragmas) is generated
    for each such term.

    The template is rendered with jinja2.DebugUndefined, so placeholders that
    are not substituted here (e.g. {{FULL_QUALIFIED_NAMESPACE}}, {{CLASSNAME}},
    {{NUMBER_OF_UNKNOWNS}}) survive this pass verbatim and are filled in by a
    later rendering stage.

    :param flux_implementation: implementation marker for the flux term.
    :param ncp_implementation: implementation marker for the ncp term.
    :param eigenvalues_implementation: implementation marker for eigenvalues.
    :param riemann_solver_implementation: implementation marker for the
        Riemann solver.
    :param source_term_implementation: implementation marker for the source
        term.
    :param pde_terms_without_state: if truthy, also emit the stateless
        (static, GPU-capable) variants of the user-defined terms.
    :returns: the rendered C++ definition snippet as a string.
    """
    Template = jinja2.Template(
        """
{% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
//#if !defined(GPUOffloadingSYCL)
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ L,
  Offloadable
) {
  logTraceInWith4Arguments("eigenvalues(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("eigenvalues(...)");
}
//#endif
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ L
) {
  logTraceInWith4Arguments("eigenvalues(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("eigenvalues(...)");
}
{% endif %}

{% if FLUX_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
//#if !defined(GPUOffloadingSYCL)
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
  Offloadable
) {
  logTraceInWith4Arguments("flux(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("flux(...)");
}
//#endif
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if FLUX_IMPLEMENTATION=="<user-defined>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
) {
  logTraceInWith4Arguments("flux(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("flux(...)");
}
{% endif %}

{% if NCP_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  logTraceInWith4Arguments("nonconservativeProduct(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("nonconservativeProduct(...)");
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if NCP_IMPLEMENTATION=="<user-defined>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
) {
  logTraceInWith4Arguments("nonconservativeProduct(...)", x, h, t, normal);
  // @todo implement
  logTraceOut("nonconservativeProduct(...)");
}
{% endif %}

{% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  logTraceInWith4Arguments("sourceTerm(...)", x, h, t, dt);

  // @todo implement and ensure that all entries of S are properly set
  for (int i = 0; i < NumberOfUnknowns; i++) {
    S[i] = 0.0;
  }

  logTraceOut("sourceTerm(...)");
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %}
void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
  [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
) {
  logTraceInWith4Arguments("sourceTerm(...)", x, h, t, dt);

  // @todo implement and ensure that all entries of S are properly set
  for (int i = 0; i < NumberOfUnknowns; i++) {
    S[i] = 0.0;
  }

  logTraceOut("sourceTerm(...)");
}
{% endif %}

{% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
#if defined(GPUOffloadingOMP)
#pragma omp declare target
#endif
GPUCallableMethod double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
  [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] Offloadable
) {
  logTraceInWith4Arguments("solveRiemannProblem(...)", xR, xL, t, normal);
  // @todo implement
  logTraceOut("solveRiemannProblem(...)");
}
#if defined(GPUOffloadingOMP)
#pragma omp end declare target
#endif
{% endif %}

{% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %}
double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
  [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
  [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
  [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
  [[maybe_unused]] double t,
  [[maybe_unused]] double dt,
  [[maybe_unused]] int normal,
  [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
  [[maybe_unused]] double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
) {
  logTraceInWith4Arguments("solveRiemannProblem(...)", xR, xL, t, normal);
  // @todo implement
  logTraceOut("solveRiemannProblem(...)");
}
{% endif %}""",
        undefined=jinja2.DebugUndefined,
    )

    d = {}
    d["FLUX_IMPLEMENTATION"] = flux_implementation
    d["NCP_IMPLEMENTATION"] = ncp_implementation
    d["EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
    d["RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
    d["SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
    d["STATELESS_PDE_TERMS"] = pde_terms_without_state
    return Template.render(**d)
create_solver_definitions(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, pde_terms_without_state)
Definition kernels.py:908
create_abstract_solver_declarations(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, pde_terms_without_state)
Definition kernels.py:237
create_solver_declarations(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, pde_terms_without_state)
Definition kernels.py:678
create_compute_Riemann_kernel(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, compute_max_eigenvalue_of_next_time_step, SolverVariant solver_variant, KernelVariant kernel_variant)
Return only the unqualified function call, i.e., without any namespaces.
Definition kernels.py:39
create_abstract_solver_definitions(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, pde_terms_without_state)
Definition kernels.py:462