33 eigenvalues_implementation,
34 riemann_solver_implementation,
35 source_term_implementation,
36 compute_max_eigenvalue_of_next_time_step,
37 solver_variant: SolverVariant,
38 kernel_variant: KernelVariant,
41 Return only the unqualified function call, i.e., without any namespaces.
42 So by setting the right namespace as prefix, you can direct it to particular
46 KernelVariant.PatchWiseAoS:
"timeStepWithRiemannPatchwiseHeap",
47 KernelVariant.PatchWiseAoSoA:
"timeStepWithRiemannPatchwiseHeap",
48 KernelVariant.PatchWiseSoA:
"timeStepWithRiemannPatchwiseHeap",
49 KernelVariant.BatchedAoS:
"timeStepWithRiemannBatchedHeap",
50 KernelVariant.BatchedAoSoA:
"timeStepWithRiemannBatchedHeap",
51 KernelVariant.BatchedSoA:
"timeStepWithRiemannBatchedHeap",
52 KernelVariant.TaskGraphAoS:
"timeStepWithRiemannTaskgraphHeap",
53 KernelVariant.TaskGraphAoSoA:
"timeStepWithRiemannTaskgraphHeap",
54 KernelVariant.TaskGraphSoA:
"timeStepWithRiemannTaskgraphHeap",
55 KernelVariant.VolumeWiseAoS:
"timeStepWithRiemannVolumewise",
56 KernelVariant.VolumeWiseAoSoA:
"timeStepWithRiemannVolumewise",
57 KernelVariant.VolumeWiseSoA:
"timeStepWithRiemannVolumewise",
60 EnumeratorTemplateTypes = {
61 KernelVariant.PatchWiseAoS:
"::exahype2::enumerator::AoSLexicographicEnumerator",
62 KernelVariant.PatchWiseAoSoA:
"::exahype2::enumerator::AoSoALexicographicEnumerator",
63 KernelVariant.PatchWiseSoA:
"::exahype2::enumerator::SoALexicographicEnumerator",
64 KernelVariant.BatchedAoS:
"::exahype2::enumerator::AoSLexicographicEnumerator",
65 KernelVariant.BatchedAoSoA:
"::exahype2::enumerator::AoSoALexicographicEnumerator",
66 KernelVariant.BatchedSoA:
"::exahype2::enumerator::SoALexicographicEnumerator",
67 KernelVariant.TaskGraphAoS:
"::exahype2::enumerator::AoSLexicographicEnumerator",
68 KernelVariant.TaskGraphAoSoA:
"::exahype2::enumerator::AoSoALexicographicEnumerator",
69 KernelVariant.TaskGraphSoA:
"::exahype2::enumerator::SoALexicographicEnumerator",
70 KernelVariant.VolumeWiseAoS:
"::exahype2::enumerator::AoSLexicographicEnumerator",
71 KernelVariant.VolumeWiseAoSoA:
"::exahype2::enumerator::AoSoALexicographicEnumerator",
72 KernelVariant.VolumeWiseSoA:
"::exahype2::enumerator::SoALexicographicEnumerator",
75 template = KernelCalls[kernel_variant]
77 if solver_variant == SolverVariant.WithVirtualFunctions:
78 template +=
"""Functors<
79 {{NUMBER_OF_VOLUMES_PER_AXIS}},
81 {{NUMBER_OF_UNKNOWNS}},
82 {{NUMBER_OF_AUXILIARY_VARIABLES}},
83 {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
84 {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
85 {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
86 {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
87 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
88 {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
89 {{TEMP_DATA_ENUMERATOR}}
92 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
93 const tarch::la::Vector<Dimensions, double>& x,
94 const tarch::la::Vector<Dimensions, double>& h,
98 double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
100 {% if FLUX_IMPLEMENTATION!="<none>" %}
101 repositories::{{SOLVER_INSTANCE}}.flux(Q, x, h, t, dt, normal, F);
105 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
106 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
107 const tarch::la::Vector<Dimensions, double>& x,
108 const tarch::la::Vector<Dimensions, double>& h,
112 double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
114 {% if NCP_IMPLEMENTATION!="<none>" %}
115 repositories::{{SOLVER_INSTANCE}}.nonconservativeProduct(Q, deltaQ, x, h, t, dt, normal, BTimesDeltaQ);
119 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
120 const tarch::la::Vector<Dimensions, double>& x,
121 const tarch::la::Vector<Dimensions, double>& h,
124 double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
126 {% if SOURCE_TERM_IMPLEMENTATION!="<none>" %}
127 repositories::{{SOLVER_INSTANCE}}.sourceTerm(Q, x, h, t, dt, S);
131 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
132 const tarch::la::Vector<Dimensions, double>& x,
133 const tarch::la::Vector<Dimensions, double>& h,
137 double* __restrict__ L
139 {% if EIGENVALUES_IMPLEMENTATION!="<none>" %}
140 repositories::{{SOLVER_INSTANCE}}.eigenvalues(Q, x, h, t, dt, normal, L);
144 const double* __restrict__ QR,
145 const double* __restrict__ QL,
146 const double* __restrict__ FR,
147 const double* __restrict__ FL,
148 const double* __restrict__ LR,
149 const double* __restrict__ LL,
150 const tarch::la::Vector<Dimensions, double>& xR,
151 const tarch::la::Vector<Dimensions, double>& xL,
152 const tarch::la::Vector<Dimensions, double>& h,
156 double* __restrict__ APDQ,
157 double* __restrict__ AMDQ
159 {% if RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
160 return repositories::{{SOLVER_INSTANCE}}.solveRiemannProblem(QR, QL, FR, FL, LR, LL, xR, xL, h, t, dt, normal, APDQ, AMDQ);
165 elif solver_variant == SolverVariant.Stateless:
166 template +=
"""Stateless<
168 {{NUMBER_OF_VOLUMES_PER_AXIS}},
170 {{NUMBER_OF_UNKNOWNS}},
171 {{NUMBER_OF_AUXILIARY_VARIABLES}},
172 {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
173 {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
174 {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
175 {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
176 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
177 {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
178 {{TEMP_DATA_ENUMERATOR}}
181 elif solver_variant == SolverVariant.Multicore:
182 template +=
"""Stateless<
184 {{NUMBER_OF_VOLUMES_PER_AXIS}},
186 {{NUMBER_OF_UNKNOWNS}},
187 {{NUMBER_OF_AUXILIARY_VARIABLES}},
188 {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
189 {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
190 {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
191 {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
192 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
193 {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
194 {{TEMP_DATA_ENUMERATOR}}
195 >(patchData, peano4::utils::LoopPlacement::SpreadOut);
197 elif solver_variant == SolverVariant.Accelerator:
198 template +=
"""Stateless<
200 {{NUMBER_OF_VOLUMES_PER_AXIS}},
202 {{NUMBER_OF_UNKNOWNS}},
203 {{NUMBER_OF_AUXILIARY_VARIABLES}},
204 {% if FLUX_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
205 {% if NCP_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
206 {% if EIGENVALUES_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
207 {% if SOURCE_TERM_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
208 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<none>" %} false {% else %} true {% endif %},
209 {% if COMPUTE_MAX_EIGENVALUE==False %} false {% else %} true {% endif %},
210 {{TEMP_DATA_ENUMERATOR}}
211 >(targetDevice, patchData);
214 assert False,
"Not supported combination: {} x {}".format(
215 solver_variant, kernel_variant
218 result = jinja2.Template(template, undefined=jinja2.DebugUndefined)
220 d[
"FLUX_IMPLEMENTATION"] = flux_implementation
221 d[
"NCP_IMPLEMENTATION"] = ncp_implementation
222 d[
"EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
223 d[
"RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
224 d[
"SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
225 d[
"COMPUTE_MAX_EIGENVALUE"] = compute_max_eigenvalue_of_next_time_step
226 d[
"TEMP_DATA_ENUMERATOR"] = EnumeratorTemplateTypes[kernel_variant]
227 return result.render(**d)
233 eigenvalues_implementation,
234 riemann_solver_implementation,
235 source_term_implementation,
236 pde_terms_without_state,
238 Template = jinja2.Template(
241 {% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
242 #if defined(GPUOffloadingOMP)
243 #pragma omp declare target
245 static GPUCallableMethod void eigenvalues(
246 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
247 const tarch::la::Vector<Dimensions, double>& x,
248 const tarch::la::Vector<Dimensions, double>& h,
252 double* __restrict__ L,
255 #if defined(GPUOffloadingOMP)
256 #pragma omp end declare target
260 {% if EIGENVALUES_IMPLEMENTATION!="<none>" %}
261 virtual void eigenvalues(
262 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
263 const tarch::la::Vector<Dimensions, double>& x,
264 const tarch::la::Vector<Dimensions, double>& h,
268 double* __restrict__ L
269 ) {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final{% endif %};
272 {% if FLUX_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
273 #if defined(GPUOffloadingOMP)
274 #pragma omp declare target
276 static GPUCallableMethod void flux(
277 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
278 const tarch::la::Vector<Dimensions, double>& x,
279 const tarch::la::Vector<Dimensions, double>& h,
283 double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
286 #if defined(GPUOffloadingOMP)
287 #pragma omp end declare target
291 {% if FLUX_IMPLEMENTATION!="<none>" %}
293 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
294 const tarch::la::Vector<Dimensions, double>& x,
295 const tarch::la::Vector<Dimensions, double>& h,
299 double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
300 ) {% if FLUX_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
303 {% if NCP_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
304 #if defined(GPUOffloadingOMP)
305 #pragma omp declare target
307 static GPUCallableMethod void nonconservativeProduct(
308 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
309 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
310 const tarch::la::Vector<Dimensions, double>& x,
311 const tarch::la::Vector<Dimensions, double>& h,
315 double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
318 #if defined(GPUOffloadingOMP)
319 #pragma omp end declare target
323 {% if NCP_IMPLEMENTATION!="<none>" %}
324 virtual void nonconservativeProduct(
325 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
326 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
327 const tarch::la::Vector<Dimensions, double>& x,
328 const tarch::la::Vector<Dimensions, double>& h,
332 double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
333 ) {% if NCP_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
336 {% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
337 #if defined(GPUOffloadingOMP)
338 #pragma omp declare target
340 static GPUCallableMethod void sourceTerm(
341 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
342 const tarch::la::Vector<Dimensions, double>& x,
343 const tarch::la::Vector<Dimensions, double>& h,
346 double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
349 #if defined(GPUOffloadingOMP)
350 #pragma omp end declare target
354 {% if SOURCE_TERM_IMPLEMENTATION!="<none>" %}
355 virtual void sourceTerm(
356 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
357 const tarch::la::Vector<Dimensions, double>& x,
358 const tarch::la::Vector<Dimensions, double>& h,
361 double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
362 ) {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
365 {% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and STATELESS_PDE_TERMS %}
367 * @param QR the right state vector
368 * @param QL the left state vector
369 * @param FR the right flux vector
370 * @param FL the left flux vector
371 * @param LR the eigenvalues of the right state vector
372 * @param LL the eigenvalues of the left state vector
373 * @param xR coordinates of the right state vector
374 * @param xL coordinates of the left state vector
375 * @param h volume dimensions
376 * @param t current time step
377 * @param dt previous time step width
378 * @param normal dimension currently being solved for
379 * @param APDQ right going update, has a positive contribution on right state
380 * @param AMDQ left going update, has a negative contribution on left state
382 #if defined(GPUOffloadingOMP)
383 #pragma omp declare target
385 static GPUCallableMethod double solveRiemannProblem(
386 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
387 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
388 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
389 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
390 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
391 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
392 const tarch::la::Vector<Dimensions, double>& xR,
393 const tarch::la::Vector<Dimensions, double>& xL,
394 const tarch::la::Vector<Dimensions, double>& h,
398 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
399 double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
402 #if defined(GPUOffloadingOMP)
403 #pragma omp end declare target
407 {% if RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
409 * @param QR the right state vector
410 * @param QL the left state vector
411 * @param FR the right flux vector
412 * @param FL the left flux vector
413 * @param LR the eigenvalues of the right state vector
414 * @param LL the eigenvalues of the left state vector
415 * @param xR coordinates of the right state vector
416 * @param xL coordinates of the left state vector
417 * @param h volume dimensions
418 * @param t current time step
419 * @param dt previous time step width
420 * @param normal dimension currently being solved for
421 * @param APDQ right going update, has a positive contribution on right state
422 * @param AMDQ left going update, has a negative contribution on left state
424 virtual double solveRiemannProblem(
425 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
426 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
427 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
428 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
429 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
430 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
431 const tarch::la::Vector<Dimensions, double>& xR,
432 const tarch::la::Vector<Dimensions, double>& xL,
433 const tarch::la::Vector<Dimensions, double>& h,
437 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
438 double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
439 ) {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %} = 0{% else %} final {% endif %};
442 undefined=jinja2.DebugUndefined,
446 d[
"FLUX_IMPLEMENTATION"] = flux_implementation
447 d[
"NCP_IMPLEMENTATION"] = ncp_implementation
448 d[
"EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
449 d[
"RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
450 d[
"SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
451 d[
"STATELESS_PDE_TERMS"] = pde_terms_without_state
452 return Template.render(**d)
458 eigenvalues_implementation,
459 riemann_solver_implementation,
460 source_term_implementation,
461 pde_terms_without_state,
463 Template = jinja2.Template(
465{% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and EIGENVALUES_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
466#if defined(GPUOffloadingOMP)
467#pragma omp declare target
469GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
470 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
471 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
472 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
473 [[maybe_unused]] double t,
474 [[maybe_unused]] double dt,
475 [[maybe_unused]] int normal,
476 [[maybe_unused]] double* __restrict__ L,
479 {{EIGENVALUES_IMPLEMENTATION}}
481#if defined(GPUOffloadingOMP)
482#pragma omp end declare target
486{% if EIGENVALUES_IMPLEMENTATION!="<user-defined>" and EIGENVALUES_IMPLEMENTATION!="<none>" %}
487void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
488 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
489 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
490 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
491 [[maybe_unused]] double t,
492 [[maybe_unused]] double dt,
493 [[maybe_unused]] int normal,
494 [[maybe_unused]] double* __restrict__ L
496 {{EIGENVALUES_IMPLEMENTATION}}
500{% if FLUX_IMPLEMENTATION!="<user-defined>" and FLUX_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
501#if defined(GPUOffloadingOMP)
502#pragma omp declare target
504GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
505 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
506 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
507 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
508 [[maybe_unused]] double t,
509 [[maybe_unused]] double dt,
510 [[maybe_unused]] int normal,
511 [[maybe_unused]] double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
514 {{FLUX_IMPLEMENTATION}}
516#if defined(GPUOffloadingOMP)
517#pragma omp end declare target
521{% if FLUX_IMPLEMENTATION!="<user-defined>" and FLUX_IMPLEMENTATION!="<none>" %}
522void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
523 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
524 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
525 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
526 [[maybe_unused]] double t,
527 [[maybe_unused]] double dt,
528 [[maybe_unused]] int normal,
529 [[maybe_unused]] double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
531 {{FLUX_IMPLEMENTATION}}
535{% if NCP_IMPLEMENTATION!="<user-defined>" and NCP_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
536#if defined(GPUOffloadingOMP)
537#pragma omp declare target
539GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
540 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
541 [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
542 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
543 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
544 [[maybe_unused]] double t,
545 [[maybe_unused]] double dt,
546 [[maybe_unused]] int normal,
547 [[maybe_unused]] double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
548 [[maybe_unused]] Offloadable
550 {{NCP_IMPLEMENTATION}}
552#if defined(GPUOffloadingOMP)
553#pragma omp end declare target
557{% if NCP_IMPLEMENTATION!="<user-defined>" and NCP_IMPLEMENTATION!="<none>" %}
558void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
559 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
560 [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
561 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
562 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
563 [[maybe_unused]] double t,
564 [[maybe_unused]] double dt,
565 [[maybe_unused]] int normal,
566 [[maybe_unused]] double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
568 {{NCP_IMPLEMENTATION}}
572{% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and SOURCE_TERM_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
573#if defined(GPUOffloadingOMP)
574#pragma omp declare target
576GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
577 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
578 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
579 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
580 [[maybe_unused]] double t,
581 [[maybe_unused]] double dt,
582 [[maybe_unused]] double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
583 [[maybe_unused]] Offloadable
585 {{SOURCE_TERM_IMPLEMENTATION}}
587#if defined(GPUOffloadingOMP)
588#pragma omp end declare target
592{% if SOURCE_TERM_IMPLEMENTATION!="<user-defined>" and SOURCE_TERM_IMPLEMENTATION!="<none>" %}
593void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
594 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
595 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
596 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
597 [[maybe_unused]] double t,
598 [[maybe_unused]] double dt,
599 [[maybe_unused]] double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
601 {% if SOURCE_TERM_IMPLEMENTATION!="<empty>" %}
602 {{SOURCE_TERM_IMPLEMENTATION}}
604 std::fill_n(S, {{NUMBER_OF_UNKNOWNS}}, 0.0);
609{% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and RIEMANN_SOLVER_IMPLEMENTATION!="<none>" and STATELESS_PDE_TERMS %}
610#if defined(GPUOffloadingOMP)
611#pragma omp declare target
613GPUCallableMethod double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
614 [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
615 [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
616 [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
617 [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
618 [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
619 [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
620 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
621 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
622 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
623 [[maybe_unused]] double t,
624 [[maybe_unused]] double dt,
625 [[maybe_unused]] int normal,
626 [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
627 [[maybe_unused]] double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
628 [[maybe_unused]] Offloadable
630 {{RIEMANN_SOLVER_IMPLEMENTATION}}
632#if defined(GPUOffloadingOMP)
633#pragma omp end declare target
637{% if RIEMANN_SOLVER_IMPLEMENTATION!="<user-defined>" and RIEMANN_SOLVER_IMPLEMENTATION!="<none>" %}
638double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
639 [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
640 [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
641 [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
642 [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
643 [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
644 [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
645 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
646 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
647 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
648 [[maybe_unused]] double t,
649 [[maybe_unused]] double dt,
650 [[maybe_unused]] int normal,
651 [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
652 [[maybe_unused]] double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
654 {{RIEMANN_SOLVER_IMPLEMENTATION}}
658 undefined=jinja2.DebugUndefined,
662 d[
"FLUX_IMPLEMENTATION"] = flux_implementation
663 d[
"NCP_IMPLEMENTATION"] = ncp_implementation
664 d[
"EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
665 d[
"RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
666 d[
"SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
667 d[
"STATELESS_PDE_TERMS"] = pde_terms_without_state
668 return Template.render(**d)
674 eigenvalues_implementation,
675 riemann_solver_implementation,
676 source_term_implementation,
677 pde_terms_without_state,
679 Template = jinja2.Template(
682 {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
683 #if defined(GPUOffloadingOMP)
684 #pragma omp declare target
686 static GPUCallableMethod void eigenvalues(
687 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
688 const tarch::la::Vector<Dimensions, double>& x,
689 const tarch::la::Vector<Dimensions, double>& h,
693 double* __restrict__ L,
696 //#if defined(GPUOffloadingSYCL)
701 #if defined(GPUOffloadingOMP)
702 #pragma omp end declare target
706 {% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %}
707 virtual void eigenvalues(
708 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
709 const tarch::la::Vector<Dimensions, double>& x,
710 const tarch::la::Vector<Dimensions, double>& h,
714 double* __restrict__ L
718 {% if FLUX_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
719 #if defined(GPUOffloadingOMP)
720 #pragma omp declare target
722 static GPUCallableMethod void flux(
723 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
724 const tarch::la::Vector<Dimensions, double>& x,
725 const tarch::la::Vector<Dimensions, double>& h,
729 double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
732 #if defined(GPUOffloadingOMP)
733 #pragma omp end declare target
737 {% if FLUX_IMPLEMENTATION=="<user-defined>" %}
739 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
740 const tarch::la::Vector<Dimensions, double>& x,
741 const tarch::la::Vector<Dimensions, double>& h,
745 double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
749 {% if NCP_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
750 #if defined(GPUOffloadingOMP)
751 #pragma omp declare target
753 static GPUCallableMethod void nonconservativeProduct(
754 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
755 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
756 const tarch::la::Vector<Dimensions, double>& x,
757 const tarch::la::Vector<Dimensions, double>& h,
761 double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
764 #if defined(GPUOffloadingOMP)
765 #pragma omp end declare target
769 {% if NCP_IMPLEMENTATION=="<user-defined>" %}
770 virtual void nonconservativeProduct(
771 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
772 const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
773 const tarch::la::Vector<Dimensions, double>& x,
774 const tarch::la::Vector<Dimensions, double>& h,
778 double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
782 {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
783 #if defined(GPUOffloadingOMP)
784 #pragma omp declare target
786 static GPUCallableMethod void sourceTerm(
787 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
788 const tarch::la::Vector<Dimensions, double>& x,
789 const tarch::la::Vector<Dimensions, double>& h,
792 double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
795 #if defined(GPUOffloadingOMP)
796 #pragma omp end declare target
800 {% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %}
801 virtual void sourceTerm(
802 const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
803 const tarch::la::Vector<Dimensions, double>& x,
804 const tarch::la::Vector<Dimensions, double>& h,
807 double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
811 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
813 * @param QR the right state vector
814 * @param QL the left state vector
815 * @param FR the right flux vector
816 * @param FL the left flux vector
817 * @param LR the eigenvalues of the right state vector
818 * @param LL the eigenvalues of the left state vector
819 * @param xR coordinates of the right state vector
820 * @param xL coordinates of the left state vector
821 * @param h volume dimensions
822 * @param t current time step
823 * @param dt previous time step width
824 * @param normal dimension currently being solved for
825 * @param APDQ right going update, has a positive contribution on right state
826 * @param AMDQ left going update, has a negative contribution on left state
828 #if defined(GPUOffloadingOMP)
829 #pragma omp declare target
831 static GPUCallableMethod double solveRiemannProblem(
832 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
833 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
834 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
835 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
836 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
837 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
838 const tarch::la::Vector<Dimensions, double>& xR,
839 const tarch::la::Vector<Dimensions, double>& xL,
840 const tarch::la::Vector<Dimensions, double>& h,
844 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
845 double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
848 #if defined(GPUOffloadingOMP)
849 #pragma omp end declare target
853 {% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %}
855 * @param QR the right state vector
856 * @param QL the left state vector
857 * @param FR the right flux vector
858 * @param FL the left flux vector
859 * @param LR the eigenvalues of the right state vector
860 * @param LL the eigenvalues of the left state vector
861 * @param xR coordinates of the right state vector
862 * @param xL coordinates of the left state vector
863 * @param h volume dimensions
864 * @param t current time step
865 * @param dt previous time step width
866 * @param normal dimension currently being solved for
867 * @param APDQ right going update, has a positive contribution on right state
868 * @param AMDQ left going update, has a negative contribution on left state
870 virtual double solveRiemannProblem(
871 const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
872 const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
873 const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
874 const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
875 const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
876 const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
877 const tarch::la::Vector<Dimensions, double>& xR,
878 const tarch::la::Vector<Dimensions, double>& xL,
879 const tarch::la::Vector<Dimensions, double>& h,
883 double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
884 double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
888 undefined=jinja2.DebugUndefined,
892 d[
"FLUX_IMPLEMENTATION"] = flux_implementation
893 d[
"NCP_IMPLEMENTATION"] = ncp_implementation
894 d[
"EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
895 d[
"RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
896 d[
"SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
897 d[
"STATELESS_PDE_TERMS"] = pde_terms_without_state
898 return Template.render(**d)
904 eigenvalues_implementation,
905 riemann_solver_implementation,
906 source_term_implementation,
907 pde_terms_without_state,
909 Template = jinja2.Template(
911{% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
912#if defined(GPUOffloadingOMP)
913#pragma omp declare target
915//#if !defined(GPUOffloadingSYCL)
916GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
917 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
918 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
919 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
920 [[maybe_unused]] double t,
921 [[maybe_unused]] double dt,
922 [[maybe_unused]] int normal,
923 [[maybe_unused]] double* __restrict__ L,
926 logTraceInWith4Arguments("eigenvalues(...)", x, h, t, normal);
928 logTraceOut("eigenvalues(...)");
931#if defined(GPUOffloadingOMP)
932#pragma omp end declare target
936{% if EIGENVALUES_IMPLEMENTATION=="<user-defined>" %}
937void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::eigenvalues(
938 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
939 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
940 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
941 [[maybe_unused]] double t,
942 [[maybe_unused]] double dt,
943 [[maybe_unused]] int normal,
944 [[maybe_unused]] double* __restrict__ L
946 logTraceInWith4Arguments("eigenvalues(...)", x, h, t, normal);
948 logTraceOut("eigenvalues(...)");
952{% if FLUX_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
953#if defined(GPUOffloadingOMP)
954#pragma omp declare target
956//#if !defined(GPUOffloadingSYCL)
957GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
958 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
959 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
960 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
961 [[maybe_unused]] double t,
962 [[maybe_unused]] double dt,
963 [[maybe_unused]] int normal,
964 [[maybe_unused]] double* __restrict__ F, // F[{{NUMBER_OF_UNKNOWNS}}]
967 logTraceInWith4Arguments("flux(...)", x, h, t, normal);
969 logTraceOut("flux(...)");
972#if defined(GPUOffloadingOMP)
973#pragma omp end declare target
977{% if FLUX_IMPLEMENTATION=="<user-defined>" %}
978void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::flux(
979 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
980 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
981 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
982 [[maybe_unused]] double t,
983 [[maybe_unused]] double dt,
984 [[maybe_unused]] int normal,
985 [[maybe_unused]] double* __restrict__ F // F[{{NUMBER_OF_UNKNOWNS}}]
987 logTraceInWith4Arguments("flux(...)", x, h, t, normal);
989 logTraceOut("flux(...)");
993{% if NCP_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
994#if defined(GPUOffloadingOMP)
995#pragma omp declare target
997GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
998 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
999 [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1000 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
1001 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1002 [[maybe_unused]] double t,
1003 [[maybe_unused]] double dt,
1004 [[maybe_unused]] int normal,
1005 [[maybe_unused]] double* __restrict__ BTimesDeltaQ, // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
1006 [[maybe_unused]] Offloadable
1008 logTraceInWith4Arguments("nonconservativeProduct(...)", x, h, t, normal);
1010 logTraceOut("nonconservativeProduct(...)");
1012#if defined(GPUOffloadingOMP)
1013#pragma omp end declare target
1017{% if NCP_IMPLEMENTATION=="<user-defined>" %}
1018void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::nonconservativeProduct(
1019 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1020 [[maybe_unused]] const double* __restrict__ deltaQ, // deltaQ[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1021 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
1022 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1023 [[maybe_unused]] double t,
1024 [[maybe_unused]] double dt,
1025 [[maybe_unused]] int normal,
1026 [[maybe_unused]] double* __restrict__ BTimesDeltaQ // BTimesDeltaQ[{{NUMBER_OF_UNKNOWNS}}]
1028 logTraceInWith4Arguments("nonconservativeProduct(...)", x, h, t, normal);
1030 logTraceOut("nonconservativeProduct(...)");
1034{% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
1035#if defined(GPUOffloadingOMP)
1036#pragma omp declare target
1038GPUCallableMethod void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
1039 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1040 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
1041 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1042 [[maybe_unused]] double t,
1043 [[maybe_unused]] double dt,
1044 [[maybe_unused]] double* __restrict__ S, // S[{{NUMBER_OF_UNKNOWNS}}]
1045 [[maybe_unused]] Offloadable
1047 logTraceInWith4Arguments("sourceTerm(...)", x, h, t, dt);
1049 // @todo implement and ensure that all entries of S are properly set
1050 for (int i = 0; i < NumberOfUnknowns; i++) {
1054 logTraceOut("sourceTerm(...)");
1056#if defined(GPUOffloadingOMP)
1057#pragma omp end declare target
1061{% if SOURCE_TERM_IMPLEMENTATION=="<user-defined>" %}
1062void {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::sourceTerm(
1063 [[maybe_unused]] const double* __restrict__ Q, // Q[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1064 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& x,
1065 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1066 [[maybe_unused]] double t,
1067 [[maybe_unused]] double dt,
1068 [[maybe_unused]] double* __restrict__ S // S[{{NUMBER_OF_UNKNOWNS}}]
1070 logTraceInWith4Arguments("sourceTerm(...)", x, h, t, dt);
1072 // @todo implement and ensure that all entries of S are properly set
1073 for (int i = 0; i < NumberOfUnknowns; i++) {
1077 logTraceOut("sourceTerm(...)");
1081{% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" and STATELESS_PDE_TERMS %}
1082#if defined(GPUOffloadingOMP)
1083#pragma omp declare target
1085GPUCallableMethod double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
1086 [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1087 [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1088 [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
1089 [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
1090 [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
1091 [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
1092 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
1093 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
1094 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1095 [[maybe_unused]] double t,
1096 [[maybe_unused]] double dt,
1097 [[maybe_unused]] int normal,
1098 [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
1099 [[maybe_unused]] double* __restrict__ AMDQ, // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
1100 [[maybe_unused]] Offloadable
1102 logTraceInWith4Arguments("solveRiemannProblem(...)", xR, xL, t, normal);
1104 logTraceOut("solveRiemannProblem(...)");
1106#if defined(GPUOffloadingOMP)
1107#pragma omp end declare target
1111{% if RIEMANN_SOLVER_IMPLEMENTATION=="<user-defined>" %}
1112double {{FULL_QUALIFIED_NAMESPACE}}::{{CLASSNAME}}::solveRiemannProblem(
1113 [[maybe_unused]] const double* __restrict__ QR, // QR[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1114 [[maybe_unused]] const double* __restrict__ QL, // QL[{{NUMBER_OF_UNKNOWNS}}+{{NUMBER_OF_AUXILIARY_VARIABLES}}]
1115 [[maybe_unused]] const double* __restrict__ FR, // FR[{{NUMBER_OF_UNKNOWNS}}]
1116 [[maybe_unused]] const double* __restrict__ FL, // FL[{{NUMBER_OF_UNKNOWNS}}]
1117 [[maybe_unused]] const double* __restrict__ LR, // LR[{{NUMBER_OF_UNKNOWNS}}]
1118 [[maybe_unused]] const double* __restrict__ LL, // LL[{{NUMBER_OF_UNKNOWNS}}]
1119 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xR,
1120 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& xL,
1121 [[maybe_unused]] const tarch::la::Vector<Dimensions, double>& h,
1122 [[maybe_unused]] double t,
1123 [[maybe_unused]] double dt,
1124 [[maybe_unused]] int normal,
1125 [[maybe_unused]] double* __restrict__ APDQ, // APDQ[{{NUMBER_OF_UNKNOWNS}}]
1126 [[maybe_unused]] double* __restrict__ AMDQ // AMDQ[{{NUMBER_OF_UNKNOWNS}}]
1128 logTraceInWith4Arguments("solveRiemannProblem(...)", xR, xL, t, normal);
1130 logTraceOut("solveRiemannProblem(...)");
1134 undefined=jinja2.DebugUndefined,
1138 d[
"FLUX_IMPLEMENTATION"] = flux_implementation
1139 d[
"NCP_IMPLEMENTATION"] = ncp_implementation
1140 d[
"EIGENVALUES_IMPLEMENTATION"] = eigenvalues_implementation
1141 d[
"RIEMANN_SOLVER_IMPLEMENTATION"] = riemann_solver_implementation
1142 d[
"SOURCE_TERM_IMPLEMENTATION"] = source_term_implementation
1143 d[
"STATELESS_PDE_TERMS"] = pde_terms_without_state
1144 return Template.render(**d)
create_compute_Riemann_kernel(flux_implementation, ncp_implementation, eigenvalues_implementation, riemann_solver_implementation, source_term_implementation, compute_max_eigenvalue_of_next_time_step, SolverVariant solver_variant, KernelVariant kernel_variant)
Return only the unqualified function call, i.e., without any namespaces.