25 #if defined(SharedOMP) and (!defined(__INTEL_LLVM_COMPILER) or !defined(GPUOffloadingOMP))
26 #pragma omp declare simd
28 template <
class QInEnumeratorType,
class QOutEnumeratorType>
30 double* __restrict__ QIn,
31 const QInEnumeratorType& QInEnumerator,
37 const QOutEnumeratorType& QOutEnumerator
47 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
48 #pragma omp declare simd
50 template <
class QInEnumeratorType,
class QOutEnumeratorType>
52 const double* __restrict__ QIn,
53 const QInEnumeratorType& QInEnumerator,
62 double* __restrict__ QDiffSrc,
63 const QOutEnumeratorType& QDiffSrcEnumerator,
94 template <
typename Solver,
class QInEnumeratorType,
class QOutEnumeratorType>
96 const double* __restrict__ QIn,
97 const QInEnumeratorType& QInEnumerator,
105 double* __restrict__ QDiffSrc,
106 const QOutEnumeratorType& QDiffSrcEnumerator,
119 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
120 #pragma omp declare simd
122 #if defined(GPUOffloadingOMP)
123 #pragma omp declare target
125 template <
typename QOutEnumeratorType>
127 const double* __restrict__ QDiffSrcX,
128 const double* __restrict__ QDiffSrcY,
129 const double* __restrict__ QDiffSrcZ,
130 const QOutEnumeratorType& QDiffSrcEnumerator,
137 double* __restrict__ QOut,
138 const QOutEnumeratorType& QOutEnumerator
140 #if defined(GPUOffloadingOMP)
141 #pragma omp end declare target
151 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
152 #pragma omp declare simd
154 template <
class QInEnumeratorType,
class QOutEnumeratorType>
156 const double* __restrict__ QIn,
157 const QInEnumeratorType& QInEnumerator,
165 double* __restrict__ QKODsp,
166 const QOutEnumeratorType& QKODspEnumerator
177 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
178 #pragma omp declare simd
180 #if defined(GPUOffloadingOMP)
181 #pragma omp declare target
183 template <
typename QOutEnumeratorType>
185 const double KOSigma,
186 const double* __restrict__ QKODspX,
187 const double* __restrict__ QKODspY,
188 const double* __restrict__ QKODspZ,
189 const QOutEnumeratorType& QKODspEnumerator,
196 double* __restrict__ QOut,
197 const QOutEnumeratorType& QOutEnumerator
199 #if defined(GPUOffloadingOMP)
200 #pragma omp end declare target
216 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
217 #pragma omp declare simd
219 template <
class QInEnumeratorType,
class QOutEnumeratorType>
221 const double* __restrict__ QIn,
222 const QInEnumeratorType& QInEnumerator,
231 double* __restrict__ QFlux,
232 const QOutEnumeratorType& QFluxEnumerator
239 template <
typename Solver,
class QInEnumeratorType,
class QOutEnumeratorType>
241 const double* __restrict__ QIn,
242 const QInEnumeratorType& QInEnumerator,
250 double* __restrict__ QFlux,
251 const QOutEnumeratorType& QFluxEnumerator
263 #if defined(SharedOMP) and ((!defined(__INTEL_LLVM_COMPILER) and !defined(__clang__) and !defined(__GNUC__)) or !defined(GPUOffloadingOMP))
264 #pragma omp declare simd
266 #if defined(GPUOffloadingOMP)
267 #pragma omp declare target
269 template <
typename QOutEnumeratorType>
271 const double* __restrict__ tempFluxX,
272 const double* __restrict__ tempFluxY,
273 const double* __restrict__ tempFluxZ,
274 const QOutEnumeratorType& fluxEnumerator,
281 double* __restrict__ QOut,
282 const QOutEnumeratorType& QOutEnumerator
284 #if defined(GPUOffloadingOMP)
285 #pragma omp end declare target
static void computeAuxiliaryVariables_LoopBody(double *__restrict__ QIn, const QInEnumeratorType &QInEnumerator, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, int normal, const QOutEnumeratorType &QOutEnumerator) InlineMethod
static void updateSolutionWithFlux_LoopBody(const double *__restrict__ tempFluxX, const double *__restrict__ tempFluxY, const double *__restrict__ tempFluxZ, const QOutEnumeratorType &fluxEnumerator, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, int unknown, double dt, double *__restrict__ QOut, const QOutEnumeratorType &QOutEnumerator) InlineMethod
Plain update of the solution with the (already computed) flux contributions in a finite differences scheme.
static void computeFlux_LoopBody(const double *__restrict__ QIn, const QInEnumeratorType &QInEnumerator, exahype2::fd::Flux flux, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, double t, double dt, int normal, double *__restrict__ QFlux, const QOutEnumeratorType &QFluxEnumerator) InlineMethod
This routine computes the flux term $F_i(Q)$ where $i$ is the argument normal.
static void updateSolutionWithDifferentialSourceTerm_LoopBody(const double *__restrict__ QDiffSrcX, const double *__restrict__ QDiffSrcY, const double *__restrict__ QDiffSrcZ, const QOutEnumeratorType &QDiffSrcEnumerator, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, int unknown, double dt, double *__restrict__ QOut, const QOutEnumeratorType &QOutEnumerator) InlineMethod
static void updateSolutionWithKODissipationTerm_LoopBody(const double KOSigma, const double *__restrict__ QKODspX, const double *__restrict__ QKODspY, const double *__restrict__ QKODspZ, const QOutEnumeratorType &QKODspEnumerator, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, int unknown, double dt, double *__restrict__ QOut, const QOutEnumeratorType &QOutEnumerator) InlineMethod
static void computeKreissOligerDissipationTerm_LoopBody(const double *__restrict__ QIn, const QInEnumeratorType &QInEnumerator, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, double t, double dt, int normal, double *__restrict__ QKODsp, const QOutEnumeratorType &QKODspEnumerator) InlineMethod
static void computeDifferentialSourceTerm_LoopBody(const double *__restrict__ QIn, const QInEnumeratorType &QInEnumerator, exahype2::fd::NonconservativeProduct DifferentialSource, const tarch::la::Vector< Dimensions, double > &patchCentre, const tarch::la::Vector< Dimensions, double > &patchSize, int patchIndex, const tarch::la::Vector< Dimensions, int > &volumeIndex, double t, double dt, int normal, double *__restrict__ QDiffSrc, const QOutEnumeratorType &QDiffSrcEnumerator, DifferentialSourceTermVariant variant) InlineMethod
This function calculates the source term that involves $B_i \nabla_i Q$ where $i$ is the spatial dimension.
DifferentialSourceTermVariant
std::function< void(const double *__restrict__ Q, const double *__restrict__ deltaQ, const tarch::la::Vector< Dimensions, double > &gridCellX, const tarch::la::Vector< Dimensions, double > &gridCellH, double t, double dt, int normal, double *__restrict__ DiffSrc)> NonconservativeProduct
std::function< void(const double *__restrict__ Q, const tarch::la::Vector< Dimensions, double > &faceCentre, const tarch::la::Vector< Dimensions, double > &gridCellH, double t, double dt, int normal, double *__restrict__ F)> Flux
For the generic kernels that I use here most of the time.
auto volumeIndex(Args... args)
#define InlineMethod
Generic identifier for inlined functions.