31 _m = tarch::allocateMemory<double>(
36 for (
int lhsRow = 0; lhsRow < lhs.
_rows; lhsRow++)
37 for (
int rhsRow = 0; rhsRow < rhs._rows; rhsRow++)
38 for (
int lhsCol = 0; lhsCol < lhs.
_cols; lhsCol++)
39 for (
int rhsCol = 0; rhsCol < rhs._cols; rhsCol++) {
41 lhsRow * rhs._rows + rhsRow,
42 lhsCol * rhs._cols + rhsCol
44 * rhs._m[rhs.serialise(rhsRow, rhsCol)];
52 std::initializer_list<std::initializer_list<double>> values
60 for (
typename std::initializer_list<
61 std::initializer_list<double>>::const_iterator p
65 for (
typename std::initializer_list<double>::const_iterator pp = p->begin();
77 _m = tarch::allocateMemory<double>(
99 std::initializer_list<std::initializer_list<double>> values
102 for (
typename std::initializer_list<
103 std::initializer_list<double>>::const_iterator p
107 for (
typename std::initializer_list<double>::const_iterator pp = p->begin();
110 assertion3(index < _cols * _rows, _cols, _rows, index);
140 bool result = (_rows == matrix.
_rows and _cols == matrix.
_cols);
158 return _m[serialise(
row,
col)];
166 return _m[serialise(
row,
col)];
171 for (
int i = 0; i < rows; i++) {
180 _m[serialise(
row,
col)] *= value;
185 std::fill_n(result, _rows, 0.0);
204 std::fill_n(result, _rows * matrix.
cols(), 0.0);
208 for (
int i = 0; i < _cols; i++) {
216 std::ostringstream msg;
217 msg <<
"(rows=" << _rows <<
",cols=" << _cols <<
",{";
234 msg << _m[serialise(
row,
col)];
237 if (not addLineBreaks)
248 std::ostringstream msg;
249 msg <<
"(entries=" << entries <<
",{";
250 for (
int i = 0; i < entries; i++) {
264 double* __restrict__ result,
265 const double* __restrict__ x,
268 batchedMultiplyAoS(result, x, batchCount, _rows, 0);
272 double* __restrict__ result,
273 const double* __restrict__ x,
278 assertion3(batchSize > 0, batchSize, resultSize, firstRow);
279 assertion3(resultSize > 0, batchSize, resultSize, firstRow);
280 assertion3(firstRow >= 0, batchSize, resultSize, firstRow);
282 std::fill_n(result, resultSize * batchSize, 0.0);
285 for (
int rowInResult = 0; rowInResult < resultSize; rowInResult++)
289 for (
int i = 0; i < batchSize; i++) {
291 rowInResult + firstRow < _rows,
296 result[rowInResult * batchSize + i]
297 += _m[serialise(rowInResult + firstRow,
col)]
298 * x[
col * batchSize + i];
309 repeatEveryKColumns >= 0,
316 double* oldData = _m;
320 const int numberOfInsertions = repeatEveryKColumns == 0
322 : (_cols / repeatEveryKColumns) * number;
323 _cols += numberOfInsertions;
325 _m = tarch::allocateMemory<double>(
329 std::fill_n(_m, _rows * _cols, 0.0);
332 for (
int col = 0;
col < oldCols;
col++) {
334 imageColumn += number;
335 if (repeatEveryKColumns >= 1) {
336 where += repeatEveryKColumns;
339 for (
int row = 0;
row < oldRows;
row++) {
340 _m[serialise(
row, imageColumn)] = oldData
341 [serialise(
row,
col, oldRows, oldCols)];
356 double* oldData = _m;
360 const int numberOfInsertions = repeatEveryKRows == 0
362 : (_rows / repeatEveryKRows) * number;
363 _rows += numberOfInsertions;
365 _m = tarch::allocateMemory<double>(
369 std::fill_n(_m, _rows * _cols, 0.0);
372 for (
int row = 0;
row < oldRows;
row++) {
375 if (repeatEveryKRows >= 1) {
376 where += repeatEveryKRows;
379 for (
int col = 0;
col < oldCols;
col++) {
380 _m[serialise(imageRow,
col)] = oldData
381 [serialise(
row,
col, oldRows, oldCols)];
390 double* oldData = _m;
396 _m = tarch::allocateMemory<double>(
400 std::fill_n(_m, _rows * _cols, 0.0);
404 int preImageColumn =
col < number ?
col :
col + 1;
405 _m[serialise(
row,
col)] = oldData
406 [serialise(
row, preImageColumn, oldRows, oldCols)];
413 double* oldData = _m;
414 _m = tarch::allocateMemory<double>(
418 std::fill_n(_m, _rows * _cols, 0.0);
422 int imageRow =
row + shift;
425 if (imageRow >= 0 and imageRow < _rows) {
426 _m[serialise(imageRow,
col)] = oldData[serialise(
row,
col)];
434 double* oldData = _m;
435 _m = tarch::allocateMemory<double>(
439 std::fill_n(_m, _rows * _cols, 0.0);
443 int imageCol =
col + shift;
446 if (imageCol >= 0 and imageCol < _cols) {
447 _m[serialise(
row, imageCol)] = oldData[serialise(
row,
col)];
460 int numberOfReplications,
461 int shiftAfterEveryReplication,
462 bool extendColumnsToAccommodateShifts
467 numberOfReplications,
468 shiftAfterEveryReplication
471 _rows % blockSize == 0,
473 numberOfReplications,
474 shiftAfterEveryReplication
477 numberOfReplications >= 2,
479 numberOfReplications,
480 shiftAfterEveryReplication
483 double* oldData = _m;
488 numberOfReplications >= 1,
489 numberOfReplications,
490 shiftAfterEveryReplication
493 shiftAfterEveryReplication >= 0,
494 numberOfReplications,
495 shiftAfterEveryReplication
498 const int numberOfBlocks = _rows / blockSize;
499 _rows = _rows * numberOfReplications;
500 if (extendColumnsToAccommodateShifts) {
501 _cols = _cols + shiftAfterEveryReplication * (numberOfReplications - 1);
504 _m = tarch::allocateMemory<double>(
509 std::fill_n(_m, _rows * _cols, 0.0);
511 for (
int block = 0; block < numberOfBlocks; block++) {
512 for (
int replication = 0; replication < numberOfReplications;
514 for (
int blockRow = 0; blockRow < blockSize; blockRow++) {
515 const int destRow = (block * numberOfReplications + replication
518 const int srcRow = block * blockSize + blockRow;
519 for (
int col = 0;
col < oldCols;
col++) {
520 const int destCol =
col + replication * shiftAfterEveryReplication;
521 if (destCol < _cols) {
522 _m[serialise(destRow, destCol)] = oldData
523 [serialise(srcRow,
col, oldRows, oldCols)];
535 int numberOfReplications,
536 int shiftAfterEveryReplication,
537 bool extendColumnsToAccommodateShifts
542 numberOfReplications,
543 shiftAfterEveryReplication
546 _rows % blockSize == 0,
548 numberOfReplications,
549 shiftAfterEveryReplication
552 numberOfReplications >= 2,
554 numberOfReplications,
555 shiftAfterEveryReplication
558 double* oldData = _m;
563 numberOfReplications >= 1,
564 numberOfReplications,
565 shiftAfterEveryReplication
568 shiftAfterEveryReplication >= 0,
569 numberOfReplications,
570 shiftAfterEveryReplication
573 const int numberOfBlocks = _cols / blockSize;
574 _cols = _cols * numberOfReplications;
575 if (extendColumnsToAccommodateShifts) {
576 _rows = _rows + shiftAfterEveryReplication * (numberOfReplications - 1);
579 _m = tarch::allocateMemory<double>(
584 std::fill_n(_m, _rows * _cols, 0.0);
586 for (
int block = 0; block < numberOfBlocks; block++) {
587 for (
int replication = 0; replication < numberOfReplications;
589 for (
int blockCol = 0; blockCol < blockSize; blockCol++) {
590 const int destCol = (block * numberOfReplications + replication
593 const int srcCol = block * blockSize + blockCol;
594 for (
int row = 0;
row < oldRows;
row++) {
595 const int destRow =
row + replication * shiftAfterEveryReplication;
596 if (destRow < _rows) {
597 _m[serialise(destRow, destCol)] = oldData
598 [serialise(
row, srcCol, oldRows, oldCols)];
636 for (
int i = 0; i < A.
cols(); i++) {
// No-op assertion macros (assertions compiled out): each expands to nothing,
// discarding both the checked expression and the diagnostic parameters.
// NOTE(review): expr is not evaluated at all here — assertion arguments must
// be side-effect free or behavior differs from an assertion-enabled build.
#define assertion2(expr, param0, param1)
#define assertion4(expr, param0, param1, param2, param3)
#define assertion3(expr, param0, param1, param2)
#define assertionEquals(lhs, rhs)
#define assertionMsg(expr, message)
tarch::la::DynamicMatrix kroneckerProduct(const tarch::la::DynamicMatrix &lhs, const tarch::la::DynamicMatrix &rhs)
Wrapper around the static routine, so callers do not have to spell out the fully-qualified name.
My standard matrix is a matrix where the size is fixed at compile time.
double & operator()(int row, int col)
~DynamicMatrix()
Free the array on the heap.
void multiply(double *result, double *x)
void batchedMultiplyAoS(double *__restrict__ result, const double *__restrict__ x, int batchCount, int resultSize, int firstRow)
This operation assumes that x holds a whole batch of vectors in AoS format.
void insertEmptyRows(int number, int where, int repeatEveryKColumns=0)
static std::string vectorToString(double *values, int entries, bool addLineBreaks=false)
I often need this in combination with the toString() operation above.
void removeColumn(int number)
void multiplyBySmallMatrix(double *result, const DynamicMatrix &matrix) const
void shiftColumnsRight(int shift, bool wrap=false)
DynamicMatrix & operator=(const DynamicMatrix &)=delete
bool operator==(double values[][Cols]) const
void shiftRowsDown(int shift, bool wrap=false)
Shift the rows down.
void insertEmptyColumns(int number, int where, int repeatEveryKColumns=0)
Insert zero columns.
void replicateRows(int blockSize, int numberOfReplications, int shiftAfterEveryReplication, bool extendColumnsToAccommodateShifts)
Split the matrix into blocks of rows of size blockSize.
DynamicMatrix(int rows, int cols)
Create empty matrix.
void replicateCols(int blockSize, int numberOfReplications, int shiftAfterEveryReplication, bool extendColumnsToAccommodateShifts)
std::string toString(bool addLineBreaks=false) const
static tarch::la::DynamicMatrix id(int rows)
Create a (square) identity matrix with `rows` rows and the same number of columns.
static int serialise(int row, int col, int Rows, int Cols)
void scale(double value)
Scale all entries.
Vector< Rows, Scalar > col(const Matrix< Rows, Cols, Scalar > &matrix, int whichColumn)
Extract a column from the matrix.
Matrix< Rows, Cols, Scalar > operator*(const Matrix< Rows, X, Scalar > &lhs, const Matrix< X, Cols, Scalar > &rhs)
bool equals(const Matrix< Rows, Cols, Scalar > &lhs, const Matrix< Rows, Cols, Scalar > &rhs, const Scalar &tolerance=NUMERICAL_ZERO_DIFFERENCE)
Compares two matrices for equality up to a numerical tolerance.
Vector< Cols, Scalar > row(const Matrix< Rows, Cols, Scalar > &matrix, int whichRow)
Extract row from matrix.
void freeMemory(void *data, MemoryLocation location, int device=accelerator::Device::HostDevice)
Free memory.
std::string toString(MemoryLocation value)
Heap: Create data on the heap of the local device.