// GridTraversalEvent.cpp

#include "GridTraversalEvent.h"

#include <sstream>
#include <iostream>
#include <algorithm>
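// GridTraversalEvent is a generated DaStGen-style record: it bundles the
// per-cell state of one spacetree traversal step (geometry, refinement flags,
// locality flags, data-movement bookkeeping) with accessors and MPI glue code.
// The usage sketches in the comments throughout this file are illustrative.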
peano4::grid::GridTraversalEvent::GridTraversalEvent(
  tarch::la::Vector<Dimensions,double>  __x,
  tarch::la::Vector<Dimensions,double>  __h,
  std::bitset<TwoPowerD>                __hasBeenRefined,
  std::bitset<TwoPowerD>                __willBeRefined,
  std::bitset<TwoPowerD>                __isVertexLocal,
  std::bitset<TwoPowerD>                __isParentVertexLocal,
  std::bitset<TwoPowerD>                __isVertexParentOfSubtree,
  std::bitset<TwoTimesD>                __isFaceLocal,
  bool                                  __isCellLocal,
  bool                                  __isParentCellLocal,
  std::bitset<TwoPowerD>                __isVertexAdjacentToParallelDomainBoundary,
  std::bitset<TwoTimesD>                __isFaceAdjacentToParallelDomainBoundary,
  tarch::la::Vector<TwoPowerD,int>      __numberOfAdjacentTreesPerVertex,
  std::bitset<ThreePowerD>              __isAdjacentCellLocal,
  tarch::la::Vector<TwoPowerD,int>      __vertexDataFrom,
  tarch::la::Vector<TwoPowerD,int>      __vertexDataTo,
  tarch::la::Vector<TwoTimesD,int>      __faceDataFrom,
  tarch::la::Vector<TwoTimesD,int>      __faceDataTo,
  int                                   __cellData,
  tarch::la::Vector<Dimensions,int>     __relativePositionToFather,
  int                                   __invokingSpacetree,
  bool                                  __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing
) {
  setX( __x);
  setH( __h);
  setHasBeenRefined( __hasBeenRefined);
  setWillBeRefined( __willBeRefined);
  setIsVertexLocal( __isVertexLocal);
  setIsParentVertexLocal( __isParentVertexLocal);
  setIsVertexParentOfSubtree( __isVertexParentOfSubtree);
  setIsFaceLocal( __isFaceLocal);
  setIsCellLocal( __isCellLocal);
  setIsParentCellLocal( __isParentCellLocal);
  setIsVertexAdjacentToParallelDomainBoundary( __isVertexAdjacentToParallelDomainBoundary);
  setIsFaceAdjacentToParallelDomainBoundary( __isFaceAdjacentToParallelDomainBoundary);
  setNumberOfAdjacentTreesPerVertex( __numberOfAdjacentTreesPerVertex);
  setIsAdjacentCellLocal( __isAdjacentCellLocal);
  setVertexDataFrom( __vertexDataFrom);
  setVertexDataTo( __vertexDataTo);
  setFaceDataFrom( __faceDataFrom);
  setFaceDataTo( __faceDataTo);
  setCellData( __cellData);
  setRelativePositionToFather( __relativePositionToFather);
  setInvokingSpacetree( __invokingSpacetree);
  setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing( __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing);
}
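// Usage sketch (illustrative; assumes the default constructor that the
// generated header declares):
//
//   peano4::grid::GridTraversalEvent event;
//   event.setX( tarch::la::Vector<Dimensions,double>(0.0) );   // cell offset
//   event.setH( tarch::la::Vector<Dimensions,double>(1.0) );   // cell width
//   event.setIsCellLocal( true );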
peano4::grid::GridTraversalEvent::GridTraversalEvent(const GridTraversalEvent& copy) {
  setX( copy.getX() );
  setH( copy.getH() );
  setHasBeenRefined( copy.getHasBeenRefined() );
  setWillBeRefined( copy.getWillBeRefined() );
  setIsVertexLocal( copy.getIsVertexLocal() );
  setIsParentVertexLocal( copy.getIsParentVertexLocal() );
  setIsVertexParentOfSubtree( copy.getIsVertexParentOfSubtree() );
  setIsFaceLocal( copy.getIsFaceLocal() );
  setIsCellLocal( copy.getIsCellLocal() );
  setIsParentCellLocal( copy.getIsParentCellLocal() );
  setIsVertexAdjacentToParallelDomainBoundary( copy.getIsVertexAdjacentToParallelDomainBoundary() );
  setIsFaceAdjacentToParallelDomainBoundary( copy.getIsFaceAdjacentToParallelDomainBoundary() );
  setNumberOfAdjacentTreesPerVertex( copy.getNumberOfAdjacentTreesPerVertex() );
  setIsAdjacentCellLocal( copy.getIsAdjacentCellLocal() );
  setVertexDataFrom( copy.getVertexDataFrom() );
  setVertexDataTo( copy.getVertexDataTo() );
  setFaceDataFrom( copy.getFaceDataFrom() );
  setFaceDataTo( copy.getFaceDataTo() );
  setCellData( copy.getCellData() );
  setRelativePositionToFather( copy.getRelativePositionToFather() );
  setInvokingSpacetree( copy.getInvokingSpacetree() );
  setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing( copy.getInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing() );
}
std::string peano4::grid::GridTraversalEvent::toString() const {
  std::ostringstream out;
  out << "(";
  out << "x=" << getX();
  out << ",";
  out << "h=" << getH();
  out << ",";
  out << "hasBeenRefined=" << getHasBeenRefined();
  out << ",";
  out << "willBeRefined=" << getWillBeRefined();
  out << ",";
  out << "isVertexLocal=" << getIsVertexLocal();
  out << ",";
  out << "isParentVertexLocal=" << getIsParentVertexLocal();
  out << ",";
  out << "isVertexParentOfSubtree=" << getIsVertexParentOfSubtree();
  out << ",";
  out << "isFaceLocal=" << getIsFaceLocal();
  out << ",";
  out << "isCellLocal=" << _isCellLocal;
  out << ",";
  out << "isParentCellLocal=" << _isParentCellLocal;
  out << ",";
  out << "isVertexAdjacentToParallelDomainBoundary=" << getIsVertexAdjacentToParallelDomainBoundary();
  out << ",";
  out << "isFaceAdjacentToParallelDomainBoundary=" << getIsFaceAdjacentToParallelDomainBoundary();
  out << ",";
  out << "numberOfAdjacentTreesPerVertex=" << _numberOfAdjacentTreesPerVertex;
  out << ",";
  out << "isAdjacentCellLocal=" << getIsAdjacentCellLocal();
  out << ",";
  out << "vertexDataFrom=" << _vertexDataFrom;
  out << ",";
  out << "vertexDataTo=" << _vertexDataTo;
  out << ",";
  out << "faceDataFrom=" << _faceDataFrom;
  out << ",";
  out << "faceDataTo=" << _faceDataTo;
  out << ",";
  out << "cellData=" << _cellData;
  out << ",";
  out << "relativePositionToFather=" << getRelativePositionToFather();
  out << ",";
  out << "invokingSpacetree=" << _invokingSpacetree;
  out << ",";
  out << "invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing=" << _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing;
  out << ")";
  return out.str();
}
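// toString() mainly serves assertions and trace logs. An illustrative,
// shortened d=2 result:
//
//   (x=[0,0],h=[1,1],hasBeenRefined=0000,willBeRefined=0000,...,cellData=0,...)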
tarch::la::Vector<Dimensions,double> peano4::grid::GridTraversalEvent::getX() const {
  tarch::la::Vector<Dimensions,double> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _x[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setX(const tarch::la::Vector<Dimensions,double>& value) {
  for( int i=0; i<Dimensions; i++) {
    _x[i] = value(i);
  }
}


double peano4::grid::GridTraversalEvent::getX(int index) const {
  return _x[index];
}


void peano4::grid::GridTraversalEvent::setX(int index, double value) {
  _x[index] = value;
}


tarch::la::Vector<Dimensions,double> peano4::grid::GridTraversalEvent::getH() const {
  tarch::la::Vector<Dimensions,double> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _h[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setH(const tarch::la::Vector<Dimensions,double>& value) {
  for( int i=0; i<Dimensions; i++) {
    _h[i] = value(i);
  }
}


double peano4::grid::GridTraversalEvent::getH(int index) const {
  return _h[index];
}


void peano4::grid::GridTraversalEvent::setH(int index, double value) {
  _h[index] = value;
}
std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getHasBeenRefined() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _hasBeenRefined[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setHasBeenRefined(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _hasBeenRefined[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getHasBeenRefined(int index) const {
  return _hasBeenRefined[index];
}


void peano4::grid::GridTraversalEvent::setHasBeenRefined(int index, bool value) {
  _hasBeenRefined[index] = value;
}


void peano4::grid::GridTraversalEvent::flipHasBeenRefined(int index) {
  _hasBeenRefined[index] = not _hasBeenRefined[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getWillBeRefined() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _willBeRefined[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setWillBeRefined(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _willBeRefined[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getWillBeRefined(int index) const {
  return _willBeRefined[index];
}


void peano4::grid::GridTraversalEvent::setWillBeRefined(int index, bool value) {
  _willBeRefined[index] = value;
}


void peano4::grid::GridTraversalEvent::flipWillBeRefined(int index) {
  _willBeRefined[index] = not _willBeRefined[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexLocal() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexLocal(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _isVertexLocal[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexLocal(int index) const {
  return _isVertexLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexLocal(int index, bool value) {
  _isVertexLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexLocal(int index) {
  _isVertexLocal[index] = not _isVertexLocal[index];
}
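// Every bitset attribute carries the same generated quintet: bulk get/set plus
// indexed get/set/flip. Typical read access during a traversal (illustrative):
//
//   for (int v=0; v<TwoPowerD; v++) {
//     if (event.getIsVertexLocal(v)) {
//       // vertex v of the current cell resides on the local tree
//     }
//   }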
std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsParentVertexLocal() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isParentVertexLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsParentVertexLocal(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _isParentVertexLocal[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsParentVertexLocal(int index) const {
  return _isParentVertexLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsParentVertexLocal(int index, bool value) {
  _isParentVertexLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsParentVertexLocal(int index) {
  _isParentVertexLocal[index] = not _isParentVertexLocal[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexParentOfSubtree() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexParentOfSubtree[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexParentOfSubtree(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _isVertexParentOfSubtree[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexParentOfSubtree(int index) const {
  return _isVertexParentOfSubtree[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexParentOfSubtree(int index, bool value) {
  _isVertexParentOfSubtree[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexParentOfSubtree(int index) {
  _isVertexParentOfSubtree[index] = not _isVertexParentOfSubtree[index];
}


std::bitset<TwoTimesD> peano4::grid::GridTraversalEvent::getIsFaceLocal() const {
  std::bitset<TwoTimesD> result;
  for (int i=0; i<TwoTimesD; i++) result[i] = _isFaceLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsFaceLocal(const std::bitset<TwoTimesD>& value) {
  for (int i=0; i<TwoTimesD; i++) _isFaceLocal[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsFaceLocal(int index) const {
  return _isFaceLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsFaceLocal(int index, bool value) {
  _isFaceLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsFaceLocal(int index) {
  _isFaceLocal[index] = not _isFaceLocal[index];
}


bool peano4::grid::GridTraversalEvent::getIsCellLocal() const {
  return _isCellLocal;
}


void peano4::grid::GridTraversalEvent::setIsCellLocal(bool value) {
  _isCellLocal = value;
}


bool peano4::grid::GridTraversalEvent::getIsParentCellLocal() const {
  return _isParentCellLocal;
}


void peano4::grid::GridTraversalEvent::setIsParentCellLocal(bool value) {
  _isParentCellLocal = value;
}
std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexAdjacentToParallelDomainBoundary() const {
  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexAdjacentToParallelDomainBoundary[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexAdjacentToParallelDomainBoundary(const std::bitset<TwoPowerD>& value) {
  for (int i=0; i<TwoPowerD; i++) _isVertexAdjacentToParallelDomainBoundary[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexAdjacentToParallelDomainBoundary(int index) const {
  return _isVertexAdjacentToParallelDomainBoundary[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexAdjacentToParallelDomainBoundary(int index, bool value) {
  _isVertexAdjacentToParallelDomainBoundary[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexAdjacentToParallelDomainBoundary(int index) {
  _isVertexAdjacentToParallelDomainBoundary[index] = not _isVertexAdjacentToParallelDomainBoundary[index];
}


std::bitset<TwoTimesD> peano4::grid::GridTraversalEvent::getIsFaceAdjacentToParallelDomainBoundary() const {
  std::bitset<TwoTimesD> result;
  for (int i=0; i<TwoTimesD; i++) result[i] = _isFaceAdjacentToParallelDomainBoundary[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsFaceAdjacentToParallelDomainBoundary(const std::bitset<TwoTimesD>& value) {
  for (int i=0; i<TwoTimesD; i++) _isFaceAdjacentToParallelDomainBoundary[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsFaceAdjacentToParallelDomainBoundary(int index) const {
  return _isFaceAdjacentToParallelDomainBoundary[index];
}


void peano4::grid::GridTraversalEvent::setIsFaceAdjacentToParallelDomainBoundary(int index, bool value) {
  _isFaceAdjacentToParallelDomainBoundary[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsFaceAdjacentToParallelDomainBoundary(int index) {
  _isFaceAdjacentToParallelDomainBoundary[index] = not _isFaceAdjacentToParallelDomainBoundary[index];
}


tarch::la::Vector<TwoPowerD,int> peano4::grid::GridTraversalEvent::getNumberOfAdjacentTreesPerVertex() const {
  return _numberOfAdjacentTreesPerVertex;
}


void peano4::grid::GridTraversalEvent::setNumberOfAdjacentTreesPerVertex(const tarch::la::Vector<TwoPowerD,int>& value) {
  _numberOfAdjacentTreesPerVertex = value;
}


int peano4::grid::GridTraversalEvent::getNumberOfAdjacentTreesPerVertex(int index) const {
  return _numberOfAdjacentTreesPerVertex(index);
}


void peano4::grid::GridTraversalEvent::setNumberOfAdjacentTreesPerVertex(int index, int value) {
  _numberOfAdjacentTreesPerVertex(index) = value;
}
std::bitset<ThreePowerD> peano4::grid::GridTraversalEvent::getIsAdjacentCellLocal() const {
  std::bitset<ThreePowerD> result;
  for (int i=0; i<ThreePowerD; i++) result[i] = _isAdjacentCellLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsAdjacentCellLocal(const std::bitset<ThreePowerD>& value) {
  for (int i=0; i<ThreePowerD; i++) _isAdjacentCellLocal[i] = value[i];
}


bool peano4::grid::GridTraversalEvent::getIsAdjacentCellLocal(int index) const {
  return _isAdjacentCellLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsAdjacentCellLocal(int index, bool value) {
  _isAdjacentCellLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsAdjacentCellLocal(int index) {
  _isAdjacentCellLocal[index] = not _isAdjacentCellLocal[index];
}
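// Note: _isAdjacentCellLocal holds ThreePowerD flags, i.e. one per cell of the
// 3^d neighbourhood around the current cell. That reading of the index range
// is an interpretation; the generator emits only the raw accessors.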
tarch::la::Vector<TwoPowerD,int> peano4::grid::GridTraversalEvent::getVertexDataFrom() const {
  return _vertexDataFrom;
}


void peano4::grid::GridTraversalEvent::setVertexDataFrom(const tarch::la::Vector<TwoPowerD,int>& value) {
  _vertexDataFrom = value;
}


int peano4::grid::GridTraversalEvent::getVertexDataFrom(int index) const {
  return _vertexDataFrom(index);
}


void peano4::grid::GridTraversalEvent::setVertexDataFrom(int index, int value) {
  _vertexDataFrom(index) = value;
}


tarch::la::Vector<TwoPowerD,int> peano4::grid::GridTraversalEvent::getVertexDataTo() const {
  return _vertexDataTo;
}


void peano4::grid::GridTraversalEvent::setVertexDataTo(const tarch::la::Vector<TwoPowerD,int>& value) {
  _vertexDataTo = value;
}


int peano4::grid::GridTraversalEvent::getVertexDataTo(int index) const {
  return _vertexDataTo(index);
}


void peano4::grid::GridTraversalEvent::setVertexDataTo(int index, int value) {
  _vertexDataTo(index) = value;
}


tarch::la::Vector<TwoTimesD,int> peano4::grid::GridTraversalEvent::getFaceDataFrom() const {
  return _faceDataFrom;
}


void peano4::grid::GridTraversalEvent::setFaceDataFrom(const tarch::la::Vector<TwoTimesD,int>& value) {
  _faceDataFrom = value;
}


int peano4::grid::GridTraversalEvent::getFaceDataFrom(int index) const {
  return _faceDataFrom(index);
}


void peano4::grid::GridTraversalEvent::setFaceDataFrom(int index, int value) {
  _faceDataFrom(index) = value;
}


tarch::la::Vector<TwoTimesD,int> peano4::grid::GridTraversalEvent::getFaceDataTo() const {
  return _faceDataTo;
}


void peano4::grid::GridTraversalEvent::setFaceDataTo(const tarch::la::Vector<TwoTimesD,int>& value) {
  _faceDataTo = value;
}


int peano4::grid::GridTraversalEvent::getFaceDataTo(int index) const {
  return _faceDataTo(index);
}


void peano4::grid::GridTraversalEvent::setFaceDataTo(int index, int value) {
  _faceDataTo(index) = value;
}
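// The From/To vectors and cellData encode where vertex, face and cell data are
// read from and written to while the event is processed (stack numbers in
// Peano; this is background knowledge about the framework, not derivable from
// this file alone). Inspection sketch (illustrative):
//
//   for (int f=0; f<TwoTimesD; f++) {
//     int in  = event.getFaceDataFrom(f);
//     int out = event.getFaceDataTo(f);
//   }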
int peano4::grid::GridTraversalEvent::getCellData() const {
  return _cellData;
}


void peano4::grid::GridTraversalEvent::setCellData(int value) {
  _cellData = value;
}


tarch::la::Vector<Dimensions,int> peano4::grid::GridTraversalEvent::getRelativePositionToFather() const {
  tarch::la::Vector<Dimensions,int> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _relativePositionToFather[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setRelativePositionToFather(const tarch::la::Vector<Dimensions,int>& value) {
  for( int i=0; i<Dimensions; i++) {
    _relativePositionToFather[i] = value(i);
  }
}


int peano4::grid::GridTraversalEvent::getRelativePositionToFather(int index) const {
  return _relativePositionToFather[index];
}


void peano4::grid::GridTraversalEvent::setRelativePositionToFather(int index, int value) {
  _relativePositionToFather[index] = value;
}


int peano4::grid::GridTraversalEvent::getInvokingSpacetree() const {
  return _invokingSpacetree;
}


void peano4::grid::GridTraversalEvent::setInvokingSpacetree(int value) {
  _invokingSpacetree = value;
}


bool peano4::grid::GridTraversalEvent::getInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing() const {
  return _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing;
}


void peano4::grid::GridTraversalEvent::setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing(bool value) {
  _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing = value;
}
#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::grid::GridTraversalEvent::Datatype = MPI_DATATYPE_NULL;
#endif
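// Two compilation paths: with the LLVM MPI attributes language extension, the
// [[clang::map_mpi_datatype]] annotations below let the toolchain derive the
// datatype lazily; without it, the static Datatype instance above is built
// explicitly in initDatatype().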
// All five exchange datatypes share the one static Datatype instance.
[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


int peano4::grid::GridTraversalEvent::getSenderDestinationRank() const {
  return _senderDestinationRank;
}
void peano4::grid::GridTraversalEvent::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::grid::GridTraversalEvent  instances[2];

  const int NumberOfAttributes = 22;   // one entry per record attribute

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int*          blocklen = new int[NumberOfAttributes];
  MPI_Aint*     disp     = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_DOUBLE;          // _x
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_DOUBLE;          // _h
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _hasBeenRefined (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _willBeRefined (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isVertexLocal (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isParentVertexLocal (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isVertexParentOfSubtree (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isFaceLocal (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;            // _isCellLocal
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;            // _isParentCellLocal
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isVertexAdjacentToParallelDomainBoundary (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isFaceAdjacentToParallelDomainBoundary (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;             // _numberOfAdjacentTreesPerVertex
  blocklen[counter] = TwoPowerD;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;   // _isAdjacentCellLocal (bitset)
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;             // _vertexDataFrom
  blocklen[counter] = TwoPowerD;
  counter++;
  subtypes[counter] = MPI_INT;             // _vertexDataTo
  blocklen[counter] = TwoPowerD;
  counter++;
  subtypes[counter] = MPI_INT;             // _faceDataFrom
  blocklen[counter] = TwoTimesD;
  counter++;
  subtypes[counter] = MPI_INT;             // _faceDataTo
  blocklen[counter] = TwoTimesD;
  counter++;
  subtypes[counter] = MPI_INT;             // _cellData
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;             // _relativePositionToFather
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_INT;             // _invokingSpacetree
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;            // _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._x.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._h.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._hasBeenRefined), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._willBeRefined), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isParentVertexLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexParentOfSubtree), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isFaceLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isParentCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexAdjacentToParallelDomainBoundary), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isFaceAdjacentToParallelDomainBoundary), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._numberOfAdjacentTreesPerVertex.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isAdjacentCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._vertexDataFrom.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._vertexDataTo.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._faceDataFrom.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._faceDataTo.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._cellData), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._relativePositionToFather.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._invokingSpacetree), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing), &disp[counter] );
  counter++;

  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  // Normalise displacements relative to the first attribute; iterate backwards
  // so disp[0] is rewritten last.
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;

  #else
  // invoke each routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}
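// The struct type is resized so that its extent matches the distance between
// two consecutive GridTraversalEvent objects; only then can arrays of events
// be transferred with count > 1. Sanity-check sketch (illustrative, assumes
// <cassert>):
//
//   MPI_Aint lb, extent;
//   MPI_Type_get_extent( Datatype, &lb, &extent );
//   assert( extent == sizeof(peano4::grid::GridTraversalEvent) );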
void peano4::grid::GridTraversalEvent::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeForkDatatype();
  freeJoinDatatype();
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}


void peano4::grid::GridTraversalEvent::send(const peano4::grid::GridTraversalEvent& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::grid::GridTraversalEvent::receive(peano4::grid::GridTraversalEvent& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
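// Blocking exchange sketch (illustrative; rank numbers and tag are made up):
//
//   peano4::grid::GridTraversalEvent event;
//   if (tarch::mpi::Rank::getInstance().getRank()==0) {
//     peano4::grid::GridTraversalEvent::send( event, 1, 0, MPI_COMM_WORLD );
//   }
//   else {
//     peano4::grid::GridTraversalEvent::receive( event, 0, 0, MPI_COMM_WORLD );
//   }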
void peano4::grid::GridTraversalEvent::send(
  const peano4::grid::GridTraversalEvent& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int         flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::grid::GridTraversalEvent::receive(
  peano4::grid::GridTraversalEvent& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status  status;
  MPI_Request receiveRequestHandle;
  int         flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
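// The two functors decouple the data transfer from progress guarantees: the
// first runs once, immediately after the MPI_Isend/MPI_Irecv has been issued;
// the second runs in every polling iteration until MPI_Test signals completion.
// Sketch (illustrative):
//
//   peano4::grid::GridTraversalEvent::receive(
//     event, source, tag,
//     [&]() { /* e.g. arm deadlock timers */ },
//     [&]() { /* e.g. drain dangling messages while waiting */ },
//     communicator
//   );
//
// The AndPollDanglingMessages wrappers below instantiate exactly this pattern.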
#endif


#ifdef Parallel
void peano4::grid::GridTraversalEvent::sendAndPollDanglingMessages(const peano4::grid::GridTraversalEvent& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::grid::GridTraversalEvent::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridTraversalEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridTraversalEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::grid::GridTraversalEvent::receiveAndPollDanglingMessages(peano4::grid::GridTraversalEvent& message, int source, int tag, MPI_Comm communicator ) {
  peano4::grid::GridTraversalEvent::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridTraversalEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridTraversalEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif