GridTraversalEvent.cpp
#include "GridTraversalEvent.h"

#include <sstream>
#include <algorithm>
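
// Implementation of the GridTraversalEvent record: plain accessors for every
// attribute, a toString() dump for debugging and, in Parallel builds, the MPI
// datatype machinery that ships whole events between ranks.
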
peano4::grid::GridTraversalEvent::GridTraversalEvent(tarch::la::Vector<Dimensions,double> __x, tarch::la::Vector<Dimensions,double> __h, std::bitset<TwoPowerD> __hasBeenRefined, std::bitset<TwoPowerD> __willBeRefined, std::bitset<TwoPowerD> __isVertexLocal, std::bitset<TwoPowerD> __isParentVertexLocal, std::bitset<TwoPowerD> __isVertexParentOfSubtree, std::bitset<TwoTimesD> __isFaceLocal, bool __isCellLocal, bool __isParentCellLocal, std::bitset<TwoPowerD> __isVertexAdjacentToParallelDomainBoundary, std::bitset<TwoTimesD> __isFaceAdjacentToParallelDomainBoundary, std::bitset<ThreePowerD> __isAdjacentCellLocal, tarch::la::Vector<TwoPowerD,int> __vertexDataFrom, tarch::la::Vector<TwoPowerD,int> __vertexDataTo, tarch::la::Vector<TwoTimesD,int> __faceDataFrom, tarch::la::Vector<TwoTimesD,int> __faceDataTo, int __cellData, tarch::la::Vector<Dimensions,int> __relativePositionToFather, int __invokingSpacetree, bool __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing) {
  setX( __x);
  setH( __h);
  setHasBeenRefined( __hasBeenRefined);
  setWillBeRefined( __willBeRefined);
  setIsVertexLocal( __isVertexLocal);
  setIsParentVertexLocal( __isParentVertexLocal);
  setIsVertexParentOfSubtree( __isVertexParentOfSubtree);
  setIsFaceLocal( __isFaceLocal);
  setIsCellLocal( __isCellLocal);
  setIsParentCellLocal( __isParentCellLocal);
  setIsVertexAdjacentToParallelDomainBoundary( __isVertexAdjacentToParallelDomainBoundary);
  setIsFaceAdjacentToParallelDomainBoundary( __isFaceAdjacentToParallelDomainBoundary);
  setIsAdjacentCellLocal( __isAdjacentCellLocal);
  setVertexDataFrom( __vertexDataFrom);
  setVertexDataTo( __vertexDataTo);
  setFaceDataFrom( __faceDataFrom);
  setFaceDataTo( __faceDataTo);
  setCellData( __cellData);
  setRelativePositionToFather( __relativePositionToFather);
  setInvokingSpacetree( __invokingSpacetree);
  setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing( __invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing);
}
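
// The copy constructor mirrors the argument-list constructor above: every
// attribute is funnelled through its public setter.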

peano4::grid::GridTraversalEvent::GridTraversalEvent(const GridTraversalEvent& copy) {
  setX( copy.getX() );
  setH( copy.getH() );
  setHasBeenRefined( copy.getHasBeenRefined() );
  setWillBeRefined( copy.getWillBeRefined() );
  setIsVertexLocal( copy.getIsVertexLocal() );
  setIsParentVertexLocal( copy.getIsParentVertexLocal() );
  setIsVertexParentOfSubtree( copy.getIsVertexParentOfSubtree() );
  setIsFaceLocal( copy.getIsFaceLocal() );
  setIsCellLocal( copy.getIsCellLocal() );
  setIsParentCellLocal( copy.getIsParentCellLocal() );
  setIsVertexAdjacentToParallelDomainBoundary( copy.getIsVertexAdjacentToParallelDomainBoundary() );
  setIsFaceAdjacentToParallelDomainBoundary( copy.getIsFaceAdjacentToParallelDomainBoundary() );
  setIsAdjacentCellLocal( copy.getIsAdjacentCellLocal() );
  setVertexDataFrom( copy.getVertexDataFrom() );
  setVertexDataTo( copy.getVertexDataTo() );
  setFaceDataFrom( copy.getFaceDataFrom() );
  setFaceDataTo( copy.getFaceDataTo() );
  setCellData( copy.getCellData() );
  setRelativePositionToFather( copy.getRelativePositionToFather() );
  setInvokingSpacetree( copy.getInvokingSpacetree() );
  setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing( copy.getInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing() );
}


std::string peano4::grid::GridTraversalEvent::toString() const {
  std::ostringstream out;
  out << "(";
  out << "x=" << getX();
  out << ",";
  out << "h=" << getH();
  out << ",";
  out << "hasBeenRefined=" << getHasBeenRefined();
  out << ",";
  out << "willBeRefined=" << getWillBeRefined();
  out << ",";
  out << "isVertexLocal=" << getIsVertexLocal();
  out << ",";
  out << "isParentVertexLocal=" << getIsParentVertexLocal();
  out << ",";
  out << "isVertexParentOfSubtree=" << getIsVertexParentOfSubtree();
  out << ",";
  out << "isFaceLocal=" << getIsFaceLocal();
  out << ",";
  out << "isCellLocal=" << _isCellLocal;
  out << ",";
  out << "isParentCellLocal=" << _isParentCellLocal;
  out << ",";
  out << "isVertexAdjacentToParallelDomainBoundary=" << getIsVertexAdjacentToParallelDomainBoundary();
  out << ",";
  out << "isFaceAdjacentToParallelDomainBoundary=" << getIsFaceAdjacentToParallelDomainBoundary();
  out << ",";
  out << "isAdjacentCellLocal=" << getIsAdjacentCellLocal();
  out << ",";
  out << "vertexDataFrom=" << _vertexDataFrom;
  out << ",";
  out << "vertexDataTo=" << _vertexDataTo;
  out << ",";
  out << "faceDataFrom=" << _faceDataFrom;
  out << ",";
  out << "faceDataTo=" << _faceDataTo;
  out << ",";
  out << "cellData=" << _cellData;
  out << ",";
  out << "relativePositionToFather=" << getRelativePositionToFather();
  out << ",";
  out << "invokingSpacetree=" << _invokingSpacetree;
  out << ",";
  out << "invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing=" << _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing;
  out << ")";
  return out.str();
}


tarch::la::Vector<Dimensions,double> peano4::grid::GridTraversalEvent::getX() const {

  tarch::la::Vector<Dimensions,double> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _x[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setX(const tarch::la::Vector<Dimensions,double>& value) {

  for( int i=0; i<Dimensions; i++) {
    _x[i] = value(i);
  }
}


double peano4::grid::GridTraversalEvent::getX(int index) const {
  return _x[index];
}


void peano4::grid::GridTraversalEvent::setX(int index, double value) {
  _x[index] = value;
}


tarch::la::Vector<Dimensions,double> peano4::grid::GridTraversalEvent::getH() const {

  tarch::la::Vector<Dimensions,double> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _h[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setH(const tarch::la::Vector<Dimensions,double>& value) {

  for( int i=0; i<Dimensions; i++) {
    _h[i] = value(i);
  }
}


double peano4::grid::GridTraversalEvent::getH(int index) const {
  return _h[index];
}


void peano4::grid::GridTraversalEvent::setH(int index, double value) {
  _h[index] = value;
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getHasBeenRefined() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _hasBeenRefined[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setHasBeenRefined(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _hasBeenRefined[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getHasBeenRefined(int index) const {
  return _hasBeenRefined[index];
}


void peano4::grid::GridTraversalEvent::setHasBeenRefined(int index, bool value) {
  _hasBeenRefined[index] = value;
}


void peano4::grid::GridTraversalEvent::flipHasBeenRefined(int index) {
  _hasBeenRefined[index] = not _hasBeenRefined[index];
}
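
// The same five accessors (whole-bitset get/set, per-index get/set, flip)
// repeat below for each remaining bitset attribute.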


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getWillBeRefined() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _willBeRefined[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setWillBeRefined(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _willBeRefined[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getWillBeRefined(int index) const {
  return _willBeRefined[index];
}


void peano4::grid::GridTraversalEvent::setWillBeRefined(int index, bool value) {
  _willBeRefined[index] = value;
}


void peano4::grid::GridTraversalEvent::flipWillBeRefined(int index) {
  _willBeRefined[index] = not _willBeRefined[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexLocal() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexLocal(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _isVertexLocal[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexLocal(int index) const {
  return _isVertexLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexLocal(int index, bool value) {
  _isVertexLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexLocal(int index) {
  _isVertexLocal[index] = not _isVertexLocal[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsParentVertexLocal() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isParentVertexLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsParentVertexLocal(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _isParentVertexLocal[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsParentVertexLocal(int index) const {
  return _isParentVertexLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsParentVertexLocal(int index, bool value) {
  _isParentVertexLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsParentVertexLocal(int index) {
  _isParentVertexLocal[index] = not _isParentVertexLocal[index];
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexParentOfSubtree() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexParentOfSubtree[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexParentOfSubtree(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _isVertexParentOfSubtree[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexParentOfSubtree(int index) const {
  return _isVertexParentOfSubtree[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexParentOfSubtree(int index, bool value) {
  _isVertexParentOfSubtree[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexParentOfSubtree(int index) {
  _isVertexParentOfSubtree[index] = not _isVertexParentOfSubtree[index];
}


std::bitset<TwoTimesD> peano4::grid::GridTraversalEvent::getIsFaceLocal() const {

  std::bitset<TwoTimesD> result;
  for (int i=0; i<TwoTimesD; i++) result[i] = _isFaceLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsFaceLocal(const std::bitset<TwoTimesD>& value) {

  for (int i=0; i<TwoTimesD; i++) _isFaceLocal[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsFaceLocal(int index) const {
  return _isFaceLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsFaceLocal(int index, bool value) {
  _isFaceLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsFaceLocal(int index) {
  _isFaceLocal[index] = not _isFaceLocal[index];
}


bool peano4::grid::GridTraversalEvent::getIsCellLocal() const {
  return _isCellLocal;
}


void peano4::grid::GridTraversalEvent::setIsCellLocal(bool value) {
  _isCellLocal = value;
}


bool peano4::grid::GridTraversalEvent::getIsParentCellLocal() const {
  return _isParentCellLocal;
}


void peano4::grid::GridTraversalEvent::setIsParentCellLocal(bool value) {
  _isParentCellLocal = value;
}


std::bitset<TwoPowerD> peano4::grid::GridTraversalEvent::getIsVertexAdjacentToParallelDomainBoundary() const {

  std::bitset<TwoPowerD> result;
  for (int i=0; i<TwoPowerD; i++) result[i] = _isVertexAdjacentToParallelDomainBoundary[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsVertexAdjacentToParallelDomainBoundary(const std::bitset<TwoPowerD>& value) {

  for (int i=0; i<TwoPowerD; i++) _isVertexAdjacentToParallelDomainBoundary[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsVertexAdjacentToParallelDomainBoundary(int index) const {
  return _isVertexAdjacentToParallelDomainBoundary[index];
}


void peano4::grid::GridTraversalEvent::setIsVertexAdjacentToParallelDomainBoundary(int index, bool value) {
  _isVertexAdjacentToParallelDomainBoundary[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsVertexAdjacentToParallelDomainBoundary(int index) {
  _isVertexAdjacentToParallelDomainBoundary[index] = not _isVertexAdjacentToParallelDomainBoundary[index];
}


std::bitset<TwoTimesD> peano4::grid::GridTraversalEvent::getIsFaceAdjacentToParallelDomainBoundary() const {

  std::bitset<TwoTimesD> result;
  for (int i=0; i<TwoTimesD; i++) result[i] = _isFaceAdjacentToParallelDomainBoundary[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsFaceAdjacentToParallelDomainBoundary(const std::bitset<TwoTimesD>& value) {

  for (int i=0; i<TwoTimesD; i++) _isFaceAdjacentToParallelDomainBoundary[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsFaceAdjacentToParallelDomainBoundary(int index) const {
  return _isFaceAdjacentToParallelDomainBoundary[index];
}


void peano4::grid::GridTraversalEvent::setIsFaceAdjacentToParallelDomainBoundary(int index, bool value) {
  _isFaceAdjacentToParallelDomainBoundary[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsFaceAdjacentToParallelDomainBoundary(int index) {
  _isFaceAdjacentToParallelDomainBoundary[index] = not _isFaceAdjacentToParallelDomainBoundary[index];
}


std::bitset<ThreePowerD> peano4::grid::GridTraversalEvent::getIsAdjacentCellLocal() const {

  std::bitset<ThreePowerD> result;
  for (int i=0; i<ThreePowerD; i++) result[i] = _isAdjacentCellLocal[i];
  return result;
}


void peano4::grid::GridTraversalEvent::setIsAdjacentCellLocal(const std::bitset<ThreePowerD>& value) {

  for (int i=0; i<ThreePowerD; i++) _isAdjacentCellLocal[i]=value[i];
}


bool peano4::grid::GridTraversalEvent::getIsAdjacentCellLocal(int index) const {
  return _isAdjacentCellLocal[index];
}


void peano4::grid::GridTraversalEvent::setIsAdjacentCellLocal(int index, bool value) {
  _isAdjacentCellLocal[index] = value;
}


void peano4::grid::GridTraversalEvent::flipIsAdjacentCellLocal(int index) {
  _isAdjacentCellLocal[index] = not _isAdjacentCellLocal[index];
}


tarch::la::Vector<TwoPowerD,int> peano4::grid::GridTraversalEvent::getVertexDataFrom() const {
  return _vertexDataFrom;
}


void peano4::grid::GridTraversalEvent::setVertexDataFrom(const tarch::la::Vector<TwoPowerD,int>& value) {
  _vertexDataFrom = value;
}


int peano4::grid::GridTraversalEvent::getVertexDataFrom(int index) const {
  return _vertexDataFrom(index);
}


void peano4::grid::GridTraversalEvent::setVertexDataFrom(int index, int value) {
  _vertexDataFrom(index) = value;
}


tarch::la::Vector<TwoPowerD,int> peano4::grid::GridTraversalEvent::getVertexDataTo() const {
  return _vertexDataTo;
}


void peano4::grid::GridTraversalEvent::setVertexDataTo(const tarch::la::Vector<TwoPowerD,int>& value) {
  _vertexDataTo = value;
}


int peano4::grid::GridTraversalEvent::getVertexDataTo(int index) const {
  return _vertexDataTo(index);
}


void peano4::grid::GridTraversalEvent::setVertexDataTo(int index, int value) {
  _vertexDataTo(index) = value;
}


tarch::la::Vector<TwoTimesD,int> peano4::grid::GridTraversalEvent::getFaceDataFrom() const {
  return _faceDataFrom;
}


void peano4::grid::GridTraversalEvent::setFaceDataFrom(const tarch::la::Vector<TwoTimesD,int>& value) {
  _faceDataFrom = value;
}


int peano4::grid::GridTraversalEvent::getFaceDataFrom(int index) const {
  return _faceDataFrom(index);
}


void peano4::grid::GridTraversalEvent::setFaceDataFrom(int index, int value) {
  _faceDataFrom(index) = value;
}


tarch::la::Vector<TwoTimesD,int> peano4::grid::GridTraversalEvent::getFaceDataTo() const {
  return _faceDataTo;
}


void peano4::grid::GridTraversalEvent::setFaceDataTo(const tarch::la::Vector<TwoTimesD,int>& value) {
  _faceDataTo = value;
}


int peano4::grid::GridTraversalEvent::getFaceDataTo(int index) const {
  return _faceDataTo(index);
}


void peano4::grid::GridTraversalEvent::setFaceDataTo(int index, int value) {
  _faceDataTo(index) = value;
}


int peano4::grid::GridTraversalEvent::getCellData() const {
  return _cellData;
}


void peano4::grid::GridTraversalEvent::setCellData(int value) {
  _cellData = value;
}


tarch::la::Vector<Dimensions,int> peano4::grid::GridTraversalEvent::getRelativePositionToFather() const {

  tarch::la::Vector<Dimensions,int> result;
  for( int i=0; i<Dimensions; i++) {
    result(i) = _relativePositionToFather[i];
  }
  return result;
}


void peano4::grid::GridTraversalEvent::setRelativePositionToFather(const tarch::la::Vector<Dimensions,int>& value) {

  for( int i=0; i<Dimensions; i++) {
    _relativePositionToFather[i] = value(i);
  }
}


int peano4::grid::GridTraversalEvent::getRelativePositionToFather(int index) const {
  return _relativePositionToFather[index];
}


void peano4::grid::GridTraversalEvent::setRelativePositionToFather(int index, int value) {
  _relativePositionToFather[index] = value;
}


int peano4::grid::GridTraversalEvent::getInvokingSpacetree() const {
  return _invokingSpacetree;
}


void peano4::grid::GridTraversalEvent::setInvokingSpacetree(int value) {
  _invokingSpacetree = value;
}


bool peano4::grid::GridTraversalEvent::getInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing() const {
  return _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing;
}


void peano4::grid::GridTraversalEvent::setInvokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing(bool value) {
  _invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing = value;
}


#ifdef Parallel

#if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
MPI_Datatype peano4::grid::GridTraversalEvent::Datatype = MPI_DATATYPE_NULL;
#endif


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getForkDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getJoinDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getBoundaryExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getMultiscaleDataExchangeDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
MPI_Datatype peano4::grid::GridTraversalEvent::getGlobalCommunciationDatatype() {
  return Datatype;
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeForkDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeJoinDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeBoundaryExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeMultiscaleDataExchangeDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


[[clang::map_mpi_datatype]]
void peano4::grid::GridTraversalEvent::freeGlobalCommunciationDatatype() {
  if (Datatype != MPI_DATATYPE_NULL){
    MPI_Type_free(&Datatype);
    Datatype = MPI_DATATYPE_NULL;
  }
}


int peano4::grid::GridTraversalEvent::getSenderDestinationRank() const {
  return _senderDestinationRank;
}
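
// initDatatype() constructs one derived MPI datatype that mirrors the
// in-memory layout of GridTraversalEvent: one (type, block length,
// displacement) entry per attribute, measured on a pair of dummy instances.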

void peano4::grid::GridTraversalEvent::initDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  peano4::grid::GridTraversalEvent  instances[2];
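
  // One increment per attribute that enters the datatype: 21 in total.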
  int NumberOfAttributes = 0;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;
  NumberOfAttributes++;

  MPI_Datatype* subtypes = new MPI_Datatype[NumberOfAttributes];
  int* blocklen = new int[NumberOfAttributes];
  MPI_Aint* disp = new MPI_Aint[NumberOfAttributes];

  int counter = 0;
  subtypes[counter] = MPI_DOUBLE;
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_DOUBLE;
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_UNSIGNED_LONG;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = TwoPowerD;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = TwoPowerD;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = TwoTimesD;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = TwoTimesD;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = Dimensions;
  counter++;
  subtypes[counter] = MPI_INT;
  blocklen[counter] = 1;
  counter++;
  subtypes[counter] = MPI_BYTE;
  blocklen[counter] = 1;
  counter++;

  MPI_Aint baseFirstInstance;
  MPI_Aint baseSecondInstance;
  MPI_Get_address( &instances[0], &baseFirstInstance );
  MPI_Get_address( &instances[1], &baseSecondInstance );

  counter = 0;
  MPI_Get_address( &(instances[0]._x.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._h.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._hasBeenRefined), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._willBeRefined), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isParentVertexLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexParentOfSubtree), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isFaceLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isParentCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isVertexAdjacentToParallelDomainBoundary), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isFaceAdjacentToParallelDomainBoundary), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._isAdjacentCellLocal), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._vertexDataFrom.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._vertexDataTo.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._faceDataFrom.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._faceDataTo.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._cellData), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._relativePositionToFather.data()[0]), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._invokingSpacetree), &disp[counter] );
  counter++;
  MPI_Get_address( &(instances[0]._invokingSpacetreeIsNotInvolvedInAnyDynamicLoadBalancing), &disp[counter] );
  counter++;

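  // Make all displacements relative to the first attribute and derive the
  // type's true extent from the distance between two consecutive instances,
  // so the resized datatype below also works for contiguous arrays of events.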
  MPI_Aint offset = disp[0] - baseFirstInstance;
  MPI_Aint extent = baseSecondInstance - baseFirstInstance - offset;
  for (int i=NumberOfAttributes-1; i>=0; i--) {
    disp[i] = disp[i] - disp[0];
  }

  int errorCode = 0;
  MPI_Datatype tmpType;
  errorCode += MPI_Type_create_struct( NumberOfAttributes, blocklen, disp, subtypes, &tmpType );
  errorCode += MPI_Type_create_resized( tmpType, offset, extent, &Datatype );
  errorCode += MPI_Type_commit( &Datatype );
  errorCode += MPI_Type_free( &tmpType );
  if (errorCode) std::cerr << "error constructing MPI datatype in " << __FILE__ << ":" << __LINE__ << std::endl;

  delete[] subtypes;
  delete[] blocklen;
  delete[] disp;

  #else
  // invoke each routine once to trigger lazy initialisation
  getForkDatatype();
  getJoinDatatype();
  getBoundaryExchangeDatatype();
  getMultiscaleDataExchangeDatatype();
  getGlobalCommunciationDatatype();
  #endif
}


void peano4::grid::GridTraversalEvent::shutdownDatatype() {
  #if !defined(__MPI_ATTRIBUTES_LANGUAGE_EXTENSION__)
  freeForkDatatype();
  freeJoinDatatype();
  freeBoundaryExchangeDatatype();
  freeMultiscaleDataExchangeDatatype();
  freeGlobalCommunciationDatatype();
  #else
  MPI_Datatype type = Datatype;
  MPI_Type_free( &type );
  #endif
}

void peano4::grid::GridTraversalEvent::send(const peano4::grid::GridTraversalEvent& buffer, int destination, int tag, MPI_Comm communicator ) {
  MPI_Send( &buffer, 1, Datatype, destination, tag, communicator);
}


void peano4::grid::GridTraversalEvent::receive(peano4::grid::GridTraversalEvent& buffer, int source, int tag, MPI_Comm communicator ) {
  MPI_Status status;
  MPI_Recv( &buffer, 1, Datatype, source, tag, communicator, &status);
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
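
// The overloads below post the MPI operation non-blockingly and then spin on
// MPI_Test: startCommunicationFunctor() runs once after the message is posted,
// while waitFunctor() runs on every unsuccessful poll. The *PollDangling*
// wrappers further down use exactly these hooks to emit deadlock warnings and
// to serve dangling messages while they wait.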

void peano4::grid::GridTraversalEvent::send(
  const peano4::grid::GridTraversalEvent& buffer,
  int destination,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Request sendRequestHandle;
  int flag = 0;
  MPI_Isend( &buffer, 1, Datatype, destination, tag, communicator, &sendRequestHandle );
  MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &sendRequestHandle, &flag, MPI_STATUS_IGNORE );
  }
}


void peano4::grid::GridTraversalEvent::receive(
  peano4::grid::GridTraversalEvent& buffer,
  int source,
  int tag,
  std::function<void()> startCommunicationFunctor,
  std::function<void()> waitFunctor,
  MPI_Comm communicator
) {
  MPI_Status status;
  MPI_Request receiveRequestHandle;
  int flag = 0;
  MPI_Irecv( &buffer, 1, Datatype, source, tag, communicator, &receiveRequestHandle );
  MPI_Test( &receiveRequestHandle, &flag, &status );
  startCommunicationFunctor();
  while (!flag) {
    waitFunctor();
    MPI_Test( &receiveRequestHandle, &flag, &status );
  }
  buffer._senderDestinationRank = status.MPI_SOURCE;
}
#endif

#ifdef Parallel
void peano4::grid::GridTraversalEvent::sendAndPollDanglingMessages(const peano4::grid::GridTraversalEvent& message, int destination, int tag, MPI_Comm communicator ) {
  peano4::grid::GridTraversalEvent::send(
    message, destination, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridTraversalEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridTraversalEvent", "sendAndPollDanglingMessages()", destination, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}


void peano4::grid::GridTraversalEvent::receiveAndPollDanglingMessages(peano4::grid::GridTraversalEvent& message, int source, int tag, MPI_Comm communicator ) {
  peano4::grid::GridTraversalEvent::receive(
    message, source, tag,
    [&]() {
      tarch::mpi::Rank::getInstance().setDeadlockWarningTimeStamp();
      tarch::mpi::Rank::getInstance().setDeadlockTimeOutTimeStamp();
    },
    [&]() {
      tarch::mpi::Rank::getInstance().writeTimeOutWarning( "peano4::grid::GridTraversalEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::mpi::Rank::getInstance().triggerDeadlockTimeOut( "peano4::grid::GridTraversalEvent", "receiveAndPollDanglingMessages()", source, tag );
      tarch::services::ServiceRepository::getInstance().receiveDanglingMessages();
    },
    communicator
  );
}
#endif
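
// Minimal usage sketch (illustration only, not part of this file; rank and
// tag values are placeholders). The datatype must be initialised before the
// first exchange and freed again before MPI shuts down:
//
//   peano4::grid::GridTraversalEvent::initDatatype();
//
//   peano4::grid::GridTraversalEvent event;
//   event.setCellData(23);
//   if (tarch::mpi::Rank::getInstance().getRank()==0) {
//     peano4::grid::GridTraversalEvent::send(
//       event, 1, 0, tarch::mpi::Rank::getInstance().getCommunicator()
//     );
//   }
//   else if (tarch::mpi::Rank::getInstance().getRank()==1) {
//     peano4::grid::GridTraversalEvent::receive(
//       event, 0, 0, tarch::mpi::Rank::getInstance().getCommunicator()
//     );
//   }
//
//   peano4::grid::GridTraversalEvent::shutdownDatatype();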