IntegralLines.h
/// \class ttk::IntegralLines
/// \brief TTK processing package for the computation of edge-based integral
/// lines of the gradient of an input scalar field defined on a PL manifold.

#pragma once

// base code includes
#include <ArrayLinkedList.h>
#include <Geometry.h>
#include <ScalarFieldCriticalPoints.h>
#include <Triangulation.h>
// std includes
#include <algorithm>
#include <iterator>
#include <limits>
#include <numeric>
#include <string>
#include <unordered_map>
#include <unordered_set>

#define INTEGRAL_LINE_TABULAR_SIZE 50
#ifdef TTK_ENABLE_MPI
#define INTEGRAL_LINE_IS_ELEMENT_TO_PROCESS 0
#define INTEGRAL_LINE_IS_MESSAGE_SIZE 1
#endif
namespace ttk {
  namespace intgl {

    /**
     * @brief Struct containing the data of an integral line: the trajectory
     * (vertex identifiers), the distance from the seed at each vertex, the
     * local vertex identifiers along the line, the seed identifier and the
     * fork identifier.
     */
    struct IntegralLine {
      std::vector<ttk::SimplexId> trajectory;
      std::vector<double> distanceFromSeed;
      std::vector<ttk::SimplexId> localVertexIdentifier;
      ttk::SimplexId seedIdentifier;
      ttk::SimplexId forkIdentifier;
    };
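    // Usage sketch: integral lines are stored per thread in a
    // ttk::ArrayLinkedList and appended with addArrayElement(), as done in
    // prepareForTask() below. Given a list `list`, a seed vertex `v` and a
    // seed identifier `seedId` (hypothetical names for this illustration):
    //
    //   ttk::intgl::IntegralLine *line
    //     = list.addArrayElement(ttk::intgl::IntegralLine{
    //       std::vector<ttk::SimplexId>(1, v), std::vector<double>(1, 0),
    //       std::vector<ttk::SimplexId>(1, 0), seedId, -1});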

#ifdef TTK_ENABLE_MPI
    /**
     * @brief Struct used to match the ghost vertices and edges of an integral
     * line segment shared with a neighboring process.
     */
    struct GhostElementsToSort {
      ttk::SimplexId ownedGlobalId;
      ttk::SimplexId minLocalVertexId;
      ttk::SimplexId seedIdentifier;
      ttk::SimplexId forkIdentifier{-1};
      ttk::SimplexId globalEdgeId{-1};
      ttk::SimplexId ghostVertexLocalId;
      ttk::SimplexId ghostEdgeLocalId;
    };

    inline bool operator<(const GhostElementsToSort &left,
                          const GhostElementsToSort &right) {
      if(left.seedIdentifier != right.seedIdentifier) {
        return left.seedIdentifier < right.seedIdentifier;
      }
      if(left.forkIdentifier != right.forkIdentifier) {
        return left.forkIdentifier < right.forkIdentifier;
      }
      return left.minLocalVertexId < right.minLocalVertexId;
    }
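    // Usage sketch: two neighboring processes fill their respective
    // unmatchedGhosts vectors (built in getGlobalIdentifiers() below) with
    // the same integral line segments; sorting both sides with this
    // comparator (seed, then fork, then minimum local vertex identifier)
    // places matching segments at the same index. Given a hypothetical
    // container `ghosts`:
    //
    //   std::vector<GhostElementsToSort> ghosts;
    //   std::sort(ghosts.begin(), ghosts.end()); // uses the operator< above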

    /* Counters keeping track of the number of integral lines
     * left to compute. Each counter is aligned on a cache line
     * to prevent false sharing.
     */
    static int finishedElement_ __attribute__((aligned(64)));
    static int addedElement_ __attribute__((aligned(64)));

    /*
     * For each integral line continuing on another process, we send one layer
     * of ghost cells and keep one layer for this process, meaning that
     * vertex Id1 belongs to this process and Id2 belongs to a neighboring
     * process.
     */
    struct ElementToBeSent {
      ttk::SimplexId Id1;
      ttk::SimplexId Id2;
      double DistanceFromSeed1;
      double DistanceFromSeed2;
      ttk::SimplexId LocalVertexIdentifier1;
      ttk::SimplexId LocalVertexIdentifier2;
      ttk::SimplexId SeedIdentifier;
      ttk::SimplexId ForkIdentifier;
    };
#endif
  } // namespace intgl


  class IntegralLines : virtual public Debug {

  public:
    IntegralLines();
    ~IntegralLines() override;

    template <class triangulationType = ttk::AbstractTriangulation>
    int execute(triangulationType *triangulation);

    /**
     * @brief Computes the integral line starting at the vertex of global
     * identifier integralLine->seedIdentifier.
     */
    template <class triangulationType = ttk::AbstractTriangulation>
    void computeIntegralLine(const triangulationType *triangulation,
                             ttk::intgl::IntegralLine *integralLine,
                             const ttk::SimplexId *offsets) const;

    /**
     * @brief Creates an OpenMP task that contains the computation of
     * nbElement integral lines.
     */
    template <class triangulationType>
    void createTask(const triangulationType *triangulation,
                    std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
                    const ttk::SimplexId *offsets,
                    int nbElement) const;

    /**
     * @brief Initializes the three attributes of an integral line: the global
     * identifier of its seed, its trajectory and its distance from the seed.
     */
    template <class triangulationType>
    void
      prepareForTask(const triangulationType *triangulation,
                     std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
                     int startingIndex,
                     int nbElement,
                     std::vector<SimplexId> *seeds) const;

    inline void setVertexNumber(const SimplexId &vertexNumber) {
      vertexNumber_ = vertexNumber;
    }

    inline void setSeedNumber(const SimplexId &seedNumber) {
      seedNumber_ = seedNumber;
    }

    inline void setDirection(int direction) {
      direction_ = direction;
    }

    /**
     * @brief Finds the vertex of highest (or lowest, depending on the
     * direction of the integral line) offset in the connected component
     * component.
     */
    void findNextVertex(ttk::SimplexId &vnext,
                        ttk::SimplexId &fnext,
                        std::vector<ttk::SimplexId> &component,
                        const SimplexId *offsets) const;

#ifdef TTK_ENABLE_MPI

    /**
     * @brief Computes global identifiers for the vertices and cells of the
     * output integral lines.
     */
    template <typename triangulationType>
    int getGlobalIdentifiers(
      std::vector<ttk::SimplexId> &globalVertexId,
      std::vector<ttk::SimplexId> &globalCellId,
      const std::vector<ttk::ArrayLinkedList<ttk::intgl::IntegralLine,
                                             INTEGRAL_LINE_TABULAR_SIZE>>
        &integralLines,
      const triangulationType *triangulation);

    /**
     * @brief Exchanges the global identifiers of ghost vertices and edges
     * with the neighboring processes.
     */
    int exchangeGhosts(
      const std::vector<std::vector<ttk::intgl::GhostElementsToSort>>
        &unmatchedGhosts,
      std::vector<ttk::SimplexId> &globalVertexId,
      std::vector<ttk::SimplexId> &globalCellId);

    /**
     * @brief Stores the last two vertices of integralLine in toSend_ if the
     * last vertex is owned by a different process, and sets isMax to true in
     * that case.
     */
    template <class triangulationType>
    void storeToSendIfNecessary(const triangulationType *triangulation,
                                ttk::intgl::IntegralLine *integralLine,
                                bool &isMax) const;

    /**
     * @brief Creates a new integral line from a received element and adds it
     * to the chunk of integral lines to be computed.
     */
    template <class triangulationType>
    void
      receiveElement(const triangulationType *triangulation,
                     intgl::ElementToBeSent &element,
                     std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
                     int &index,
                     int taskSize,
                     const ttk::SimplexId *offsets);

    inline void setToSend(
      std::vector<std::vector<std::vector<intgl::ElementToBeSent>>> *send) {
      toSend_ = send;
    }

    inline void setGlobalElementCounter(int counter) {
      globalElementCounter_ = counter;
    }

    inline void setNeighbors(const std::vector<int> &neighbors) {
      neighbors_ = neighbors;
      neighborNumber_ = neighbors_.size();
      int idx = 0;
      for(int neighbor : neighbors) {
        neighborsToId_[neighbor] = idx;
        idx++;
      }
    }

    /**
     * @brief Creates the MPI datatype used to exchange ElementToBeSent
     * structs between processes.
     */
    void createMessageType() {
      ttk::SimplexId id = 0;
      MPI_Datatype types[]
        = {getMPIType(id), getMPIType(id), MPI_DOUBLE, MPI_DOUBLE,
           getMPIType(id), getMPIType(id), getMPIType(id), getMPIType(id)};
      int lengths[] = {1, 1, 1, 1, 1, 1, 1, 1};
      const long int mpi_offsets[]
        = {offsetof(intgl::ElementToBeSent, Id1),
           offsetof(intgl::ElementToBeSent, Id2),
           offsetof(intgl::ElementToBeSent, DistanceFromSeed1),
           offsetof(intgl::ElementToBeSent, DistanceFromSeed2),
           offsetof(intgl::ElementToBeSent, LocalVertexIdentifier1),
           offsetof(intgl::ElementToBeSent, LocalVertexIdentifier2),
           offsetof(intgl::ElementToBeSent, SeedIdentifier),
           offsetof(intgl::ElementToBeSent, ForkIdentifier)};
      MPI_Type_create_struct(
        8, lengths, mpi_offsets, types, &(this->MessageType));
      MPI_Type_commit(&(this->MessageType));
    }
#endif

    inline int
      preconditionTriangulation(ttk::AbstractTriangulation *triangulation) {
      int status = triangulation->preconditionVertexNeighbors();
      // For critical points computation
      status += triangulation->preconditionVertexStars();
      status += triangulation->preconditionBoundaryVertices();
      this->scalarFieldCriticalPoints_.preconditionTriangulation(triangulation);
      return status;
    }

    inline void setInputScalarField(void *data) {
      inputScalarField_ = data;
    }

    inline void setInputOffsets(const SimplexId *const data) {
      inputOffsets_ = data;
    }

    inline void
      setVertexIdentifierScalarField(std::vector<SimplexId> *const data) {
      vertexIdentifierScalarField_ = data;
    }

    inline void setOutputIntegralLines(
      std::vector<ttk::ArrayLinkedList<ttk::intgl::IntegralLine,
                                       INTEGRAL_LINE_TABULAR_SIZE>>
        *integralLines) {
      this->outputIntegralLines_ = integralLines;
    }

    inline void setChunkSize(int size) {
      chunkSize_ = size;
    }

    inline void buildScalarFieldCriticalPoints() {
      this->scalarFieldCriticalPoints_ = ttk::ScalarFieldCriticalPoints();
    }

  protected:
    ttk::SimplexId vertexNumber_;
    ttk::SimplexId seedNumber_;
    ttk::SimplexId chunkSize_;
    ttk::SimplexId direction_;
    void *inputScalarField_;
    const ttk::SimplexId *inputOffsets_;
    std::vector<ttk::SimplexId> *vertexIdentifierScalarField_;
    std::vector<ttk::ArrayLinkedList<ttk::intgl::IntegralLine,
                                     INTEGRAL_LINE_TABULAR_SIZE>>
      *outputIntegralLines_;
    ttk::ScalarFieldCriticalPoints scalarFieldCriticalPoints_;
    bool EnableForking{false};

#ifdef TTK_ENABLE_MPI
    std::vector<std::vector<std::vector<intgl::ElementToBeSent>>> *toSend_{
      nullptr};
    int neighborNumber_;
    std::unordered_map<int, int> neighborsToId_;
    std::vector<int> neighbors_;
    SimplexId keepWorking_;
    SimplexId globalElementCounter_;
    MPI_Datatype MessageType;
#endif
  };
} // namespace ttk

#ifdef TTK_ENABLE_MPI

template <class triangulationType>
void ttk::IntegralLines::receiveElement(
  const triangulationType *triangulation,
  intgl::ElementToBeSent &element,
  std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
  int &index,
  int taskSize,
  const ttk::SimplexId *offsets) {

  // Create integral line object on this process
  int threadNum{0};
#ifdef TTK_ENABLE_OPENMP
  threadNum = omp_get_thread_num();
#endif // TTK_ENABLE_OPENMP

  ttk::intgl::IntegralLine *integralLine
    = outputIntegralLines_->at(threadNum).addArrayElement(
      ttk::intgl::IntegralLine{
        std::vector<ttk::SimplexId>(
          {triangulation->getVertexLocalId(element.Id1),
           triangulation->getVertexLocalId(element.Id2)}),
        std::vector<double>(
          {element.DistanceFromSeed1, element.DistanceFromSeed2}),
        std::vector<ttk::SimplexId>(
          {element.LocalVertexIdentifier1, element.LocalVertexIdentifier2}),
        element.SeedIdentifier, element.ForkIdentifier});

  // Add to chunks for task granularity
  chunkIntegralLine[index] = integralLine;

  // Start a task if necessary
  if(index == taskSize - 1) {
    this->createTask<triangulationType>(
      triangulation, chunkIntegralLine, offsets, taskSize);
    index = 0;
  } else {
    index++;
  }
}

template <class triangulationType>
void ttk::IntegralLines::storeToSendIfNecessary(
  const triangulationType *triangulation,
  ttk::intgl::IntegralLine *integralLine,
  bool &isMax) const {
#ifdef TTK_ENABLE_MPI
  if(ttk::isRunningWithMPI()) {
    int size = integralLine->trajectory.size();
    if(size > 1) {
      int rankArray
        = triangulation->getVertexRank(integralLine->trajectory.back());
      if(rankArray != ttk::MPIrank_) {
        intgl::ElementToBeSent element
          = intgl::ElementToBeSent{-1,
                                   -1,
                                   0,
                                   0,
                                   -1,
                                   -1,
                                   integralLine->seedIdentifier,
                                   integralLine->forkIdentifier};
        element.Id2
          = triangulation->getVertexGlobalId(integralLine->trajectory.back());
        element.Id1 = triangulation->getVertexGlobalId(
          integralLine->trajectory.at(size - 2));
        element.DistanceFromSeed2 = integralLine->distanceFromSeed.back();
        element.DistanceFromSeed1 = integralLine->distanceFromSeed.at(size - 2);
        element.LocalVertexIdentifier2
          = integralLine->localVertexIdentifier.back();
        element.LocalVertexIdentifier1
          = integralLine->localVertexIdentifier.at(size - 2);
#ifdef TTK_ENABLE_OPENMP
        toSend_
          ->at(neighborsToId_.find(rankArray)->second)[omp_get_thread_num()]
          .push_back(element);
#else
        toSend_->at(neighborsToId_.find(rankArray)->second)[0].push_back(
          element);
#endif // TTK_ENABLE_OPENMP
        isMax = true;
      }
    }
  }
#endif
}
#endif

inline void
  ttk::IntegralLines::findNextVertex(ttk::SimplexId &vnext,
                                     ttk::SimplexId &fnext,
                                     std::vector<ttk::SimplexId> &component,
                                     const SimplexId *offsets) const {
  ttk::SimplexId elementInComponentNumber = component.size();
  for(ttk::SimplexId k = 0; k < elementInComponentNumber; ++k) {
    if(direction_ == static_cast<int>(Direction::Forward)) {
      if(fnext < offsets[component[k]]) {
        vnext = component[k];
        fnext = offsets[component[k]];
      }
    } else {
      if(fnext > offsets[component[k]]) {
        vnext = component[k];
        fnext = offsets[component[k]];
      }
    }
  }
}

template <class triangulationType>
void ttk::IntegralLines::computeIntegralLine(
  const triangulationType *triangulation,
  ttk::intgl::IntegralLine *integralLine,
  const SimplexId *offsets) const {
  double distance = integralLine->distanceFromSeed.back();
  ttk::SimplexId v = integralLine->trajectory.back();
  float p0[3];
  float p1[3];
  triangulation->getVertexPoint(v, p0[0], p0[1], p0[2]);
  bool isMax{};
  std::vector<std::vector<ttk::SimplexId>> *components;
  while(!isMax) {
    std::vector<std::vector<ttk::SimplexId>> upperComponents;
    std::vector<std::vector<ttk::SimplexId>> lowerComponents;
    // Computation of the critical type
    char criticalType
      = this->scalarFieldCriticalPoints_.getCriticalType<triangulationType>(
        v, offsets, triangulation, &upperComponents, &lowerComponents);
    // End of the integral line if an appropriate extremum is reached
    if((criticalType == (char)CriticalType::Local_maximum
        && direction_ == static_cast<int>(Direction::Forward))
       || (criticalType == (char)CriticalType::Local_minimum
           && direction_ != static_cast<int>(Direction::Forward))) {
      isMax = true;
#ifdef TTK_ENABLE_MPI
      if(ttk::isRunningWithMPI()
         && triangulation->getVertexRank(v) == ttk::MPIrank_) {
#ifdef TTK_ENABLE_OPENMP
#pragma omp atomic update seq_cst
#endif // TTK_ENABLE_OPENMP
        ttk::intgl::finishedElement_++;
      } else {
        this->storeToSendIfNecessary<triangulationType>(
          triangulation, integralLine, isMax);
      }
#endif
    } else {
      // If the integral line continues, the computation continues using the
      // lower and upper components
      components = &(lowerComponents);
      if(direction_ == static_cast<int>(Direction::Forward)) {
        components = &(upperComponents);
      }
      if((criticalType == (char)CriticalType::Saddle1
          || criticalType == (char)CriticalType::Saddle2
          || criticalType == (char)CriticalType::Degenerate)
         && EnableForking) {
        // For each connected component, the maximum (or minimum) is computed
        // and a task is created to continue the computation of the integral
        // line
        ttk::SimplexId numberOfComponents = components->size();
#ifdef TTK_ENABLE_MPI
#ifdef TTK_ENABLE_OPENMP
#pragma omp atomic update seq_cst
#endif // TTK_ENABLE_OPENMP
        ttk::intgl::finishedElement_++;
#ifdef TTK_ENABLE_OPENMP
#pragma omp atomic update seq_cst
#endif // TTK_ENABLE_OPENMP
        ttk::intgl::addedElement_ += numberOfComponents;
#endif
        isMax = true;
        for(int i = 0; i < numberOfComponents; i++) {
          SimplexId vnext = -1;
          ttk::SimplexId fnext = offsets[v];
          this->findNextVertex(vnext, fnext, components->at(i), offsets);
          // The task is created
#ifdef TTK_ENABLE_MPI
          ttk::SimplexId forkIdentifier
            = triangulation->getVertexGlobalId(vnext);
#else
          ttk::SimplexId forkIdentifier = vnext;
#endif
          triangulation->getVertexPoint(vnext, p1[0], p1[1], p1[2]);
          double distanceFork = Geometry::distance(p0, p1, 3);
          int threadNum{0};
#ifdef TTK_ENABLE_OPENMP
          threadNum = omp_get_thread_num();
#endif // TTK_ENABLE_OPENMP
          ttk::intgl::IntegralLine *integralLineFork
            = outputIntegralLines_->at(threadNum).addArrayElement(
              ttk::intgl::IntegralLine{
                std::vector<ttk::SimplexId>({v, vnext}),
                std::vector<double>({distance, distance + distanceFork}),
                std::vector<ttk::SimplexId>(
                  {integralLine->localVertexIdentifier.back(),
                   integralLine->localVertexIdentifier.back() + 1}),
                integralLine->seedIdentifier, forkIdentifier});

#ifdef TTK_ENABLE_OPENMP
#pragma omp task firstprivate(integralLineFork)
          {
#endif // TTK_ENABLE_OPENMP
#ifdef TTK_ENABLE_MPI
            bool hasBeenSent = false;
            this->storeToSendIfNecessary<triangulationType>(
              triangulation, integralLineFork, hasBeenSent);
            if(!hasBeenSent) {
#endif
              this->computeIntegralLine<triangulationType>(
                triangulation, integralLineFork, offsets);
#ifdef TTK_ENABLE_MPI
            }
#endif
#ifdef TTK_ENABLE_OPENMP
          }
#endif // TTK_ENABLE_OPENMP
        }
      } else {
        // In case the vertex is not a saddle point, all neighboring vertices
        // are used for the computation
        components->clear();
        ttk::SimplexId neighborNumber
          = triangulation->getVertexNeighborNumber(v);
        components->push_back(std::vector<ttk::SimplexId>());
        ttk::SimplexId id;
        for(ttk::SimplexId i = 0; i < neighborNumber; i++) {
          triangulation->getVertexNeighbor(v, i, id);
          components->at(0).push_back(id);
        }
        SimplexId vnext = -1;
        ttk::SimplexId fnext = offsets[v];
        this->findNextVertex(vnext, fnext, components->at(0), offsets);
        triangulation->getVertexPoint(vnext, p1[0], p1[1], p1[2]);
        distance += Geometry::distance(p0, p1, 3);
        integralLine->trajectory.push_back(vnext);
        p0[0] = p1[0];
        p0[1] = p1[1];
        p0[2] = p1[2];
        integralLine->distanceFromSeed.push_back(distance);
        integralLine->localVertexIdentifier.push_back(
          integralLine->localVertexIdentifier.back() + 1);
        v = vnext;
#ifdef TTK_ENABLE_MPI
        this->storeToSendIfNecessary<triangulationType>(
          triangulation, integralLine, isMax);
#endif
      }
    }
  }
}

template <class triangulationType>
void ttk::IntegralLines::prepareForTask(
#ifdef TTK_ENABLE_MPI
  const triangulationType *triangulation,
#else
  const triangulationType *ttkNotUsed(triangulation),
#endif
  std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
  int startingIndex,
  int nbElement,
  std::vector<SimplexId> *seeds) const {

  for(SimplexId j = 0; j < nbElement; j++) {
    SimplexId v{seeds->at(j + startingIndex)};
    int seedIdentifier;
#ifdef TTK_ENABLE_MPI
    seedIdentifier = triangulation->getVertexGlobalId(v);
#else
    seedIdentifier = v;
#endif
    int threadNum{0};
#ifdef TTK_ENABLE_OPENMP
    threadNum = omp_get_thread_num();
#endif // TTK_ENABLE_OPENMP
    chunkIntegralLine[j] = outputIntegralLines_->at(threadNum).addArrayElement(
      ttk::intgl::IntegralLine{
        std::vector<ttk::SimplexId>(1, v), std::vector<double>(1, 0),
        std::vector<ttk::SimplexId>(1, 0), seedIdentifier, -1});
  }
}

template <class triangulationType>
void ttk::IntegralLines::createTask(
  const triangulationType *triangulation,
  std::vector<ttk::intgl::IntegralLine *> &chunkIntegralLine,
  const ttk::SimplexId *offsets,
  int nbElement) const {
#ifdef TTK_ENABLE_OPENMP
#pragma omp task firstprivate(chunkIntegralLine)
  {
#endif // TTK_ENABLE_OPENMP
    for(int j = 0; j < nbElement; j++) {
      this->computeIntegralLine<triangulationType>(
        triangulation, chunkIntegralLine[j], offsets);
    }
#ifdef TTK_ENABLE_OPENMP
  }
#endif // TTK_ENABLE_OPENMP
}

template <class triangulationType>
int ttk::IntegralLines::execute(triangulationType *triangulation) {

#ifdef TTK_ENABLE_MPI
  keepWorking_ = 1;
  ttk::intgl::finishedElement_ = 0;
  ttk::intgl::addedElement_ = 0;
#endif
  const SimplexId *offsets = inputOffsets_;
  std::vector<SimplexId> *seeds = vertexIdentifierScalarField_;
  Timer t;

  std::vector<ttk::intgl::IntegralLine *> chunkIntegralLine(chunkSize_);
  int taskNumber = (int)seedNumber_ / chunkSize_;
#ifdef TTK_ENABLE_OPENMP
#ifdef TTK_ENABLE_MPI
#pragma omp parallel shared( \
  ttk::intgl::finishedElement_, toSend_, ttk::intgl::addedElement_) \
  num_threads(threadNumber_)
  {
#else
#pragma omp parallel num_threads(threadNumber_)
  {
#endif // TTK_ENABLE_MPI
#pragma omp master
    {
#endif // TTK_ENABLE_OPENMP
      for(SimplexId i = 0; i < taskNumber; ++i) {
        this->prepareForTask<triangulationType>(
          triangulation, chunkIntegralLine, i * chunkSize_, chunkSize_, seeds);
        this->createTask<triangulationType>(
          triangulation, chunkIntegralLine, offsets, chunkSize_);
      }
      int rest = seedNumber_ % chunkSize_;
      if(rest > 0) {
        this->prepareForTask<triangulationType>(
          triangulation, chunkIntegralLine, taskNumber * chunkSize_, rest,
          seeds);
        this->createTask<triangulationType>(
          triangulation, chunkIntegralLine, offsets, rest);
      }
#ifdef TTK_ENABLE_OPENMP
    }
  }
#endif
#ifdef TTK_ENABLE_MPI
  if(ttk::isRunningWithMPI()) {
    int i;
    int finishedElementReceived = 0;
    std::vector<int> sendMessageSize(neighborNumber_);
    std::vector<int> recvMessageSize(neighborNumber_);
    std::vector<std::vector<intgl::ElementToBeSent>> send_buf(neighborNumber_);
    std::vector<std::vector<intgl::ElementToBeSent>> recv_buf(neighborNumber_);
    for(i = 0; i < neighborNumber_; i++) {
      send_buf[i].reserve((int)seedNumber_ * 0.005);
      recv_buf[i].reserve((int)seedNumber_ * 0.005);
    }
    std::vector<MPI_Request> requests(2 * neighborNumber_, MPI_REQUEST_NULL);
    std::vector<MPI_Status> statuses(4 * neighborNumber_);
    int taskSize;
    int index;
    int totalMessageSize;
    while(keepWorking_) {
      ttk::intgl::finishedElement_ -= ttk::intgl::addedElement_;
      ttk::intgl::addedElement_ = 0;
      // Exchange of the number of integral lines finished on all processes
      MPI_Allreduce(&ttk::intgl::finishedElement_, &finishedElementReceived, 1,
                    MPI_INTEGER, MPI_SUM, ttk::MPIcomm_);
      ttk::intgl::finishedElement_ = 0;
      // Update the number of integral lines left to compute
      globalElementCounter_ -= finishedElementReceived;
      // Stop working in case there are no more computations to be done
      if(globalElementCounter_ == 0) {
        keepWorking_ = 0;
      }
      if(keepWorking_) {
        totalMessageSize = 0;
        // Preparation of the send buffers and exchange of the sizes of the
        // messages to be sent
        for(i = 0; i < neighborNumber_; i++) {
          for(int j = 0; j < threadNumber_; j++) {
            send_buf[i].insert(send_buf[i].end(), toSend_->at(i)[j].begin(),
                               toSend_->at(i)[j].end());
            toSend_->at(i)[j].clear();
          }
          sendMessageSize[i] = (int)send_buf[i].size();
          MPI_Isend(&sendMessageSize[i], 1, MPI_INTEGER, neighbors_.at(i),
                    INTEGRAL_LINE_IS_MESSAGE_SIZE, ttk::MPIcomm_,
                    &requests[2 * i]);
          MPI_Irecv(&recvMessageSize[i], 1, MPI_INTEGER, neighbors_.at(i),
                    INTEGRAL_LINE_IS_MESSAGE_SIZE, ttk::MPIcomm_,
                    &requests[2 * i + 1]);
        }
        MPI_Waitall(2 * neighborNumber_, requests.data(), MPI_STATUSES_IGNORE);
        // Exchange of the data
        for(i = 0; i < neighborNumber_; i++) {
          if(recv_buf[i].size() < (size_t)recvMessageSize[i]) {
            recv_buf[i].resize(recvMessageSize[i]);
          }
          if(recvMessageSize[i] > 0) {
            MPI_Irecv(recv_buf[i].data(), recvMessageSize[i], this->MessageType,
                      neighbors_.at(i), INTEGRAL_LINE_IS_ELEMENT_TO_PROCESS,
                      ttk::MPIcomm_, &requests[2 * i]);
            totalMessageSize += recvMessageSize[i];
          }

          if(sendMessageSize[i] > 0) {
            MPI_Isend(send_buf[i].data(), sendMessageSize[i], this->MessageType,
                      neighbors_.at(i), INTEGRAL_LINE_IS_ELEMENT_TO_PROCESS,
                      ttk::MPIcomm_, &requests[2 * i + 1]);
          }
        }
        MPI_Waitall(2 * neighborNumber_, requests.data(), MPI_STATUSES_IGNORE);
        for(i = 0; i < neighborNumber_; i++) {
          send_buf[i].clear();
        }
        // Extraction of the received data and creation of the tasks
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel shared(ttk::intgl::finishedElement_, toSend_) \
  num_threads(threadNumber_)
        {
#pragma omp master
          {
#endif // TTK_ENABLE_OPENMP
            index = 0;
            taskSize = std::min(
              (ttk::SimplexId)std::max(totalMessageSize / (threadNumber_ * 100),
                                       std::min(totalMessageSize, 50)),
              chunkSize_);
            chunkIntegralLine.resize(taskSize);
            for(i = 0; i < neighborNumber_; i++) {
              for(int j = 0; j < recvMessageSize[i]; j++) {
                this->receiveElement<triangulationType>(
                  triangulation, recv_buf[i][j], chunkIntegralLine, index,
                  taskSize, offsets);
              }
            }
            if(index > 0) {
              this->createTask<triangulationType>(
                triangulation, chunkIntegralLine, offsets, index);
            }
#ifdef TTK_ENABLE_OPENMP
          }
        }
#endif // TTK_ENABLE_OPENMP
      }
    }
  }
#endif // TTK_ENABLE_MPI
  {
    std::stringstream msg;
    msg << "Processed " << vertexNumber_ << " points";
    this->printMsg(msg.str(), 1, t.getElapsedTime(), threadNumber_);
  }
  return 0;
}

#ifdef TTK_ENABLE_MPI
template <typename triangulationType>
int ttk::IntegralLines::getGlobalIdentifiers(
  std::vector<ttk::SimplexId> &globalVertexId,
  std::vector<ttk::SimplexId> &globalCellId,
  const std::vector<ttk::ArrayLinkedList<ttk::intgl::IntegralLine,
                                         INTEGRAL_LINE_TABULAR_SIZE>>
    &integralLines,
  const triangulationType *triangulation) {
  ttk::SimplexId outputVertexNumber = 0;
  ttk::SimplexId outputCellNumber = 0;
  ttk::SimplexId realVertexNumber = 0;
  ttk::SimplexId realCellNumber = 0;
  ttk::SimplexId intervalSize;
  // Counts the number of vertices and edges (with and without ghosts)
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for reduction(+:outputVertexNumber,outputCellNumber,realCellNumber,realVertexNumber) schedule(static,1) private(intervalSize)
#endif
  for(int thread = 0; thread < threadNumber_; thread++) {
    std::list<std::array<ttk::intgl::IntegralLine,
                         INTEGRAL_LINE_TABULAR_SIZE>>::const_iterator
      integralLine
      = integralLines[thread].list_.begin();
    while(integralLine != integralLines[thread].list_.end()) {
      for(int i = 0; i < INTEGRAL_LINE_TABULAR_SIZE; i++) {
        if(integralLine->at(i).trajectory.size() > 0) {
          intervalSize = static_cast<ttk::SimplexId>(
            integralLine->at(i).trajectory.size());
          outputVertexNumber += intervalSize;
          outputCellNumber += intervalSize - 1;
          if(triangulation->getVertexRank(integralLine->at(i).trajectory.at(0))
             != ttk::MPIrank_) {
            intervalSize--;
          }
          if(integralLine->at(i).trajectory.size() > 1) {
            realCellNumber += intervalSize - 1;
            if(triangulation->getVertexRank(
                 integralLine->at(i).trajectory.back())
               != ttk::MPIrank_) {
              intervalSize--;
            }
          }
          realVertexNumber += intervalSize;
        } else {
          break;
        }
      }
      integralLine++;
    }
  }

  ttk::SimplexId vertIndex;
  ttk::SimplexId cellIndex;

  // Perform an exclusive prefix sum to find the local offsets for vertices
  // and cells
  MPI_Exscan(&realVertexNumber, &vertIndex, 1, ttk::getMPIType(vertIndex),
             MPI_SUM, ttk::MPIcomm_);
  MPI_Exscan(&realCellNumber, &cellIndex, 1, ttk::getMPIType(cellIndex),
             MPI_SUM, ttk::MPIcomm_);

  // Rank 0 receives garbage values; they are replaced by the correct offset
  // (always 0)
  if(ttk::MPIrank_ == 0) {
    vertIndex = 0;
    cellIndex = 0;
  }

  // Generate global identifiers and package the ghost data for the process
  // that owns it
  ttk::SimplexId startCellId = 0;
  ttk::SimplexId startVertexId = 0;
  globalVertexId.resize(outputVertexNumber);
  globalCellId.resize(outputCellNumber);
  std::vector<std::vector<ttk::intgl::GhostElementsToSort>> unmatchedGhosts(
    neighborNumber_);
  std::vector<std::vector<ttk::SimplexId>> unmatchedGhostsVertexLocalId(
    neighborNumber_);
  std::vector<std::vector<ttk::SimplexId>> unmatchedGhostsEdgeLocalId(
    neighborNumber_);
  for(int thread = 0; thread < threadNumber_; thread++) {
    std::list<std::array<ttk::intgl::IntegralLine,
                         INTEGRAL_LINE_TABULAR_SIZE>>::const_iterator
      integralLine
      = integralLines[thread].list_.begin();
    while(integralLine != integralLines[thread].list_.end()) {
      for(int i = 0; i < INTEGRAL_LINE_TABULAR_SIZE; i++) {
        if(integralLine->at(i).trajectory.size() > 0) {
          if(triangulation->getVertexRank(integralLine->at(i).trajectory.at(0))
             != ttk::MPIrank_) {
            globalVertexId.at(startVertexId) = -1;
          } else {
            globalVertexId.at(startVertexId) = vertIndex;
            vertIndex++;
          }
          startVertexId++;
          if(integralLine->at(i).trajectory.size() > 1) {
            if(triangulation->getVertexRank(
                 integralLine->at(i).trajectory.at(0))
               != ttk::MPIrank_) {
              globalCellId.at(startCellId) = -1;
              unmatchedGhosts
                .at(neighborsToId_[triangulation->getVertexRank(
                  integralLine->at(i).trajectory.at(0))])
                .push_back(ttk::intgl::GhostElementsToSort{
                  vertIndex, integralLine->at(i).localVertexIdentifier.at(0),
                  integralLine->at(i).seedIdentifier,
                  integralLine->at(i).forkIdentifier, -1, startVertexId - 1,
                  startCellId});
            } else {
              globalCellId.at(startCellId) = cellIndex;
              cellIndex++;
            }
            startCellId++;
            if(integralLine->at(i).trajectory.size() > 2) {
              std::iota(globalCellId.begin() + startCellId,
                        globalCellId.begin() + startCellId
                          + integralLine->at(i).trajectory.size() - 2,
                        cellIndex);
              std::iota(globalVertexId.begin() + startVertexId,
                        globalVertexId.begin() + startVertexId
                          + integralLine->at(i).trajectory.size() - 2,
                        vertIndex);
              startCellId += integralLine->at(i).trajectory.size() - 2;
              startVertexId += integralLine->at(i).trajectory.size() - 2;
              vertIndex += integralLine->at(i).trajectory.size() - 2;
              cellIndex += integralLine->at(i).trajectory.size() - 2;
            }
            if(triangulation->getVertexRank(
                 integralLine->at(i).trajectory.back())
               != ttk::MPIrank_) {
              globalVertexId.at(startVertexId) = -1;
              unmatchedGhosts
                .at(neighborsToId_[triangulation->getVertexRank(
                  integralLine->at(i).trajectory.back())])
                .push_back(ttk::intgl::GhostElementsToSort{
                  globalVertexId.at(startVertexId - 1),
                  integralLine->at(i).localVertexIdentifier.at(
                    integralLine->at(i).localVertexIdentifier.size() - 2),
                  integralLine->at(i).seedIdentifier,
                  integralLine->at(i).forkIdentifier,
                  globalCellId.at(startCellId - 1), startVertexId,
                  startCellId - 1});
            } else {
              globalVertexId.at(startVertexId) = vertIndex;
              vertIndex++;
            }
            startVertexId++;
          }
        } else {
          break;
        }
      }
      integralLine++;
    }
  }
  // unmatchedGhosts contains the ghost data for each process.
  // For two processes i and j that are neighbors, their respective
  // unmatchedGhosts[k_i] and unmatchedGhosts[k_j] have the same
  // number of elements and each element represents a segment of
  // integral line. This means that if unmatchedGhosts[k_i] and
  // unmatchedGhosts[k_j] are sorted using the same comparator,
  // element l of unmatchedGhosts[k_i] and element l of unmatchedGhosts[k_j]
  // represent the same segment of integral line. Therefore, once the
  // unmatchedGhosts vectors are sorted on each process, all that
  // is left to do is exchange the global identifiers of vertices
  // and edges, as the receiving process already knows to which local
  // vertices in its domain the ghosts correspond.
  if(ttk::MPIsize_ > 1) {
    for(int i = 0; i < neighborNumber_; i++) {
      TTK_PSORT(threadNumber_, unmatchedGhosts.at(i).begin(),
                unmatchedGhosts.at(i).end());
    }
    this->exchangeGhosts(unmatchedGhosts, globalVertexId, globalCellId);
  }

  return 0;
}

inline int ttk::IntegralLines::exchangeGhosts(
  const std::vector<std::vector<ttk::intgl::GhostElementsToSort>>
    &unmatchedGhosts,
  std::vector<ttk::SimplexId> &globalVertexId,
  std::vector<ttk::SimplexId> &globalCellId) {
  ttk::SimplexId id{0};
  std::vector<std::vector<ttk::SimplexId>> globalIdsToSend(neighborNumber_);
  std::vector<std::vector<ttk::SimplexId>> globalIdsToReceive(neighborNumber_);
  for(int i = 0; i < neighborNumber_; i++) {
    globalIdsToSend[i].resize(2 * unmatchedGhosts[i].size());
    globalIdsToReceive[i].resize(2 * unmatchedGhosts[i].size());
  }
  // The sending buffer is prepared
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for
#endif
  for(int i = 0; i < neighborNumber_; i++) {
    for(size_t j = 0; j < unmatchedGhosts[i].size(); j++) {
      globalIdsToSend[i][2 * j] = unmatchedGhosts[i][j].ownedGlobalId;
      globalIdsToSend[i][2 * j + 1] = unmatchedGhosts[i][j].globalEdgeId;
    }
  }
  // Each process sends and receives the global identifiers of ghosts
  // to and from its neighbors
  for(int neighbor : neighbors_) {
    MPI_Sendrecv(globalIdsToSend[neighborsToId_[neighbor]].data(),
                 globalIdsToSend[neighborsToId_[neighbor]].size(),
                 ttk::getMPIType(id), neighbor, ttk::MPIrank_,
                 globalIdsToReceive[neighborsToId_[neighbor]].data(),
                 globalIdsToSend[neighborsToId_[neighbor]].size(),
                 ttk::getMPIType(id), neighbor, neighbor, ttk::MPIcomm_,
                 MPI_STATUS_IGNORE);
  }
  // The global identifiers of ghosts are inserted in the globalVertexId
  // and globalCellId vectors
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for
#endif
  for(int i = 0; i < neighborNumber_; i++) {
    for(size_t j = 0; j < unmatchedGhosts[i].size(); j++) {
      globalVertexId.at(unmatchedGhosts[i][j].ghostVertexLocalId)
        = globalIdsToReceive[i][2 * j];
      if(globalCellId.at(unmatchedGhosts[i][j].ghostEdgeLocalId) == -1) {
        globalCellId.at(unmatchedGhosts[i][j].ghostEdgeLocalId)
          = globalIdsToReceive[i][j * 2 + 1];
      }
    }
  }
  return 0;
}

#endif // TTK_ENABLE_MPI
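A minimal usage sketch (an illustration only, not part of the header) of how the public setters above are typically wired together before calling execute(). The triangulation, scalar field, offset field, seed list and thread count are assumed to be prepared by the caller, and the exact scope of the Direction enumeration is an assumption; the MPI-specific setters (setToSend(), setGlobalElementCounter(), setNeighbors(), createMessageType()) are omitted.

// Sketch only: `triangulation`, `scalars`, `offsets`, `seeds` and `nThreads`
// are hypothetical, caller-provided objects.
ttk::IntegralLines integralLines;
integralLines.setThreadNumber(nThreads);
integralLines.preconditionTriangulation(triangulation);
integralLines.setVertexNumber(triangulation->getNumberOfVertices());
integralLines.setSeedNumber(seeds.size());
// Direction enum as used in computeIntegralLine() (assumed scope)
integralLines.setDirection(static_cast<int>(ttk::Direction::Forward));
integralLines.setInputScalarField(scalars.data());
integralLines.setInputOffsets(offsets.data());
integralLines.setVertexIdentifierScalarField(&seeds);

// One ArrayLinkedList of integral lines per thread, as expected by
// setOutputIntegralLines().
std::vector<ttk::ArrayLinkedList<ttk::intgl::IntegralLine,
                                 INTEGRAL_LINE_TABULAR_SIZE>>
  output(nThreads);
integralLines.setOutputIntegralLines(&output);
integralLines.setChunkSize(100);
integralLines.execute(triangulation);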