#include <unordered_map>
#include <unordered_set>
#include <vector>

#define OMPI_SKIP_MPICXX 1
#include <mpi.h>
// Map a C++ scalar type to the corresponding MPI datatype. The unused value
// parameter only drives overload resolution.
inline MPI_Datatype getMPIType(const float ttkNotUsed(val)) {
  return MPI_FLOAT;
}
inline MPI_Datatype getMPIType(const int ttkNotUsed(val)) {
  return MPI_INT;
}
inline MPI_Datatype getMPIType(const unsigned int ttkNotUsed(val)) {
  return MPI_UNSIGNED;
}
inline MPI_Datatype getMPIType(const double ttkNotUsed(val)) {
  return MPI_DOUBLE;
}
inline MPI_Datatype getMPIType(const long double ttkNotUsed(val)) {
  return MPI_LONG_DOUBLE;
}
inline MPI_Datatype getMPIType(const long ttkNotUsed(val)) {
  return MPI_LONG;
}
inline MPI_Datatype getMPIType(const unsigned long ttkNotUsed(val)) {
  return MPI_UNSIGNED_LONG;
}
inline MPI_Datatype getMPIType(const long long ttkNotUsed(val)) {
  return MPI_LONG_LONG;
}
inline MPI_Datatype getMPIType(const unsigned long long ttkNotUsed(val)) {
  return MPI_UNSIGNED_LONG_LONG;
}
inline MPI_Datatype getMPIType(const unsigned char ttkNotUsed(val)) {
  return MPI_UNSIGNED_CHAR;
}
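// Usage sketch (not part of this header): the unused parameter only drives
// overload resolution, so the common idiom is to pass a zero-initialized value
// of the buffer's element type. `broadcastBuffer` is an illustrative helper.
template <typename T>
void broadcastBuffer(std::vector<T> &buffer, const int root) {
  const MPI_Datatype dt = getMPIType(static_cast<T>(0));
  MPI_Bcast(buffer.data(), static_cast<int>(buffer.size()), dt, root,
            ttk::MPIcomm_);
}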
// Pair of a scalar value and its global vertex id, used for distributed
// sorting.
template <typename DT, typename IT>
struct value {
  DT scalar;
  IT globalId;
  value(DT _scalar, IT _globalId) : scalar(_scalar), globalId(_globalId) {
  }
};
inline bool isRunningWithMPI() {
  return ttk::MPIsize_ > 1;
}

inline bool hasInitializedMPI() {
  return ttk::MPIsize_ > 0;
}

// Synchronize the ranks before starting the timer.
inline int startMPITimer(Timer &t, int rank, int size) {
  MPI_Barrier(ttk::MPIcomm_);
  // ...
  return 0;
}

// Synchronize the ranks, then read the elapsed time from the timer.
inline double endMPITimer(Timer &t, int rank, int size) {
  double elapsedTime = 0;
  MPI_Barrier(ttk::MPIcomm_);
  elapsedTime = t.getElapsedTime();
  // ...
  return elapsedTime;
}
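// Usage sketch (not part of this header): time a distributed phase with the
// helpers above. Assumes ttk::Timer from TTK's Timer.h; `timeDistributedPhase`
// and `work` are illustrative names.
template <typename F>
double timeDistributedPhase(F &&work) {
  ttk::Timer tm{};
  ttk::startMPITimer(tm, ttk::MPIrank_, ttk::MPIsize_);
  work();
  // the returned value is whatever endMPITimer() reports for this rank
  return ttk::endMPITimer(tm, ttk::MPIrank_, ttk::MPIsize_);
}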
// Fetch the ghost cell scalar values of rank `rankToSend` from the neighbor
// ranks that own them: on `rankToSend` this receives, on the owning neighbors
// it sends.
template <typename DT, typename triangulationType>
int getGhostCellScalars(DT *scalarArray,
                        const triangulationType *triangulation,
                        const int rankToSend,
                        MPI_Comm communicator,
                        const int dimensionNumber) {
  const std::vector<int> &neighbors = triangulation->getNeighborRanks();
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  MPI_Datatype MPI_DT = getMPIType(static_cast<DT>(0));
  // derive a unique tag from rankToSend so that messages of different
  // exchange rounds cannot get mixed up
  int tagMultiplier = rankToSend + 1;
  int valuesTag = 103 * tagMultiplier;
  if(rankToSend == ttk::MPIrank_) {
    int neighborNumber = neighbors.size();
    const auto &ghostCellsPerOwner = triangulation->getGhostCellsPerOwner();
    // receive the ghost cell values from each neighbor owning them
    for(int r = 0; r < neighborNumber; r++) {
      ttk::SimplexId nValues = ghostCellsPerOwner[neighbors[r]].size();
      std::vector<DT> receivedValues(nValues * dimensionNumber);
      MPI_Recv(receivedValues.data(), nValues * dimensionNumber, MPI_DT,
               neighbors.at(r), valuesTag, communicator, MPI_STATUS_IGNORE);
      for(ttk::SimplexId i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          DT receivedVal = receivedValues[i * dimensionNumber + j];
          // map the received global cell id back to its local id
          // ...
          scalarArray[localId * dimensionNumber + j] = receivedVal;
        }
      }
    }
  } else {
    // only neighbors of rankToSend hold ghost copies of its cells
    if(std::find(neighbors.begin(), neighbors.end(), rankToSend)
       != neighbors.end()) {
      const auto &ghostCellsForThisRank
        = triangulation->getRemoteGhostCells()[rankToSend];
      ttk::SimplexId nValues = ghostCellsForThisRank.size();
      // assemble the requested values in send order
      std::vector<DT> valuesToSend(nValues * dimensionNumber);
      for(ttk::SimplexId i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          // look up the local id of the i-th requested global cell id
          // ...
          valuesToSend[i * dimensionNumber + j]
            = scalarArray[localId * dimensionNumber + j];
        }
      }
      MPI_Send(valuesToSend.data(), nValues * dimensionNumber, MPI_DT,
               rankToSend, valuesTag, communicator);
    }
  }
  return 0;
}
// Vertex counterpart of getGhostCellScalars(): fetch the ghost vertex scalar
// values of rank `rankToSend` from the owning neighbor ranks.
template <typename DT, typename triangulationType>
int getGhostVertexScalars(DT *scalarArray,
                          const triangulationType *triangulation,
                          const int rankToSend,
                          MPI_Comm communicator,
                          const int dimensionNumber) {
  const std::vector<int> &neighbors = triangulation->getNeighborRanks();
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  MPI_Datatype MPI_DT = getMPIType(static_cast<DT>(0));
  int tagMultiplier = rankToSend + 1;
  int valuesTag = 103 * tagMultiplier;
  if(rankToSend == ttk::MPIrank_) {
    int neighborNumber = neighbors.size();
    const auto &ghostVerticesPerOwner
      = triangulation->getGhostVerticesPerOwner();
    // receive the ghost vertex values from each neighbor owning them
    for(int r = 0; r < neighborNumber; r++) {
      ttk::SimplexId nValues = ghostVerticesPerOwner[neighbors[r]].size();
      std::vector<DT> receivedValues(nValues * dimensionNumber);
      MPI_Recv(receivedValues.data(), nValues * dimensionNumber, MPI_DT,
               neighbors.at(r), valuesTag, communicator, MPI_STATUS_IGNORE);
      for(ttk::SimplexId i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          DT receivedVal = receivedValues[i * dimensionNumber + j];
          ttk::SimplexId globalId = ghostVerticesPerOwner[neighbors[r]][i];
          ttk::SimplexId localId = triangulation->getVertexLocalId(globalId);
          scalarArray[localId * dimensionNumber + j] = receivedVal;
        }
      }
    }
  } else {
    // only neighbors of rankToSend hold ghost copies of its vertices
    if(std::find(neighbors.begin(), neighbors.end(), rankToSend)
       != neighbors.end()) {
      const auto &ghostVerticesForThisRank
        = triangulation->getRemoteGhostVertices()[rankToSend];
      ttk::SimplexId nValues = ghostVerticesForThisRank.size();
      std::vector<DT> valuesToSend(nValues * dimensionNumber);
      for(ttk::SimplexId i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          ttk::SimplexId globalId = ghostVerticesForThisRank[i];
          ttk::SimplexId localId = triangulation->getVertexLocalId(globalId);
          valuesToSend[i * dimensionNumber + j]
            = scalarArray[localId * dimensionNumber + j];
        }
      }
      MPI_Send(valuesToSend.data(), nValues * dimensionNumber, MPI_DT,
               rankToSend, valuesTag, communicator);
    }
  }
  return 0;
}
// Triangulation-free variant of the ghost exchange: rank `rankToSend` asks its
// neighbors for the values of its ghost vertices, identified only through
// user-provided rank / global-id / local-id accessors.
template <typename DT,
          typename IT,
          typename GVGID,
          typename GVR,
          typename GVLID>
int getGhostDataScalarsWithoutTriangulation(DT *scalarArray,
                                            const GVR &getVertexRank,
                                            const GVGID &getVertexGlobalId,
                                            const GVLID &getVertexLocalId,
                                            const std::vector<int> &neighbors,
                                            const int rankToSend,
                                            const IT nVerts,
                                            MPI_Comm communicator,
                                            const int dimensionNumber) {
  int neighborNumber = neighbors.size();
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  MPI_Datatype MPI_DT = getMPIType(static_cast<DT>(0));
  MPI_Datatype MPI_IT = getMPIType(static_cast<IT>(0));
  using globalIdType = decltype(getVertexGlobalId(0));
  MPI_Datatype MPI_GIT = getMPIType(static_cast<globalIdType>(0));
  // unique tags per rankToSend for the three message kinds
  int tagMultiplier = rankToSend + 1;
  int amountTag = 101 * tagMultiplier;
  int idsTag = 102 * tagMultiplier;
  int valuesTag = 103 * tagMultiplier;
  if(rankToSend == ttk::MPIrank_) {
    // collect the global ids of our ghost vertices, grouped by owner rank
    std::vector<std::vector<globalIdType>> rankVectors(ttk::MPIsize_);
    for(IT i = 0; i < nVerts; i++) {
      if(getVertexRank(i) != ttk::MPIrank_) {
        rankVectors[getVertexRank(i)].push_back(getVertexGlobalId(i));
      }
    }
    // send the amounts and the ids to the owner ranks
    for(int r = 0; r < neighborNumber; r++) {
      IT nValues = rankVectors[neighbors[r]].size();
      MPI_Send(&nValues, 1, MPI_IT, neighbors[r], amountTag, communicator);
      MPI_Send(rankVectors[neighbors[r]].data(), nValues, MPI_GIT,
               neighbors[r], idsTag, communicator);
    }
    // receive the scalar values and scatter them into the local array
    for(int r = 0; r < neighborNumber; r++) {
      IT nValues = rankVectors[neighbors[r]].size();
      std::vector<DT> receivedValues(nValues * dimensionNumber);
      MPI_Recv(receivedValues.data(), nValues * dimensionNumber, MPI_DT,
               neighbors[r], valuesTag, communicator, MPI_STATUS_IGNORE);
      for(IT i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          DT receivedVal = receivedValues[i * dimensionNumber + j];
          const auto globalId = rankVectors[neighbors[r]][i];
          IT localId = getVertexLocalId(globalId);
          scalarArray[localId * dimensionNumber + j] = receivedVal;
        }
      }
    }
  } else {
    if(std::find(neighbors.begin(), neighbors.end(), rankToSend)
       != neighbors.end()) {
      // receive the amount of requested ids, then the ids themselves
      IT nValues{};
      MPI_Recv(&nValues, 1, MPI_IT, rankToSend, amountTag, communicator,
               MPI_STATUS_IGNORE);
      std::vector<globalIdType> receivedIds(nValues);
      MPI_Recv(receivedIds.data(), nValues, MPI_GIT, rankToSend, idsTag,
               communicator, MPI_STATUS_IGNORE);
      // assemble and send back the requested values
      std::vector<DT> valuesToSend(nValues * dimensionNumber);
      for(IT i = 0; i < nValues; i++) {
        for(int j = 0; j < dimensionNumber; j++) {
          IT globalId = receivedIds[i];
          IT localId = getVertexLocalId(globalId);
          valuesToSend[i * dimensionNumber + j]
            = scalarArray[localId * dimensionNumber + j];
        }
      }
      MPI_Send(valuesToSend.data(), nValues * dimensionNumber, MPI_DT,
               rankToSend, valuesTag, communicator);
    }
  }
  return 0;
}
// Compute the neighbor ranks from a per-vertex rank function (no
// triangulation needed): rank 0 gathers the locally observed neighbor sets,
// symmetrizes them and scatters them back.
template <typename IT, typename GVR>
int preconditionNeighborsUsingRankArray(std::vector<int> &neighbors,
                                        const GVR &getVertexRank,
                                        const IT nVerts,
                                        MPI_Comm communicator) {
  std::unordered_set<int> neighborSet{};
  for(IT i = 0; i < nVerts; i++) {
    if(getVertexRank(i) != ttk::MPIrank_) {
      neighborSet.emplace(getVertexRank(i));
    }
  }
  std::vector<int> sendVector(neighborSet.begin(), neighborSet.end());
  int localSize = neighborSet.size();
  std::vector<int> sizes(ttk::MPIsize_);
  std::vector<int> displacements(ttk::MPIsize_);
  MPI_Gather(
    &localSize, 1, MPI_INT, sizes.data(), 1, MPI_INT, 0, communicator);
  int totalSize = 0;
  if(ttk::MPIrank_ == 0) {
    for(int i = 0; i < ttk::MPIsize_; i++) {
      totalSize += sizes[i];
      if(i == 0) {
        displacements[i] = 0;
      } else {
        displacements[i] = displacements[i - 1] + sizes[i - 1];
      }
    }
  }
  std::vector<int> rootVector(totalSize);
  MPI_Gatherv(sendVector.data(), sendVector.size(), MPI_INT,
              rootVector.data(), sizes.data(), displacements.data(), MPI_INT,
              0, communicator);
  std::vector<int> scatterVector;
  if(ttk::MPIrank_ == 0) {
    // split the gathered vector back into one neighbor set per rank
    std::vector<std::unordered_set<int>> setsFromRanks(ttk::MPIsize_);
    auto begin = rootVector.begin();
    auto end = rootVector.begin();
    for(int i = 0; i < ttk::MPIsize_; i++) {
      end = begin + sizes[i];
      std::unordered_set<int> s(begin, end);
      setsFromRanks[i] = s;
      begin = end;
    }
    // symmetrize: if rank j sees i as a neighbor, i must also see j
    for(int i = 0; i < ttk::MPIsize_; i++) {
      for(int j = 0; j < ttk::MPIsize_; j++) {
        if(setsFromRanks[j].find(i) != setsFromRanks[j].end()) {
          setsFromRanks[i].emplace(j);
        }
      }
    }
    // flatten the symmetrized sets for the scatter back to the ranks
    for(int i = 0; i < ttk::MPIsize_; i++) {
      sizes[i] = setsFromRanks[i].size();
      if(i == 0) {
        displacements[i] = 0;
      } else {
        displacements[i] = displacements[i - 1] + sizes[i - 1];
      }
      scatterVector.insert(scatterVector.end(), setsFromRanks[i].begin(),
                           setsFromRanks[i].end());
    }
  }
  int receivedSize = 0;
  MPI_Scatter(
    sizes.data(), 1, MPI_INT, &receivedSize, 1, MPI_INT, 0, communicator);
  std::vector<int> receivedNeighbors(receivedSize);
  MPI_Scatterv(scatterVector.data(), sizes.data(), displacements.data(),
               MPI_INT, receivedNeighbors.data(), receivedSize, MPI_INT, 0,
               communicator);
  std::unordered_set<int> finalSet(
    receivedNeighbors.begin(), receivedNeighbors.end());
  neighborSet = finalSet;
  neighbors.clear();
  for(int neighbor : neighborSet) {
    neighbors.push_back(neighbor);
  }
  std::sort(neighbors.begin(), neighbors.end());
  return 0;
}
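// Usage sketch (not part of this header): derive the neighbor ranks from a
// per-vertex rank array. `computeNeighborsFromRankArray` and `rankArray` are
// illustrative names.
inline void computeNeighborsFromRankArray(const std::vector<int> &rankArray,
                                          std::vector<int> &neighbors) {
  const ttk::SimplexId nVerts
    = static_cast<ttk::SimplexId>(rankArray.size());
  const auto getVertexRank
    = [&rankArray](const ttk::SimplexId v) { return rankArray[v]; };
  ttk::preconditionNeighborsUsingRankArray(
    neighbors, getVertexRank, nVerts, ttk::MPIcomm_);
}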
// Exchange ghost cell scalar values over all ranks: for each rank r in turn,
// the neighbors owning r's ghost cells send their values to r.
template <typename DT, typename triangulationType>
int exchangeGhostCells(DT *scalarArray,
                       const triangulationType *triangulation,
                       MPI_Comm communicator,
                       const int dimensionNumber = 1) {
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  if(!triangulation->hasPreconditionedDistributedCells()) {
    return -1;
  }
  for(int r = 0; r < ttk::MPIsize_; r++) {
    getGhostCellScalars<DT, triangulationType>(
      scalarArray, triangulation, r, communicator, dimensionNumber);
    MPI_Barrier(communicator);
  }
  return 0;
}
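// Usage sketch (not part of this header): after each rank has filled the
// values of the cells it owns, refresh the ghost copies. `refreshCellGhosts`
// and `cellField` are illustrative; the triangulation is assumed to have been
// preconditioned for distributed cells.
template <typename triangulationType>
void refreshCellGhosts(std::vector<double> &cellField,
                       const triangulationType *triangulation) {
  // one scalar per cell (dimensionNumber defaults to 1)
  ttk::exchangeGhostCells<double, triangulationType>(
    cellField.data(), triangulation, ttk::MPIcomm_);
}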
// Vertex counterpart of exchangeGhostCells().
template <typename DT, typename triangulationType>
int exchangeGhostVertices(DT *scalarArray,
                          const triangulationType *triangulation,
                          MPI_Comm communicator,
                          const int dimensionNumber = 1) {
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  if(!triangulation->hasPreconditionedDistributedVertices()) {
    return -1;
  }
  for(int r = 0; r < ttk::MPIsize_; r++) {
    getGhostVertexScalars<DT, triangulationType>(
      scalarArray, triangulation, r, communicator, dimensionNumber);
    MPI_Barrier(communicator);
  }
  return 0;
}
// Triangulation-free counterpart of exchangeGhostVertices(): the ghost
// exchange is driven entirely by user-provided rank / global-id / local-id
// accessors.
template <typename DT,
          typename IT,
          typename GVGID,
          typename GVR,
          typename GVLID>
int exchangeGhostDataWithoutTriangulation(DT *scalarArray,
                                          const GVR &getVertexRank,
                                          const GVGID &getVertexGlobalId,
                                          const GVLID &getVertexLocalId,
                                          const IT nVerts,
                                          MPI_Comm communicator,
                                          const std::vector<int> &neighbors,
                                          const int dimensionNumber = 1) {
  if(!ttk::isRunningWithMPI()) {
    return -1;
  }
  for(int r = 0; r < ttk::MPIsize_; r++) {
    getGhostDataScalarsWithoutTriangulation(
      scalarArray, getVertexRank, getVertexGlobalId, getVertexLocalId,
      neighbors, r, nVerts, communicator, dimensionNumber);
    MPI_Barrier(communicator);
  }
  return 0;
}
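// Usage sketch (not part of this header): synchronize the ghost entries of a
// per-vertex array when only flat arrays are available. All names below
// (`syncGhostField`, `global2local`, ...) are illustrative.
inline void syncGhostField(
  std::vector<double> &field,
  const std::vector<ttk::LongSimplexId> &globalIds,
  const std::vector<int> &vertexRanks,
  const std::unordered_map<ttk::LongSimplexId, ttk::SimplexId> &global2local,
  const std::vector<int> &neighbors) {
  const ttk::SimplexId nVerts = static_cast<ttk::SimplexId>(field.size());
  const auto getVertexRank
    = [&](const ttk::SimplexId v) { return vertexRanks[v]; };
  const auto getVertexGlobalId
    = [&](const ttk::SimplexId v) { return globalIds[v]; };
  const auto getVertexLocalId
    = [&](const ttk::LongSimplexId g) { return global2local.at(g); };
  ttk::exchangeGhostDataWithoutTriangulation<double, ttk::SimplexId>(
    field.data(), getVertexRank, getVertexGlobalId, getVertexLocalId, nVerts,
    ttk::MPIcomm_, neighbors);
}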
// Check whether two axis-aligned bounding boxes, given in VTK order
// [xmin, xmax, ymin, ymax, zmin, zmax], intersect.
bool inline checkForIntersection(double *myBB, double *theirBB) {
  return !(
    myBB[0] > theirBB[1] // my left side is right of their right side
    || myBB[1] < theirBB[0] // my right side is left of their left side
    || myBB[2] > theirBB[3] // my bottom side is above their top side
    || myBB[3] < theirBB[2] // my top side is under their bottom side
    || myBB[4] > theirBB[5] // my front side is behind their back side
    || myBB[5] < theirBB[4] // my back side is in front of their front side
  );
}
// Determine the neighbor ranks from the ranks' bounding boxes: every pair of
// ranks whose (slightly enlarged) bounding boxes intersect is considered
// neighboring.
void inline preconditionNeighborsUsingBoundingBox(
  double *boundingBox, std::vector<int> &neighbors) {

  std::vector<std::array<double, 6>> rankBoundingBoxes(ttk::MPIsize_);
  std::copy(
    boundingBox, boundingBox + 6, rankBoundingBoxes[ttk::MPIrank_].begin());
  for(int r = 0; r < ttk::MPIsize_; r++) {
    MPI_Bcast(rankBoundingBoxes[r].data(), 6, MPI_DOUBLE, r, ttk::MPIcomm_);
  }

  // slightly enlarge the local box to be robust against floating point noise
  double epsilon = 0.00001;
  for(int i = 0; i < 6; i++) {
    if(i % 2 == 0) {
      boundingBox[i] -= epsilon;
    } else {
      boundingBox[i] += epsilon;
    }
  }
  for(int i = 0; i < ttk::MPIsize_; i++) {
    if(i != ttk::MPIrank_) {
      double *theirBoundingBox = rankBoundingBoxes[i].data();
      if(checkForIntersection(boundingBox, theirBoundingBox)) {
        neighbors.push_back(i);
      }
    }
  }
}
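// Usage sketch (not part of this header): compute the neighbor ranks from the
// bounds of the block owned by this rank. `findBlockNeighbors` and
// `localBounds` are illustrative; the bounds follow the VTK order
// [xmin, xmax, ymin, ymax, zmin, zmax] and are slightly enlarged by the call.
inline std::vector<int> findBlockNeighbors(double localBounds[6]) {
  std::vector<int> neighbors{};
  ttk::preconditionNeighborsUsingBoundingBox(localBounds, neighbors);
  return neighbors;
}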
// Build a per-vertex rank array (owning rank of every vertex) from ghost cell
// information and global ids, by asking the neighbor ranks which of the
// locally unknown (ghost) vertices they own.
void inline produceRankArray(std::vector<int> &rankArray,
                             const ttk::LongSimplexId *globalIds,
                             const unsigned char *ghostCells,
                             const int nVertices,
                             double *boundingBox,
                             std::vector<int> &neighbors) {
  if(neighbors.empty()) {
    ttk::preconditionNeighborsUsingBoundingBox(boundingBox, neighbors);
  }
  MPI_Datatype MIT = ttk::getMPIType(ttk::SimplexId{});
  std::vector<ttk::SimplexId> currentRankUnknownIds;
  std::vector<std::vector<ttk::SimplexId>> allUnknownIds(ttk::MPIsize_);
  std::unordered_set<ttk::SimplexId> gIdSet;
  std::unordered_map<ttk::SimplexId, ttk::SimplexId> gIdToLocalMap;

  for(int i = 0; i < nVertices; i++) {
    int ghostCellVal = ghostCells[i];
    ttk::SimplexId globalId = globalIds[i];
    if(ghostCellVal == 0) {
      // a non-ghost vertex: this rank owns it
      rankArray[i] = ttk::MPIrank_;
      gIdSet.insert(globalId);
    } else {
      // a ghost vertex: we need to ask the neighbors who owns it
      currentRankUnknownIds.push_back(globalId);
      gIdToLocalMap[globalId] = i;
    }
  }

  ttk::SimplexId sizeOfCurrentRank = currentRankUnknownIds.size();
  std::vector<ttk::SimplexId> gIdsToSend;
  std::vector<ttk::SimplexId> receivedGlobals;
  receivedGlobals.resize(sizeOfCurrentRank);
  ttk::SimplexId sizeOfNeighbor;
  std::vector<ttk::SimplexId> neighborUnknownIds;
  for(int neighbor : neighbors) {
    // exchange the number of locally unknown ids with this neighbor
    MPI_Sendrecv(&sizeOfCurrentRank, 1, MIT, neighbor, ttk::MPIrank_,
                 &sizeOfNeighbor, 1, MIT, neighbor, neighbor, ttk::MPIcomm_,
                 MPI_STATUS_IGNORE);
    neighborUnknownIds.resize(sizeOfNeighbor);
    gIdsToSend.reserve(sizeOfNeighbor);
    // exchange the unknown ids themselves
    MPI_Sendrecv(currentRankUnknownIds.data(), sizeOfCurrentRank, MIT,
                 neighbor, ttk::MPIrank_, neighborUnknownIds.data(),
                 sizeOfNeighbor, MIT, neighbor, neighbor, ttk::MPIcomm_,
                 MPI_STATUS_IGNORE);
    // answer with the subset of the neighbor's unknown ids that we own
    for(ttk::SimplexId gId : neighborUnknownIds) {
      if(gIdSet.count(gId)) {
        gIdsToSend.push_back(gId);
      }
    }
    // exchange the owned subsets and mark the answering neighbor as owner
    MPI_Status status;
    int amount;
    MPI_Sendrecv(gIdsToSend.data(), gIdsToSend.size(), MIT, neighbor,
                 ttk::MPIrank_, receivedGlobals.data(),
                 currentRankUnknownIds.size(), MIT, neighbor, neighbor,
                 ttk::MPIcomm_, &status);
    MPI_Get_count(&status, MIT, &amount);
    receivedGlobals.resize(amount);
    for(ttk::SimplexId receivedGlobal : receivedGlobals) {
      ttk::SimplexId localVal = gIdToLocalMap[receivedGlobal];
      rankArray[localVal] = neighbor;
    }
    // reset the buffers for the next neighbor
    gIdsToSend.clear();
    receivedGlobals.resize(sizeOfCurrentRank);
  }
}
// Move up to `burstSize` values from the back of `values` into `outVector`.
// If fewer values remain, move them all.
template <typename DT, typename IT>
void returnVectorForBurstsize(std::vector<value<DT, IT>> &outVector,
                              std::vector<value<DT, IT>> &values,
                              const IT burstSize) {
  if(burstSize > values.size()) {
    outVector.resize(values.size(), {0, 0});
    outVector.assign(values.begin(), values.end());
    values.clear();
  } else {
    outVector.resize(burstSize, {0, 0});
    outVector.assign(values.end() - burstSize, values.end());
    values.erase(values.end() - burstSize, values.end());
  }
}
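// Usage sketch (not part of this header): drain a sorted value vector in
// bursts, e.g. to bound the size of individual MPI messages. `drainInBursts`
// and `pending` are illustrative names.
template <typename DT, typename IT>
void drainInBursts(std::vector<ttk::value<DT, IT>> &pending,
                   const IT burstSize) {
  while(!pending.empty()) {
    std::vector<ttk::value<DT, IT>> burst;
    ttk::returnVectorForBurstsize<DT, IT>(burst, pending, burstSize);
    // a real caller would now ship `burst` to the root rank, as in
    // produceOrdering() below
  }
}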
// Receive one burst of value structs (sent as raw bytes) from rank `rankFrom`
// and store it in the per-rank buffer of unsorted received values.
template <typename DT, typename IT>
void ReceiveAndAddToVector(
  int rankFrom,
  int structTag,
  std::vector<std::vector<value<DT, IT>>> &unsortedReceivedValues) {
  std::vector<value<DT, IT>> &receivedValues
    = unsortedReceivedValues[rankFrom];
  // probe to get the message size in bytes, then receive that many structs
  MPI_Status status;
  int amount;
  MPI_Probe(rankFrom, structTag * rankFrom, ttk::MPIcomm_, &status);
  MPI_Get_count(&status, MPI_CHAR, &amount);
  receivedValues.resize(amount / sizeof(value<DT, IT>), {0, 0});
  MPI_Recv(receivedValues.data(), amount, MPI_CHAR, rankFrom,
           structTag * rankFrom, ttk::MPIcomm_, MPI_STATUS_IGNORE);
}
// One step of the distributed merge on rank 0: pick the largest remaining
// value over all per-rank buffers, assign it the next (descending) order
// value and queue that order for resending to the owner rank. When a buffer
// runs empty, flush the queued orders to the owner and fetch its next burst.
template <typename DT, typename IT>
void getMax(int intTag,
            int structTag,
            IT &currentOrder,
            int burstSize,
            MPI_Datatype MPI_IT,
            IT &processedValueCounter,
            std::vector<std::vector<value<DT, IT>>> &unsortedReceivedValues,
            std::vector<std::vector<IT>> &orderResendValues,
            std::vector<IT> &orderedValuesForRank,
            std::vector<value<DT, IT>> &sortingValues) {
  // find the rank holding the largest not-yet-processed value (ties broken by
  // the larger global id)
  int rankIdOfMaxScalar = -1;
  DT maxScalar = std::numeric_limits<DT>::lowest();
  IT maxGId = -1;
  for(size_t i = 0; i < unsortedReceivedValues.size(); i++) {
    if(unsortedReceivedValues[i].size() > 0) {
      const auto &v = unsortedReceivedValues[i].back();
      if(v.scalar == maxScalar ? v.globalId > maxGId : v.scalar > maxScalar) {
        maxScalar = v.scalar;
        maxGId = v.globalId;
        rankIdOfMaxScalar = i;
      }
    }
  }

  // assign the current order to the maximum and queue it for its owner rank
  value<DT, IT> currentValue
    = unsortedReceivedValues[rankIdOfMaxScalar].back();
  orderResendValues[rankIdOfMaxScalar].push_back(currentValue.globalId);
  orderResendValues[rankIdOfMaxScalar].push_back(currentOrder);
  currentOrder--;
  processedValueCounter++;
  unsortedReceivedValues[rankIdOfMaxScalar].pop_back();

  if(unsortedReceivedValues[rankIdOfMaxScalar].size() == 0) {
    if(rankIdOfMaxScalar == 0) {
      // rank 0 keeps its own orders locally and refills from its own values
      orderedValuesForRank.insert(
        orderedValuesForRank.end(),
        orderResendValues[rankIdOfMaxScalar].begin(),
        orderResendValues[rankIdOfMaxScalar].end());
      orderResendValues[rankIdOfMaxScalar].clear();
      if(!sortingValues.empty()) {
        std::vector<value<DT, IT>> ownValues;
        returnVectorForBurstsize<DT, IT>(ownValues, sortingValues, burstSize);
        unsortedReceivedValues[rankIdOfMaxScalar] = ownValues;
      }
    } else {
      // send the queued orders to the owner rank and receive its next burst
      MPI_Send(orderResendValues[rankIdOfMaxScalar].data(),
               orderResendValues[rankIdOfMaxScalar].size(), MPI_IT,
               rankIdOfMaxScalar, intTag * rankIdOfMaxScalar, ttk::MPIcomm_);
      orderResendValues[rankIdOfMaxScalar].clear();
      ReceiveAndAddToVector(
        rankIdOfMaxScalar, structTag, unsortedReceivedValues);
    }
  }
}
// Write the received (global id, order) pairs into the local order array.
template <typename IT, typename GVLID>
void buildArrayForReceivedData(const size_t nInts,
                               const IT *const orderedValuesForRank,
                               const GVLID &getVertexLocalId,
                               SimplexId *const order,
                               const int nThreads) {

  TTK_FORCE_USE(nThreads);
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(nThreads)
#endif // TTK_ENABLE_OPENMP
  for(size_t i = 0; i < nInts; i += 2) {
    order[getVertexLocalId(orderedValuesForRank[i])]
      = orderedValuesForRank[i + 1];
  }
}
// Collect the (scalar, global id) pairs of the vertices owned by this rank.
template <typename DT, typename IT, typename GVGID, typename GVR>
void populateVector(std::vector<value<DT, IT>> &valuesToSortVector,
                    const size_t nVerts,
                    const DT *const scalars,
                    const GVGID &getVertexGlobalId,
                    const GVR &getVertexRank) {
  for(size_t i = 0; i < nVerts; i++) {
    IT globalId = getVertexGlobalId(i);
    if(getVertexRank(i) == ttk::MPIrank_) {
      valuesToSortVector.emplace_back(scalars[i], globalId);
    }
  }
}
// Sort the local values in ascending scalar order (ties broken by global id),
// using TTK's parallel sort macro.
template <typename DT, typename IT>
void sortVerticesDistributed(std::vector<value<DT, IT>> &values,
                             const int nThreads) {

  TTK_PSORT(nThreads, values.begin(), values.end(),
            [](value<DT, IT> v1, value<DT, IT> v2) {
              return (v1.scalar < v2.scalar)
                     || (v1.scalar == v2.scalar && v1.globalId < v2.globalId);
            });
}
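// Small usage sketch (not part of this header): the ascending sort order puts
// the largest value at the back of the vector, which is exactly what
// returnVectorForBurstsize() and getMax() consume. `sortExampleSketch` and the
// sample data are illustrative.
inline void sortExampleSketch() {
  std::vector<ttk::value<float, ttk::SimplexId>> vals{
    {0.5f, 2}, {0.1f, 0}, {0.5f, 1}};
  ttk::sortVerticesDistributed<float, ttk::SimplexId>(vals, 1);
  // vals is now ordered {0.1, 0}, {0.5, 1}, {0.5, 2}: the global maximum sits
  // at vals.back()
}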
// Compute a global order array for a distributed scalar field: each rank sorts
// its own (scalar, global id) pairs, rank 0 merges the per-rank streams in
// bursts and hands out descending order values, and the ghost entries are
// finally synchronized across the ranks.
template <typename DT, typename GVGID, typename GVR, typename GVLID>
void produceOrdering(SimplexId *orderArray,
                     const DT *scalarArray,
                     const GVGID &getVertexGlobalId,
                     const GVR &getVertexRank,
                     const GVLID &getVertexLocalId,
                     const size_t nVerts,
                     const int burstSize,
                     std::vector<int> &neighbors) {
  int intTag = 101;
  int structTag = 102;
  if(neighbors.empty()) {
    ttk::preconditionNeighborsUsingRankArray(
      neighbors, getVertexRank, nVerts, ttk::MPIcomm_);
  }
  MPI_Barrier(ttk::MPIcomm_);

  using IT = decltype(getVertexGlobalId(0));
  MPI_Datatype MPI_IT = ttk::getMPIType(static_cast<IT>(0));

  // collect and sort the locally owned values
  std::vector<value<DT, IT>> sortingValues;
  populateVector(
    sortingValues, nVerts, scalarArray, getVertexGlobalId, getVertexRank);
  sortVerticesDistributed<DT, IT>(sortingValues, ttk::globalThreadNumber_);

  // when all ranks are done sorting, rank 0 starts the merge
  MPI_Barrier(ttk::MPIcomm_);

  std::vector<IT> orderedValuesForRank;
  IT processedValueCounter = 0;
  IT localSize = sortingValues.size();
  IT totalSize = 0;
  MPI_Reduce(&localSize, &totalSize, 1, MPI_IT, MPI_SUM, 0, ttk::MPIcomm_);

  IT currentOrder = totalSize - 1;
  std::vector<std::vector<value<DT, IT>>> unsortedReceivedValues;
  unsortedReceivedValues.resize(ttk::MPIsize_);
  std::vector<std::vector<IT>> orderResendValues;
  orderResendValues.resize(ttk::MPIsize_);
  for(int i = 0; i < ttk::MPIsize_; i++) {
    orderResendValues[i].reserve(burstSize);
  }

  if(ttk::MPIrank_ == 0) {
    // gather the first burst from every rank (rank 0 takes its own burst)
    for(int i = 0; i < ttk::MPIsize_; i++) {
      if(i == 0) {
        std::vector<value<DT, IT>> ownValues;
        returnVectorForBurstsize<DT, IT>(ownValues, sortingValues, burstSize);
        unsortedReceivedValues[i] = ownValues;
      } else {
        ReceiveAndAddToVector<DT, IT>(i, structTag, unsortedReceivedValues);
      }
    }
    // hand out orders from the global maximum downwards
    while(processedValueCounter < totalSize) {
      getMax<DT, IT>(intTag, structTag, currentOrder, burstSize, MPI_IT,
                     processedValueCounter, unsortedReceivedValues,
                     orderResendValues, orderedValuesForRank, sortingValues);
    }
  } else {
    // send our values to rank 0 burst by burst and collect the answered orders
    while(!sortingValues.empty()) {
      std::vector<value<DT, IT>> sendValues;
      returnVectorForBurstsize<DT, IT>(sendValues, sortingValues, burstSize);
      int size = sendValues.size();
      MPI_Send(sendValues.data(), size * sizeof(value<DT, IT>), MPI_CHAR, 0,
               structTag * ttk::MPIrank_, ttk::MPIcomm_);
      std::vector<IT> receivedValues;
      receivedValues.resize(burstSize * 2);
      MPI_Status status;
      int amount;
      MPI_Recv(receivedValues.data(), burstSize * 2, MPI_IT, 0,
               intTag * ttk::MPIrank_, ttk::MPIcomm_, &status);
      MPI_Get_count(&status, MPI_IT, &amount);
      receivedValues.resize(amount);
      orderedValuesForRank.insert(orderedValuesForRank.end(),
                                  receivedValues.begin(),
                                  receivedValues.end());
    }
    // signal to rank 0 that this rank has no values left
    MPI_Send(sortingValues.data(), 0, MPI_CHAR, 0, structTag * ttk::MPIrank_,
             ttk::MPIcomm_);
  }

  // all ranks are done merging: write the received orders into the local array
  MPI_Barrier(ttk::MPIcomm_);
  buildArrayForReceivedData<IT>(orderedValuesForRank.size(),
                                orderedValuesForRank.data(), getVertexLocalId,
                                orderArray, ttk::globalThreadNumber_);
  // finally, propagate the orders of owned vertices to their ghost copies
  ttk::exchangeGhostDataWithoutTriangulation<ttk::SimplexId, IT>(
    orderArray, getVertexRank, getVertexGlobalId, getVertexLocalId, nVerts,
    ttk::MPIcomm_, neighbors);
}
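// Usage sketch (not part of this header): compute a distributed global order
// for a per-vertex scalar field. All names below are illustrative;
// `globalIds`, `vertexRanks` and `global2local` would typically come from the
// dataset's global-id and ghost arrays.
inline void computeGlobalOrder(
  const std::vector<double> &scalars,
  const std::vector<ttk::LongSimplexId> &globalIds,
  const std::vector<int> &vertexRanks,
  const std::unordered_map<ttk::LongSimplexId, ttk::SimplexId> &global2local,
  std::vector<ttk::SimplexId> &order) {
  order.resize(scalars.size());
  std::vector<int> neighbors{}; // filled via preconditionNeighborsUsingRankArray
  const auto getVertexGlobalId
    = [&](const ttk::SimplexId v) { return globalIds[v]; };
  const auto getVertexRank
    = [&](const ttk::SimplexId v) { return vertexRanks[v]; };
  const auto getVertexLocalId
    = [&](const ttk::LongSimplexId g) { return global2local.at(g); };
  ttk::produceOrdering<double>(order.data(), scalars.data(), getVertexGlobalId,
                               getVertexRank, getVertexLocalId, scalars.size(),
                               /*burstSize=*/100, neighbors);
}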
// Send a vector to `neighbor` in two messages: first its size, then its data.
// The sender uses its own rank as the message tag.
template <typename dataType>
void sendVector(std::vector<dataType> &sendBuffer,
                MPI_Datatype messageType,
                int neighbor) {
  ttk::SimplexId dataSize = sendBuffer.size();
  MPI_Send(&dataSize, 1, getMPIType(dataSize), neighbor, ttk::MPIrank_,
           ttk::MPIcomm_);
  MPI_Send(sendBuffer.data(), dataSize, messageType, neighbor, ttk::MPIrank_,
           ttk::MPIcomm_);
}

// Receive a vector sent with sendVector() from `neighbor`: first the size,
// then the data. The tag is the sender's rank, i.e. `neighbor`.
template <typename dataType>
void recvVector(std::vector<dataType> &receiveBuffer,
                ttk::SimplexId &recvMessageSize,
                MPI_Datatype messageType,
                int neighbor) {
  MPI_Recv(&recvMessageSize, 1, getMPIType(recvMessageSize), neighbor,
           neighbor, ttk::MPIcomm_, MPI_STATUS_IGNORE);
  receiveBuffer.resize(recvMessageSize);
  MPI_Recv(receiveBuffer.data(), recvMessageSize, messageType, neighbor,
           neighbor, ttk::MPIcomm_, MPI_STATUS_IGNORE);
}
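// Usage sketch (not part of this header): a symmetric exchange with one
// neighbor rank, sending our buffer and receiving theirs. `swapWithNeighbor`,
// `myData` and `theirData` are illustrative names. Both sides must agree on
// who sends first to avoid deadlocks with large messages; the pattern below
// lets the lower rank send first.
inline void swapWithNeighbor(std::vector<double> &myData,
                             std::vector<double> &theirData,
                             const int neighbor) {
  ttk::SimplexId recvSize{0};
  if(ttk::MPIrank_ < neighbor) {
    ttk::sendVector(myData, MPI_DOUBLE, neighbor);
    ttk::recvVector(theirData, recvSize, MPI_DOUBLE, neighbor);
  } else {
    ttk::recvVector(theirData, recvSize, MPI_DOUBLE, neighbor);
    ttk::sendVector(myData, MPI_DOUBLE, neighbor);
  }
}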
// Referenced TTK declarations (defined in other headers):
// - #define TTK_FORCE_USE(x): force the compiler to use the function/method
//   parameter.
// - #define ttkNotUsed(x): mark function/method parameters that are not used
//   in the function body at all.
// - #define TTK_PSORT(NTHREADS, ...): parallel sort macro.
// - COMMON_EXPORTS int MPIsize_, MPIrank_, globalThreadNumber_.
// - long long int LongSimplexId / int SimplexId: identifier types for
//   simplices of any dimension.