19 template <
typename scalarType,
typename idType>
21 const scalarType *
const scalars,
22 const idType *
const offsets,
27 std::vector<SimplexId> sortedVertices(nVerts);
31#ifdef TTK_ENABLE_OPENMP
32#pragma omp parallel for num_threads(nThreads)
34 for(
size_t i = 0; i < sortedVertices.size(); ++i) {
35 sortedVertices[i] = i;
38 if(offsets !=
nullptr) {
40 nThreads, sortedVertices.begin(), sortedVertices.end(),
42 return (scalars[a] < scalars[b])
43 || (scalars[a] == scalars[b] && offsets[a] < offsets[b]);
46 TTK_PSORT(nThreads, sortedVertices.begin(), sortedVertices.end(),
48 return (scalars[a] < scalars[b])
49 || (scalars[a] == scalars[b] && a < b);
53#ifdef TTK_ENABLE_OPENMP
54#pragma omp parallel for num_threads(nThreads)
56 for(
size_t i = 0; i < sortedVertices.size(); ++i) {
57 order[sortedVertices[i]] = i;
69 template <
typename scalarType>
71 const scalarType *
const scalars,
76 nVerts, scalars,
static_cast<int *
>(
nullptr), order, nThreads);
#define TTK_FORCE_USE(x)
Force the compiler to treat the given function/method parameter as used.
#define TTK_PSORT(NTHREADS,...)
Parallel sort macro.
COMMON_EXPORTS int globalThreadNumber_
void sortVertices(const size_t nVerts, const scalarType *const scalars, const idType *const offsets, SimplexId *const order, const int nThreads)
Sort the vertices by increasing scalar value, ties between equal scalars being disambiguated by the offsets array.
void preconditionOrderArray(const size_t nVerts, const scalarType *const scalars, SimplexId *const order, const int nThreads=ttk::globalThreadNumber_)
Precondition an order array to be consumed by the base layer API.
int SimplexId
Identifier type for simplices of any dimension.