Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions src/axom/core/numerics/quadrature.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

#include <cmath>
#include <cstdint>
#include <mutex>

namespace axom
{
Expand Down Expand Up @@ -158,16 +159,18 @@ void compute_gauss_legendre_data(int npts,
* \note If this method has already been called for a given order, it will reuse the same quadrature points
* without needing to recompute them
*
* \warning The use of a static variable to store cached nodes makes this method not threadsafe.
* \note This implementation uses a process-wide cache protected by a mutex for thread safety.
*
* \return The `QuadratureRule` object which contains axom::ArrayView<double>'s of stored nodes and weights
*/
QuadratureRule get_gauss_legendre(int npts, int allocatorID)
{
assert("Quadrature rules must have >= 1 point" && (npts >= 1));

// Store cached rules keyed by (npts, allocatorID). This cache is not thread-safe.
// Store cached rules keyed by (npts, allocatorID).
static axom::FlatMap<std::uint64_t, GaussLegendreRuleStorage> rule_library(64);
static std::mutex rule_library_mutex;
const std::lock_guard<std::mutex> lock(rule_library_mutex);

const std::uint64_t key = make_gauss_legendre_key(npts, allocatorID);

Expand Down
111 changes: 111 additions & 0 deletions src/axom/primal/operators/detail/winding_number_2d_memoization.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -256,6 +256,117 @@ std::ostream& operator<<(std::ostream& os, const NURBSCurveGWNCache<T>& nCurveCa
}

} // namespace detail

/*!
* \brief Manage an array of NURBSCurveGWNCache<double>
*/
class NURBSCurveCacheManager
{
using NURBSCache = axom::primal::detail::NURBSCurveGWNCache<double>;
using NURBSCacheArray = axom::Array<NURBSCache>;
using NURBSCacheArrayView = axom::ArrayView<const NURBSCache>;

using CurveArrayView = axom::ArrayView<const axom::primal::NURBSCurve<double, 2>>;

public:
NURBSCurveCacheManager() = default;

NURBSCurveCacheManager(const CurveArrayView& curves, double bbExpansionAmount = 0.0)
{
for(const auto& curve : curves)
{
m_nurbs_caches.push_back(NURBSCache(curve, bbExpansionAmount));
}
}

/// A view of the manager object.
struct View
{
NURBSCacheArrayView m_view;

/// Return the NURBSCacheArrayView.
NURBSCacheArrayView caches() const { return m_view; }
};

/// Return a view of this manager to pass into a device function.
View view() const { return View {m_nurbs_caches.view()}; }

/// Return if the underlying array is empty
bool empty() const { return m_nurbs_caches.empty(); }

private:
NURBSCacheArray m_nurbs_caches;
};

/*!
 * \brief Traits class mapping an execution space to the curve-cache manager
 *        type it should use.
 *
 * The primary template selects the single-array NURBSCurveCacheManager;
 * a specialization for axom::OMP_EXEC (when RAJA + OpenMP are enabled)
 * selects the per-thread manager instead.
 */
template <typename ExecSpace>
struct nurbs_cache_2d_traits
{
  using type = NURBSCurveCacheManager;
};

#if defined(AXOM_USE_RAJA) && defined(AXOM_USE_OPENMP)
/*!
* \brief Manage per-thread arrays of NURBSCurveGWNCache<double>
*/
class NURBSCurveCacheManagerOMP
{
  // Cached winding-number data for a single 2D NURBS curve, nested so each
  // OpenMP thread owns its own array of caches.
  using NURBSCache = axom::primal::detail::NURBSCurveGWNCache<double>;
  using NURBSCachePerThreadArray = axom::Array<axom::Array<NURBSCache>>;
  using NURBSCachePerThreadArrayView = axom::ArrayView<const axom::Array<NURBSCache>>;
  using NURBSCacheArrayView = axom::ArrayView<const NURBSCache>;

  using CurveArrayView = axom::ArrayView<const axom::primal::NURBSCurve<double, 2>>;

public:
  NURBSCurveCacheManagerOMP() = default;

  /*!
   * \brief Build one copy of the curve caches for each OpenMP thread.
   *
   * The caches are constructed once, parallelized over curves, and the
   * finished array is then copied to every other thread's slot — copying
   * an already-built cache is assumed cheaper than reconstructing it.
   *
   * \param curves View of the NURBS curves to cache
   * \param bbExpansionAmount Amount passed to each cache's constructor to
   *        expand its bounding box (defaults to no expansion)
   */
  NURBSCurveCacheManagerOMP(const CurveArrayView& curves, double bbExpansionAmount = 0.0)
  {
    // One cache array per thread available to the OpenMP runtime.
    const int nt = omp_get_max_threads();
    m_nurbs_caches.resize(nt);
    auto nurbs_caches_view = m_nurbs_caches.view();

    // Make the first one
    nurbs_caches_view[0].resize(curves.size());
    axom::for_all<axom::OMP_EXEC>(
      curves.size(),
      AXOM_LAMBDA(axom::IndexType i) {
        nurbs_caches_view[0][i] = NURBSCache(curves[i], bbExpansionAmount);
      });

    // Copy the constructed cache to the other threads' copies (less work than construction)
    // NOTE(review): each iteration deep-copies thread 0's whole array via
    // axom::Array assignment — assumes that assignment is safe to run
    // concurrently across distinct destination elements.
    axom::for_all<axom::OMP_EXEC>(
      1,
      nt,
      AXOM_LAMBDA(axom::IndexType t) { nurbs_caches_view[t] = nurbs_caches_view[0]; });
  }

  /// A view of the manager object.
  struct View
  {
    NURBSCachePerThreadArrayView m_views;

    /// Return the NURBSCacheArrayView for the current OMP thread.
    NURBSCacheArrayView caches() const { return m_views[omp_get_thread_num()].view(); }
  };

  /// Return a view of this manager to pass into a device function.
  View view() const { return View {m_nurbs_caches.view()}; }

  /// Return if the underlying array is empty
  bool empty() const { return m_nurbs_caches.empty(); }

private:
  NURBSCachePerThreadArray m_nurbs_caches;
};

/*!
 * \brief With RAJA + OpenMP enabled, the OpenMP execution space uses the
 *        per-thread cache manager so each thread mutates only its own caches.
 */
template <>
struct nurbs_cache_2d_traits<axom::OMP_EXEC>
{
  using type = NURBSCurveCacheManagerOMP;
};
#endif

} // namespace primal
} // namespace axom

Expand Down
133 changes: 132 additions & 1 deletion src/axom/primal/operators/detail/winding_number_3d_memoization.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,19 @@ class NURBSPatchGWNCache
m_averageNormal = m_alteredPatch.calculateTrimmedPatchNormal();
}

// Cast direction is set to average normal, unless it is near zero
if(m_averageNormal.norm() < 1e-10)
{
// ...unless the average direction is zero
double theta = axom::utilities::random_real(0.0, 2 * M_PI);
double u = axom::utilities::random_real(-1.0, 1.0);
m_castDirection = Vector<T, 3> {sin(theta) * sqrt(1 - u * u), cos(theta) * sqrt(1 - u * u), u};
}
else
{
m_castDirection = m_averageNormal.unitVector();
}

m_pboxDiag = m_alteredPatch.getParameterSpaceDiagonal();

// Make a bounding box by doing (trimmed) bezier extraction,
Expand Down Expand Up @@ -238,6 +251,7 @@ class NURBSPatchGWNCache
///@{
//! \name Accessors for precomputed data
const Vector<T, 3>& getAverageNormal() const { return m_averageNormal; }
const Vector<T, 3>& getCastDirection() const { return m_castDirection; }
const BoundingBox<T, 3>& boundingBox() const { return m_bBox; }
const OrientedBoundingBox<T, 3>& orientedBoundingBox() const { return m_oBox; }
//@}
Expand Down Expand Up @@ -272,14 +286,131 @@ class NURBSPatchGWNCache
// Per patch data
BoundingBox<T, 3> m_bBox;
OrientedBoundingBox<T, 3> m_oBox;
Vector<T, 3> m_averageNormal;
Vector<T, 3> m_averageNormal, m_castDirection;
double m_pboxDiag;

// Per trimming curve data, keyed by (whichRefinementLevel, whichRefinementIndex)
mutable axom::Array<std::unordered_map<std::uint64_t, TrimmingCurveQuadratureData<T>>> m_curveQuadratureMaps;
};

} // namespace detail

/*!
* \brief Manage an array of NURBSPatchGWNCache<double>
*/
class NURBSPatchCacheManager
{
using NURBSCache = axom::primal::detail::NURBSPatchGWNCache<double>;
using NURBSCacheArray = axom::Array<NURBSCache>;
using NURBSCacheArrayView = axom::ArrayView<const NURBSCache>;

using PatchArrayView = axom::ArrayView<const axom::primal::NURBSPatch<double, 3>>;

public:
NURBSPatchCacheManager() = default;

NURBSPatchCacheManager(const PatchArrayView& patchs)
{
for(const auto& patch : patchs)
{
m_nurbs_caches.push_back(NURBSCache(patch));
}
}

/// A view of the manager object.
struct View
{
NURBSCacheArrayView m_view;

/// Return the NURBSCacheArrayView.
NURBSCacheArrayView caches() const { return m_view; }
};

/// Return a view of this manager to pass into a device function.
View view() const { return View {m_nurbs_caches.view()}; }

/// Return if the underlying array is empty
bool empty() const { return m_nurbs_caches.empty(); }

private:
NURBSCacheArray m_nurbs_caches;
};

/*!
 * \brief Traits class mapping an execution space to the patch-cache manager
 *        type it should use.
 *
 * The primary template selects the single-array NURBSPatchCacheManager;
 * a specialization for axom::OMP_EXEC (when RAJA + OpenMP are enabled)
 * selects the per-thread manager instead.
 */
template <typename ExecSpace>
struct nurbs_cache_3d_traits
{
  using type = NURBSPatchCacheManager;
};

#if defined(AXOM_USE_RAJA) && defined(AXOM_USE_OPENMP)
/*!
* \brief Manage per-thread arrays of NURBSPatchGWNCache<double>
*/
class NURBSPatchCacheManagerOMP
{
  // Cached winding-number data for a single 3D NURBS patch, nested so each
  // OpenMP thread owns its own array of caches.
  using NURBSCache = axom::primal::detail::NURBSPatchGWNCache<double>;
  using NURBSCachePerThreadArray = axom::Array<axom::Array<NURBSCache>>;
  using NURBSCachePerThreadArrayView = axom::ArrayView<const axom::Array<NURBSCache>>;
  using NURBSCacheArrayView = axom::ArrayView<const NURBSCache>;

  using PatchArrayView = axom::ArrayView<const axom::primal::NURBSPatch<double, 3>>;

public:
  NURBSPatchCacheManagerOMP() = default;

  /*!
   * \brief Build one copy of the patch caches for each OpenMP thread.
   *
   * The caches are constructed once, parallelized over patches; the other
   * threads' arrays are then resized and filled by per-element copies of
   * thread 0's caches — copying an already-built cache is assumed cheaper
   * than reconstructing it.
   *
   * \param patches View of the NURBS patches to cache
   */
  NURBSPatchCacheManagerOMP(const PatchArrayView& patches)
  {
    // One cache array per thread available to the OpenMP runtime.
    const int nt = omp_get_max_threads();
    m_nurbs_caches.resize(nt);
    auto nurbs_caches_view = m_nurbs_caches.view();

    // Construct the caches once, parallelizing over patches.
    nurbs_caches_view[0].resize(patches.size());
    axom::for_all<axom::OMP_EXEC>(
      patches.size(),
      AXOM_LAMBDA(axom::IndexType i) { nurbs_caches_view[0][i] = NURBSCache(patches[i]); });

    // Size the remaining threads' arrays before copying into them.
    axom::for_all<axom::OMP_EXEC>(
      1,
      nt,
      AXOM_LAMBDA(axom::IndexType t) { nurbs_caches_view[t].resize(nurbs_caches_view[0].size()); });

    // Copy the constructed cache to the other threads' copies (less work than construction)
    axom::for_all<axom::OMP_EXEC>(
      patches.size(),
      AXOM_LAMBDA(axom::IndexType i) {
        for(int t = 0; t < nt; t++)
        {
          nurbs_caches_view[t][i] = nurbs_caches_view[0][i];
        }
      });
  }

  /// A view of the manager object.
  struct View
  {
    NURBSCachePerThreadArrayView m_views;

    /// Return the NURBSCacheArrayView for the current OMP thread.
    NURBSCacheArrayView caches() const { return m_views[omp_get_thread_num()].view(); }
  };

  /// Return a view of this manager to pass into a device function.
  View view() const { return View {m_nurbs_caches.view()}; }

  /// Return if the underlying array is empty
  bool empty() const { return m_nurbs_caches.empty(); }

private:
  NURBSCachePerThreadArray m_nurbs_caches;
};

/*!
 * \brief With RAJA + OpenMP enabled, the OpenMP execution space uses the
 *        per-thread cache manager so each thread mutates only its own caches.
 */
template <>
struct nurbs_cache_3d_traits<axom::OMP_EXEC>
{
  using type = NURBSPatchCacheManagerOMP;
};
#endif

} // namespace primal
} // namespace axom

Expand Down
Loading
Loading