Skip to content

Commit

Permalink
Fix some ODR violations
Browse files Browse the repository at this point in the history
Those turned up when playing with unity builds in CMake.
  • Loading branch information
bernhardmgruber committed Jan 31, 2023
1 parent c6b8302 commit 8604272
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 25 deletions.
8 changes: 5 additions & 3 deletions tests/mapping.BitPackedFloat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -140,15 +140,15 @@ TEMPLATE_TEST_CASE("mapping.BitPackedFloatSoA", "", float, double)
}
}

constexpr auto n = std::size_t{1000};
TEMPLATE_TEST_CASE(
"mapping.BitPackedFloatSoA.ReducedPrecisionComputation",
"",
(llama::mapping::
BitPackedFloatAoS<llama::ArrayExtents<std::size_t, n>, Vec3D, llama::Constant<8>, llama::Constant<23>>),
BitPackedFloatAoS<llama::ArrayExtents<std::size_t, 1000>, Vec3D, llama::Constant<8>, llama::Constant<23>>),
(llama::mapping::
BitPackedFloatSoA<llama::ArrayExtents<std::size_t, n>, Vec3D, llama::Constant<8>, llama::Constant<23>>) )
BitPackedFloatSoA<llama::ArrayExtents<std::size_t, 1000>, Vec3D, llama::Constant<8>, llama::Constant<23>>) )
{
constexpr auto n = std::size_t{1000};
auto view = llama::allocView(llama::mapping::AoS<llama::ArrayExtents<std::size_t, n>, Vec3D>{{}});
std::default_random_engine engine;
std::uniform_real_distribution dist{0.0f, 1e20f};
Expand Down Expand Up @@ -183,6 +183,7 @@ TEMPLATE_TEST_CASE(

TEST_CASE("mapping.BitPackedFloatAoS.blobs")
{
constexpr auto n = std::size_t{1000};
using Mapping = llama::mapping::
BitPackedFloatAoS<llama::ArrayExtents<std::size_t, n>, Vec3D, llama::Constant<3>, llama::Constant<5>>;
STATIC_REQUIRE(Mapping::blobCount == 1);
Expand All @@ -194,6 +195,7 @@ TEST_CASE("mapping.BitPackedFloatAoS.blobs")

TEST_CASE("mapping.BitPackedFloatSoA.blobs")
{
constexpr auto n = std::size_t{1000};
using Mapping = llama::mapping::
BitPackedFloatSoA<llama::ArrayExtents<std::size_t, n>, Vec3D, llama::Constant<3>, llama::Constant<5>>;
STATIC_REQUIRE(Mapping::blobCount == 3);
Expand Down
6 changes: 3 additions & 3 deletions tests/mapping.BitPackedInt.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -122,13 +122,13 @@ TEMPLATE_TEST_CASE(
}
}

constexpr auto n = 1000;
TEMPLATE_TEST_CASE(
"mapping.BitPackedInt.SInts.Roundtrip",
"",
(llama::mapping::BitPackedIntSoA<llama::ArrayExtents<std::size_t, n>, Vec3I, llama::Constant<12>>),
(llama::mapping::BitPackedIntAoS<llama::ArrayExtents<std::size_t, n>, Vec3I, llama::Constant<12>>) )
(llama::mapping::BitPackedIntSoA<llama::ArrayExtents<std::size_t, 1000>, Vec3I, llama::Constant<12>>),
(llama::mapping::BitPackedIntAoS<llama::ArrayExtents<std::size_t, 1000>, Vec3I, llama::Constant<12>>) )
{
constexpr auto n = 1000;
auto view = llama::allocView(llama::mapping::AoS<llama::ArrayExtents<std::size_t, n>, Vec3I>{});
std::default_random_engine engine;
std::uniform_int_distribution dist{-2000, 2000}; // fits into 12 bits
Expand Down
38 changes: 19 additions & 19 deletions tests/view.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,40 +3,40 @@
#include <atomic>
#include <deque>

// clang-format off
namespace tag
{
struct Value {};
struct Value
{
};
} // namespace tag

using RecordDim = llama::Record<
llama::Field<tag::Value, int>
>;
// clang-format on
using RecordDimJustInt = llama::Record<llama::Field<tag::Value, int>>;

TEST_CASE("view.default-ctor")
{
using ArrayExtents = llama::ArrayExtentsDynamic<std::size_t, 2>;
[[maybe_unused]] const llama::View<llama::mapping::AlignedAoS<ArrayExtents, RecordDim>, std::byte*> view1{};
[[maybe_unused]] const llama::View<llama::mapping::PackedAoS<ArrayExtents, RecordDim>, std::byte*> view2{};
[[maybe_unused]] const llama::View<llama::mapping::AlignedSingleBlobSoA<ArrayExtents, RecordDim>, std::byte*>
view3{};
[[maybe_unused]] const llama::View<llama::mapping::PackedSingleBlobSoA<ArrayExtents, RecordDim>, std::byte*>
[[maybe_unused]] const llama::View<llama::mapping::AlignedAoS<ArrayExtents, RecordDimJustInt>, std::byte*> view1{};
[[maybe_unused]] const llama::View<llama::mapping::PackedAoS<ArrayExtents, RecordDimJustInt>, std::byte*> view2{};
[[maybe_unused]] const llama::
View<llama::mapping::AlignedSingleBlobSoA<ArrayExtents, RecordDimJustInt>, std::byte*>
view3{};
[[maybe_unused]] const llama::View<llama::mapping::PackedSingleBlobSoA<ArrayExtents, RecordDimJustInt>, std::byte*>
view4{};
[[maybe_unused]] const llama::View<llama::mapping::MultiBlobSoA<ArrayExtents, RecordDim>, std::byte*> view5{};
[[maybe_unused]] const llama::View<llama::mapping::One<ArrayExtents, RecordDim>, std::byte*> view6{};
[[maybe_unused]] const llama::View<llama::mapping::MultiBlobSoA<ArrayExtents, RecordDimJustInt>, std::byte*>
view5{};
[[maybe_unused]] const llama::View<llama::mapping::One<ArrayExtents, RecordDimJustInt>, std::byte*> view6{};
[[maybe_unused]] const llama::
View<llama::mapping::tree::Mapping<ArrayExtents, RecordDim, llama::Tuple<>>, std::byte*>
View<llama::mapping::tree::Mapping<ArrayExtents, RecordDimJustInt, llama::Tuple<>>, std::byte*>
view7{};
}
//
// TEST_CASE("view.trivial")
//{
// using ArrayExtents = llama::ArrayExtents<std::size_t, 2, llama::dyn>;
// using Mapping = llama::mapping::AlignedAoS<ArrayExtents, RecordDim>;
// using Mapping = llama::mapping::AlignedAoS<ArrayExtents, RecordDimJustInt>;
// constexpr auto s = Mapping{{10}}.blobSize(0);
// using BlobType = decltype(llama::bloballoc::Stack<s>{}(std::integral_constant<std::size_t, 4>{}, 0));
// using View = llama::View<llama::mapping::AlignedAoS<ArrayExtents, RecordDim>, BlobType>;
// using View = llama::View<llama::mapping::AlignedAoS<ArrayExtents, RecordDimJustInt>, BlobType>;
// STATIC_REQUIRE(std::is_trivially_constructible_v<View>);
// STATIC_REQUIRE(std::is_trivial_v<View>);
// STATIC_REQUIRE(std::is_trivial_v<View>);
Expand All @@ -47,7 +47,7 @@ TEST_CASE("view.move")
using ArrayExtents = llama::ArrayExtentsDynamic<std::size_t, 2>;
constexpr ArrayExtents viewSize{16, 16};

using Mapping = llama::mapping::SoA<ArrayExtents, RecordDim>;
using Mapping = llama::mapping::SoA<ArrayExtents, RecordDimJustInt>;
auto view1 = llama::allocView(Mapping(viewSize));

decltype(view1) view2;
Expand All @@ -61,7 +61,7 @@ TEST_CASE("view.swap")
using ArrayExtents = llama::ArrayExtentsDynamic<std::size_t, 2>;
constexpr ArrayExtents viewSize{16, 16};

using Mapping = llama::mapping::SoA<ArrayExtents, RecordDim>;
using Mapping = llama::mapping::SoA<ArrayExtents, RecordDimJustInt>;
auto view1 = llama::allocView(Mapping(viewSize));
auto view2 = llama::allocView(Mapping(viewSize));

Expand All @@ -83,7 +83,7 @@ TEST_CASE("view.non-memory-owning")
using ArrayExtents = llama::ArrayExtentsDynamic<std::size_t, 1>;
const ArrayExtents extents{256};

using Mapping = llama::mapping::SoA<ArrayExtents, RecordDim>;
using Mapping = llama::mapping::SoA<ArrayExtents, RecordDimJustInt>;
const Mapping mapping{extents};

const auto blobSize = mapping.blobSize(0);
Expand Down

0 comments on commit 8604272

Please sign in to comment.