118 #ifndef NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED 119 #define NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED 123 #include <nanovdb/math/Math.h> 126 #define NANOVDB_DATA_ALIGNMENT 32 134 #define NANOVDB_MAGIC_NUMB 0x304244566f6e614eUL // "NanoVDB0" in hex - little endian (uint64_t) 135 #define NANOVDB_MAGIC_GRID 0x314244566f6e614eUL // "NanoVDB1" in hex - little endian (uint64_t) 136 #define NANOVDB_MAGIC_FILE 0x324244566f6e614eUL // "NanoVDB2" in hex - little endian (uint64_t) 137 #define NANOVDB_MAGIC_NODE 0x334244566f6e614eUL // "NanoVDB3" in hex - little endian (uint64_t) 138 #define NANOVDB_MAGIC_FRAG 0x344244566f6e614eUL // "NanoVDB4" in hex - little endian (uint64_t) 139 #define NANOVDB_MAGIC_MASK 0x00FFFFFFFFFFFFFFUL // use this mask to remove the number 144 #define NANOVDB_MAJOR_VERSION_NUMBER 32 // reflects changes to the ABI and hence also the file format 145 #define NANOVDB_MINOR_VERSION_NUMBER 7 // reflects changes to the API but not ABI 146 #define NANOVDB_PATCH_VERSION_NUMBER 0 // reflects changes that does not affect the ABI or API 148 #define TBB_SUPPRESS_DEPRECATED_MESSAGES 1 151 #define NANOVDB_USE_SINGLE_ROOT_KEY 160 #define NANOVDB_NEW_ACCESSOR_METHODS 162 #define NANOVDB_FPN_BRANCHLESS 164 #if !defined(NANOVDB_ALIGN) 165 #define NANOVDB_ALIGN(n) alignas(n) 166 #endif // !defined(NANOVDB_ALIGN) 208 template <
class EnumT>
557 template <
typename T>
642 switch (blindClass) {
694 : mData(major << 21 | minor << 10 | patch)
733 static const int Rank = 0;
734 static const bool IsScalar =
true;
735 static const bool IsVector =
false;
736 static const int Size = 1;
738 static T
scalar(
const T& s) {
return s; }
744 static const int Rank = 1;
745 static const bool IsScalar =
false;
746 static const bool IsVector =
true;
747 static const int Size = T::SIZE;
754 template<typename T, int = sizeof(typename TensorTraits<T>::ElementType)>
811 template<
typename BuildT>
870 template<
typename BuildT>
871 [[deprecated(
"Use toGridType<T>() instead.")]]
877 template<
typename BuildT>
892 template<
typename BuildT>
893 [[deprecated(
"Use toGridClass<T>() instead.")]]
896 return toGridClass<BuildT>();
934 BitFlags(std::initializer_list<uint8_t> list)
936 for (
auto bit : list) mFlags |=
static_cast<Type>(1 << bit);
938 template<
typename MaskT>
939 BitFlags(std::initializer_list<MaskT> list)
941 for (
auto mask : list) mFlags |=
static_cast<Type>(mask);
945 __hostdev__ void initBit(std::initializer_list<uint8_t> list)
948 for (
auto bit : list) mFlags |=
static_cast<Type>(1 << bit);
950 template<
typename MaskT>
951 __hostdev__ void initMask(std::initializer_list<MaskT> list)
954 for (
auto mask : list) mFlags |=
static_cast<Type>(mask);
963 __hostdev__ void setBitOn(uint8_t bit) { mFlags |=
static_cast<Type>(1 << bit); }
964 __hostdev__ void setBitOff(uint8_t bit) { mFlags &= ~static_cast<
Type>(1 << bit); }
966 __hostdev__ void setBitOn(std::initializer_list<uint8_t> list)
968 for (
auto bit : list) mFlags |=
static_cast<Type>(1 << bit);
970 __hostdev__ void setBitOff(std::initializer_list<uint8_t> list)
972 for (
auto bit : list) mFlags &= ~static_cast<
Type>(1 << bit);
975 template<
typename MaskT>
976 __hostdev__ void setMaskOn(MaskT mask) { mFlags |=
static_cast<Type>(mask); }
977 template<
typename MaskT>
978 __hostdev__ void setMaskOff(MaskT mask) { mFlags &= ~static_cast<
Type>(mask); }
980 template<
typename MaskT>
981 __hostdev__ void setMaskOn(std::initializer_list<MaskT> list)
983 for (
auto mask : list) mFlags |=
static_cast<Type>(mask);
985 template<
typename MaskT>
986 __hostdev__ void setMaskOff(std::initializer_list<MaskT> list)
988 for (
auto mask : list) mFlags &= ~static_cast<
Type>(mask);
991 __hostdev__ void setBit(uint8_t bit,
bool on) { on ? this->setBitOn(bit) : this->setBitOff(bit); }
992 template<
typename MaskT>
993 __hostdev__ void setMask(MaskT mask,
bool on) { on ? this->setMaskOn(mask) : this->setMaskOff(mask); }
997 __hostdev__ bool isBitOn(uint8_t bit)
const {
return 0 != (mFlags &
static_cast<Type>(1 << bit)); }
998 __hostdev__ bool isBitOff(uint8_t bit)
const {
return 0 == (mFlags &
static_cast<Type>(1 << bit)); }
999 template<
typename MaskT>
1000 __hostdev__ bool isMaskOn(MaskT mask)
const {
return 0 != (mFlags &
static_cast<Type>(mask)); }
1001 template<
typename MaskT>
1002 __hostdev__ bool isMaskOff(MaskT mask)
const {
return 0 == (mFlags &
static_cast<Type>(mask)); }
1004 template<
typename MaskT>
1005 __hostdev__ bool isMaskOn(std::initializer_list<MaskT> list)
const 1007 for (
auto mask : list) {
1008 if (0 != (mFlags & static_cast<Type>(mask)))
return true;
1013 template<
typename MaskT>
1014 __hostdev__ bool isMaskOff(std::initializer_list<MaskT> list)
const 1016 for (
auto mask : list) {
1017 if (0 == (mFlags & static_cast<Type>(mask)))
return true;
1033 template<u
int32_t LOG2DIM>
1037 static constexpr uint32_t SIZE = 1U << (3 * LOG2DIM);
1038 static constexpr uint32_t WORD_COUNT = SIZE >> 6;
1053 for (
const uint64_t *w = mWords, *q = w + WORD_COUNT; w != q; ++w)
1061 uint32_t n = i >> 6, sum =
util::countOn(mWords[n] & ((uint64_t(1) << (i & 63u)) - 1u));
1062 for (
const uint64_t* w = mWords; n--; ++w)
1087 mPos = mParent->findNext<On>(mPos + 1);
1099 const Mask* mParent;
1141 for (uint32_t i = 0; i < WORD_COUNT; ++i)
1146 const uint64_t v = on ? ~uint64_t(0) : uint64_t(0);
1147 for (uint32_t i = 0; i < WORD_COUNT; ++i)
1154 for (uint32_t i = 0; i < WORD_COUNT; ++i)
1155 mWords[i] = other.mWords[i];
1163 template<
typename MaskT = Mask>
1166 static_assert(
sizeof(
Mask) ==
sizeof(MaskT),
"Mismatching sizeof");
1167 static_assert(WORD_COUNT == MaskT::WORD_COUNT,
"Mismatching word count");
1168 static_assert(LOG2DIM == MaskT::LOG2DIM,
"Mismatching LOG2DIM");
1169 auto* src =
reinterpret_cast<const uint64_t*
>(&other);
1170 for (uint64_t *dst = mWords, *end = dst + WORD_COUNT; dst != end; ++dst)
1176 Mask& operator=(
const Mask&) =
default;
1180 for (uint32_t i = 0; i < WORD_COUNT; ++i) {
1181 if (mWords[i] != other.mWords[i])
1190 __hostdev__ bool isOn(uint32_t n)
const {
return 0 != (mWords[n >> 6] & (uint64_t(1) << (n & 63))); }
1193 __hostdev__ bool isOff(uint32_t n)
const {
return 0 == (mWords[n >> 6] & (uint64_t(1) << (n & 63))); }
1198 for (uint32_t i = 0; i < WORD_COUNT; ++i)
1199 if (mWords[i] != ~uint64_t(0))
1207 for (uint32_t i = 0; i < WORD_COUNT; ++i)
1208 if (mWords[i] != uint64_t(0))
1218 #if defined(__CUDACC__) // the following functions only run on the GPU! 1219 __device__ inline void setOnAtomic(uint32_t n)
1221 atomicOr(reinterpret_cast<unsigned long long int*>(
this) + (n >> 6), 1ull << (n & 63));
1223 __device__ inline void setOffAtomic(uint32_t n)
1225 atomicAnd(reinterpret_cast<unsigned long long int*>(
this) + (n >> 6), ~(1ull << (n & 63)));
1227 __device__ inline void setAtomic(uint32_t n,
bool on)
1229 on ? this->setOnAtomic(n) : this->setOffAtomic(n);
1235 #if 1 // switch between branchless 1236 auto& word = mWords[n >> 6];
1238 word &= ~(uint64_t(1) << n);
1239 word |= uint64_t(on) << n;
1241 on ? this->setOn(n) : this->setOff(n);
1248 for (uint32_t i = 0; i < WORD_COUNT; ++i)mWords[i] = ~uint64_t(0);
1254 for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = uint64_t(0);
1260 const uint64_t v = on ? ~uint64_t(0) : uint64_t(0);
1261 for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = v;
1266 uint32_t n = WORD_COUNT;
1267 for (
auto* w = mWords; n--; ++w) *w = ~*w;
1274 uint64_t* w1 = mWords;
1275 const uint64_t* w2 = other.mWords;
1276 for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= *w2;
1282 uint64_t* w1 = mWords;
1283 const uint64_t* w2 = other.mWords;
1284 for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 |= *w2;
1290 uint64_t* w1 = mWords;
1291 const uint64_t* w2 = other.mWords;
1292 for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= ~*w2;
1298 uint64_t* w1 = mWords;
1299 const uint64_t* w2 = other.mWords;
1300 for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 ^= *w2;
1309 const uint64_t* w = mWords;
1310 for (; n < WORD_COUNT && !(ON ? *w : ~*w); ++w, ++n);
1318 uint32_t n = start >> 6;
1319 if (n >= WORD_COUNT)
return SIZE;
1320 uint32_t m = start & 63u;
1321 uint64_t b = ON ? mWords[n] : ~mWords[n];
1322 if (b & (uint64_t(1u) << m))
return start;
1323 b &= ~uint64_t(0u) << m;
1324 while (!b && ++n < WORD_COUNT) b = ON ? mWords[n] : ~mWords[n];
1332 uint32_t n = start >> 6;
1333 if (n >= WORD_COUNT)
return SIZE;
1334 uint32_t m = start & 63u;
1335 uint64_t b = ON ? mWords[n] : ~mWords[n];
1336 if (b & (uint64_t(1u) << m))
return start;
1337 b &= (uint64_t(1u) << m) - 1u;
1338 while (!b && n) b = ON ? mWords[--n] : ~mWords[--n];
1343 uint64_t mWords[WORD_COUNT];
1362 : mMatF{ 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}
1363 , mInvMatF{1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}
1364 , mVecF{0.0f, 0.0f, 0.0f}
1366 , mMatD{ 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}
1367 , mInvMatD{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}
1368 , mVecD{0.0, 0.0, 0.0}
1373 : mMatF{float(s), 0.0f, 0.0f, 0.0f, float(s), 0.0f, 0.0f, 0.0f, float(s)}
1374 , mInvMatF{1.0f / float(s), 0.0f, 0.0f, 0.0f, 1.0f / float(s), 0.0f, 0.0f, 0.0f, 1.0f / float(s)}
1375 , mVecF{float(t[0]), float(t[1]), float(t[2])}
1377 , mMatD{s, 0.0, 0.0, 0.0, s, 0.0, 0.0, 0.0, s}
1378 , mInvMatD{1.0 / s, 0.0, 0.0, 0.0, 1.0 / s, 0.0, 0.0, 0.0, 1.0 / s}
1379 , mVecD{t[0], t[1], t[2]}
1386 template<
typename MatT,
typename Vec3T>
1387 void set(
const MatT& mat,
const MatT& invMat,
const Vec3T& translate,
double taper = 1.0);
1392 template<
typename Mat4T>
1393 void set(
const Mat4T& mat,
const Mat4T& invMat,
double taper = 1.0) { this->
set(mat, invMat, mat[3], taper); }
1395 template<
typename Vec3T>
1396 void set(
double scale,
const Vec3T& translation,
double taper = 1.0);
1403 template<
typename Vec3T>
1411 template<
typename Vec3T>
1420 template<
typename Vec3T>
1429 template<
typename Vec3T>
1437 template<
typename Vec3T>
1440 return math::matMult(mInvMatD, Vec3T(xyz[0] - mVecD[0], xyz[1] - mVecD[1], xyz[2] - mVecD[2]));
1448 template<
typename Vec3T>
1451 return math::matMult(mInvMatF, Vec3T(xyz[0] - mVecF[0], xyz[1] - mVecF[1], xyz[2] - mVecF[2]));
1460 template<
typename Vec3T>
1469 template<
typename Vec3T>
1478 template<
typename Vec3T>
1480 template<
typename Vec3T>
1487 template<
typename MatT,
typename Vec3T>
1488 inline void Map::set(
const MatT& mat,
const MatT& invMat,
const Vec3T& translate,
double taper)
1490 float * mf = mMatF, *vf = mVecF, *mif = mInvMatF;
1491 double *md = mMatD, *vd = mVecD, *mid = mInvMatD;
1492 mTaperF =
static_cast<float>(taper);
1494 for (
int i = 0; i < 3; ++i) {
1495 *vd++ = translate[i];
1496 *vf++ =
static_cast<float>(translate[i]);
1497 for (
int j = 0; j < 3; ++j) {
1499 *mid++ = invMat[j][i];
1500 *mf++ =
static_cast<float>(mat[j][i]);
1501 *mif++ =
static_cast<float>(invMat[j][i]);
1506 template<
typename Vec3T>
1507 inline void Map::set(
double dx,
const Vec3T& trans,
double taper)
1510 const double mat[3][3] = { {dx, 0.0, 0.0},
1513 const double idx = 1.0 / dx;
1514 const double invMat[3][3] = { {idx, 0.0, 0.0},
1517 this->
set(mat, invMat, trans, taper);
1524 static const int MaxNameSize = 256;
1531 char mName[MaxNameSize];
1549 template<
typename BlindDataT>
1553 return mDataType == toGridType<BlindDataT>() ? util::PtrAdd<BlindDataT>(
this, mDataOffset) :
nullptr;
1559 auto check = [&]()->
bool{
1577 default:
return true;}
1587 return math::AlignUp<NANOVDB_DATA_ALIGNMENT>(mValueCount * mValueSize);
1595 template<
typename Gr
idOrTreeOrRootT,
int LEVEL>
1599 template<
typename Gr
idOrTreeOrRootT>
1602 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1603 using Type =
typename GridOrTreeOrRootT::LeafNodeType;
1604 using type =
typename GridOrTreeOrRootT::LeafNodeType;
1606 template<
typename Gr
idOrTreeOrRootT>
1609 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1610 using Type =
const typename GridOrTreeOrRootT::LeafNodeType;
1611 using type =
const typename GridOrTreeOrRootT::LeafNodeType;
1614 template<
typename Gr
idOrTreeOrRootT>
1617 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1618 using Type =
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
1619 using type =
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
1621 template<
typename Gr
idOrTreeOrRootT>
1624 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1625 using Type =
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
1626 using type =
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
1628 template<
typename Gr
idOrTreeOrRootT>
1631 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1632 using Type =
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
1633 using type =
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
1635 template<
typename Gr
idOrTreeOrRootT>
1638 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1639 using Type =
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
1640 using type =
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
1642 template<
typename Gr
idOrTreeOrRootT>
1645 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1646 using Type =
typename GridOrTreeOrRootT::RootNodeType;
1647 using type =
typename GridOrTreeOrRootT::RootNodeType;
1650 template<
typename Gr
idOrTreeOrRootT>
1653 static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3,
"Tree depth is not supported");
1654 using Type =
const typename GridOrTreeOrRootT::RootNodeType;
1655 using type =
const typename GridOrTreeOrRootT::RootNodeType;
1660 template<
typename BuildT>
1662 template<
typename BuildT>
1664 template<
typename BuildT>
1666 template<
typename BuildT>
1668 template<
typename BuildT>
1670 template<
typename BuildT>
1672 template<
typename BuildT>
1674 template<
typename BuildT>
1712 union { uint32_t mCRC32[2]; uint64_t
mCRC64; };
1716 static constexpr uint32_t EMPTY32 = ~uint32_t{0};
1717 static constexpr uint64_t EMPTY64 = ~uint64_t(0);
1736 [[deprecated(
"Use Checksum::data instead.")]]
1738 [[deprecated(
"Use Checksum::head and Ckecksum::tail instead.")]]
1740 [[deprecated(
"Use Checksum::head and Ckecksum::tail instead.")]]
1751 [[deprecated(
"Use Checksum::isHalf instead.")]]
1812 static const int MaxNameSize = 256;
1820 char mGridName[MaxNameSize];
1835 uint64_t gridSize = 0u,
1840 #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS 1847 mFlags.initMask(list);
1850 mGridSize = gridSize;
1851 mGridName[0] =
'\0';
1853 mWorldBBox = Vec3dBBox();
1854 mVoxelSize = map.getVoxelSize();
1855 mGridClass = gridClass;
1856 mGridType = gridType;
1857 mBlindMetadataOffset = mGridSize;
1858 mBlindMetadataCount = 0u;
1872 if (test) test = mGridCount > 0u && mGridIndex < mGridCount;
1884 const bool success = (
util::strncpy(mGridName, src, MaxNameSize)[MaxNameSize-1] ==
'\0');
1885 if (!success) mGridName[MaxNameSize-1] =
'\0';
1889 template<
typename Vec3T>
1891 template<
typename Vec3T>
1893 template<
typename Vec3T>
1895 template<
typename Vec3T>
1897 template<
typename Vec3T>
1900 template<
typename Vec3T>
1902 template<
typename Vec3T>
1904 template<
typename Vec3T>
1906 template<
typename Vec3T>
1908 template<
typename Vec3T>
1919 template <u
int32_t LEVEL>
1922 static_assert(LEVEL >= 0 && LEVEL <= 3,
"invalid LEVEL template parameter");
1923 const void *treeData =
this + 1;
1924 const uint64_t nodeOffset = *util::PtrAdd<uint64_t>(treeData, 8*LEVEL);
1925 return nodeOffset ?
util::PtrAdd(treeData, nodeOffset) :
nullptr;
1931 template <u
int32_t LEVEL>
1934 static_assert(LEVEL >= 0 && LEVEL <= 3,
"invalid LEVEL template parameter");
1935 void *treeData =
this + 1;
1936 const uint64_t nodeOffset = *util::PtrAdd<uint64_t>(treeData, 8*LEVEL);
1937 return nodeOffset ?
util::PtrAdd(treeData, nodeOffset) :
nullptr;
1942 template <u
int32_t LEVEL>
1945 static_assert(LEVEL >= 0 && LEVEL < 3,
"invalid LEVEL template parameter");
1946 return *util::PtrAdd<uint32_t>(
this + 1, 4*(8 + LEVEL));
1955 return util::PtrAdd<GridBlindMetaData>(
this, mBlindMetadataOffset) + n;
1962 for (uint32_t i = 0; i < mBlindMetadataCount; ++i) {
1963 const auto* metaData = this->blindMetaData(i);
1966 return metaData->template getBlindData<const char>();
1986 const void *root = this->nodePtr<3>();
1987 return root ? *util::PtrAdd<uint32_t>(root,
sizeof(CoordBBox)) : 0u;
2000 template<
typename BuildT,
int LEVEL0 = -1,
int LEVEL1 = -1,
int LEVEL2 = -1>
2003 template<
typename BuildT>
2010 template<
typename TreeT>
2030 Grid& operator=(
const Grid&) =
delete;
2054 template<
typename T = BuildType>
2061 template<
typename T = BuildType>
2066 __hostdev__ const TreeT&
tree()
const {
return *
reinterpret_cast<const TreeT*
>(this->treePtr()); }
2081 template<
typename Vec3T>
2085 template<
typename Vec3T>
2090 template<
typename Vec3T>
2095 template<
typename Vec3T>
2100 template<
typename Vec3T>
2104 template<
typename Vec3T>
2108 template<
typename Vec3T>
2113 template<
typename Vec3T>
2118 template<
typename Vec3T>
2123 template<
typename Vec3T>
2159 template<
typename NodeT>
2168 __hostdev__ bool isSequential()
const {
return UpperNodeType::FIXED_SIZE && LowerNodeType::FIXED_SIZE && LeafNodeType::FIXED_SIZE && this->isBreadthFirst(); }
2186 __hostdev__ int findBlindData(
const char* name)
const;
2195 [[deprecated(
"Use Grid::getBlindData<T>() instead.")]]
2198 printf(
"\nnanovdb::Grid::blindData is unsafe and hence deprecated! Please use nanovdb::Grid::getBlindData instead.\n\n");
2200 return this->blindMetaData(n).blindData();
2203 template <
typename BlindDataT>
2206 if (n >= DataType::mBlindMetadataCount)
return nullptr;
2207 return this->blindMetaData(n).template getBlindData<BlindDataT>();
2210 template <
typename BlindDataT>
2213 if (n >= DataType::mBlindMetadataCount)
return nullptr;
2214 return const_cast<BlindDataT*
>(this->blindMetaData(n).template getBlindData<BlindDataT>());
2223 template<
typename TreeT>
2226 for (uint32_t i = 0, n = this->blindDataCount(); i < n; ++i) {
2227 if (this->blindMetaData(i).mSemantic == semantic)
2233 template<
typename TreeT>
2236 auto test = [&](
int n) {
2237 const char* str = this->blindMetaData(n).mName;
2239 if (name[i] != str[i])
2241 if (name[i] ==
'\0' && str[i] ==
'\0')
2246 for (
int i = 0, n = this->blindDataCount(); i < n; ++i)
2256 int64_t mNodeOffset[4];
2257 uint32_t mNodeCount[3];
2258 uint32_t mTileCount[3];
2274 template<
typename NodeT>
2278 __hostdev__ bool isEmpty()
const {
return mNodeOffset[3] ? *util::PtrAdd<uint32_t>(
this, mNodeOffset[3] +
sizeof(CoordBBox)) == 0 :
true;}
2281 __hostdev__ CoordBBox
bbox()
const {
return mNodeOffset[3] ? *util::PtrAdd<CoordBBox>(
this, mNodeOffset[3]) : CoordBBox();}
2290 template<
typename Gr
idT>
2293 using Type =
typename GridT::TreeType;
2294 using type =
typename GridT::TreeType;
2296 template<
typename Gr
idT>
2299 using Type =
const typename GridT::TreeType;
2300 using type =
const typename GridT::TreeType;
2306 template<
typename RootT>
2309 static_assert(RootT::LEVEL == 3,
"Tree depth is not supported");
2310 static_assert(RootT::ChildNodeType::LOG2DIM == 5,
"Tree configuration is not supported");
2311 static_assert(RootT::ChildNodeType::ChildNodeType::LOG2DIM == 4,
"Tree configuration is not supported");
2312 static_assert(RootT::LeafNodeType::LOG2DIM == 3,
"Tree configuration is not supported");
2327 using Node2 =
typename RootT::ChildNodeType;
2328 using Node1 =
typename Node2::ChildNodeType;
2334 Tree& operator=(
const Tree&) =
delete;
2346 __hostdev__ const RootT&
root()
const {
return *
reinterpret_cast<const RootT*
>(DataType::getRoot());}
2383 return DataType::mTileCount[level - 1];
2386 template<
typename NodeT>
2389 static_assert(NodeT::LEVEL < 3,
"Invalid NodeT");
2390 return DataType::mNodeCount[NodeT::LEVEL];
2396 return DataType::mNodeCount[level];
2401 return DataType::mNodeCount[0] + DataType::mNodeCount[1] + DataType::mNodeCount[2];
2407 template<
typename NodeT>
2410 const int64_t nodeOffset = DataType::mNodeOffset[NodeT::LEVEL];
2411 return nodeOffset ? util::PtrAdd<NodeT>(
this, nodeOffset) :
nullptr;
2417 template<
typename NodeT>
2420 const int64_t nodeOffset = DataType::mNodeOffset[NodeT::LEVEL];
2421 return nodeOffset ? util::PtrAdd<NodeT>(
this, nodeOffset) :
nullptr;
2430 return this->
template getFirstNode<typename NodeTrait<RootT, LEVEL>::type>();
2439 return this->
template getFirstNode<typename NodeTrait<RootT, LEVEL>::type>();
2450 template<
typename OpT,
typename... ArgsT>
2453 return this->root().template get<OpT>(ijk, args...);
2456 template<
typename OpT,
typename... ArgsT>
2459 return this->root().template set<OpT>(ijk, args...);
2467 template<
typename RootT>
2470 min = this->root().minimum();
2471 max = this->root().maximum();
2479 template<
typename ChildT>
2486 static constexpr
bool FIXED_SIZE =
false;
2489 #ifdef NANOVDB_USE_SINGLE_ROOT_KEY 2491 template<
typename CoordType>
2494 static_assert(
sizeof(
CoordT) ==
sizeof(CoordType),
"Mismatching sizeof");
2495 static_assert(32 - ChildT::TOTAL <= 21,
"Cannot use 64 bit root keys");
2496 return (
KeyT(uint32_t(ijk[2]) >> ChildT::TOTAL)) |
2497 (
KeyT(uint32_t(ijk[1]) >> ChildT::TOTAL) << 21) |
2498 (
KeyT(uint32_t(ijk[0]) >> ChildT::TOTAL) << 42);
2502 static constexpr uint64_t MASK = (1u << 21) - 1;
2503 return CoordT(((key >> 42) & MASK) << ChildT::TOTAL,
2504 ((key >> 21) & MASK) << ChildT::TOTAL,
2505 (key & MASK) << ChildT::TOTAL);
2529 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
Tile 2531 template<
typename CoordType>
2534 key = CoordToKey(k);
2538 template<
typename CoordType,
typename ValueType>
2541 key = CoordToKey(k);
2562 return reinterpret_cast<const Tile*
>(
this + 1) + n;
2567 return reinterpret_cast<Tile*
>(
this + 1) + n;
2572 #if 1 // switch between linear and binary seach 2573 const auto key = CoordToKey(ijk);
2574 for (
Tile *p = reinterpret_cast<Tile*>(
this + 1), *q = p + mTableSize; p < q; ++p)
2578 #else // do not enable binary search if tiles are not guaranteed to be sorted!!!!!! 2579 int32_t low = 0, high = mTableSize;
2580 while (low != high) {
2581 int mid = low + ((high - low) >> 1);
2582 const Tile* tile = &tiles[mid];
2583 if (tile->
key == key) {
2585 }
else if (tile->
key < key) {
2597 return const_cast<RootData*
>(
this)->probeTile(ijk);
2606 return util::PtrAdd<ChildT>(
this, tile->
child);
2611 return util::PtrAdd<ChildT>(
this, tile->
child);
2634 template<
typename ChildT>
2653 static constexpr
bool FIXED_SIZE = DataType::FIXED_SIZE;
2655 static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
2657 template<
typename RootT>
2680 return this->tile()->origin();
2685 return this->tile()->origin();
2689 template<
typename RootT>
2702 :
BaseT(parent->data(), parent->tileCount())
2705 while (*
this && !this->tile()->isChild())
2711 return *BaseT::mData->getChild(this->tile());
2716 return BaseT::mData->getChild(this->tile());
2722 while (*
this && this->tile()->isValue())
2740 template<
typename RootT>
2751 :
BaseT(parent->data(), parent->tileCount())
2754 while (*
this && this->tile()->isChild())
2760 return this->tile()->value;
2765 return this->tile()->state;
2771 while (*
this && this->tile()->isChild())
2789 template<
typename RootT>
2800 :
BaseT(parent->data(), parent->tileCount())
2803 while (*
this && !this->tile()->isActive())
2809 return this->tile()->value;
2815 while (*
this && !this->tile()->isActive())
2833 template<
typename RootT>
2845 :
BaseT(parent->data(), parent->tileCount())
2852 NodeT* child =
nullptr;
2853 auto* t = this->tile();
2855 child = BaseT::mData->getChild(t);
2864 return this->tile()->state;
2936 #ifdef NANOVDB_NEW_ACCESSOR_METHODS 2944 #else // NANOVDB_NEW_ACCESSOR_METHODS 2949 if (
const Tile* tile = DataType::probeTile(ijk)) {
2950 return tile->isChild() ? this->getChild(tile)->getValue(ijk) : tile->value;
2952 return DataType::mBackground;
2958 if (
const Tile* tile = DataType::probeTile(ijk)) {
2959 return tile->isChild() ? this->getChild(tile)->isActive(ijk) : tile->state;
2966 if (
const Tile* tile = DataType::probeTile(ijk)) {
2967 if (tile->isChild()) {
2968 const auto* child = this->getChild(tile);
2969 return child->probeValue(ijk, v);
2974 v = DataType::mBackground;
2980 const Tile* tile = DataType::probeTile(ijk);
2981 if (tile && tile->isChild()) {
2982 const auto* child = this->getChild(tile);
2983 return child->probeLeaf(ijk);
2988 #endif // NANOVDB_NEW_ACCESSOR_METHODS 2992 const Tile* tile = DataType::probeTile(ijk);
2993 return tile && tile->isChild() ? this->getChild(tile) :
nullptr;
2998 const Tile* tile = DataType::probeTile(ijk);
2999 return tile && tile->isChild() ? this->getChild(tile) :
nullptr;
3002 template<
typename OpT,
typename... ArgsT>
3005 if (
const Tile* tile = this->probeTile(ijk)) {
3006 if (tile->isChild())
3007 return this->getChild(tile)->template get<OpT>(ijk, args...);
3008 return OpT::get(*tile, args...);
3010 return OpT::get(*
this, args...);
3013 template<
typename OpT,
typename... ArgsT>
3015 __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...))
3018 if (
Tile* tile = DataType::probeTile(ijk)) {
3019 if (tile->isChild())
3020 return this->getChild(tile)->template set<OpT>(ijk, args...);
3021 return OpT::set(*tile, args...);
3023 return OpT::set(*
this, args...);
3030 template<
typename,
int,
int,
int>
3035 #ifndef NANOVDB_NEW_ACCESSOR_METHODS 3037 template<
typename AccT>
3038 __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(
const CoordType& ijk,
const AccT& acc)
const 3040 using NodeInfoT =
typename AccT::NodeInfo;
3041 if (
const Tile* tile = this->probeTile(ijk)) {
3042 if (tile->isChild()) {
3043 const auto* child = this->getChild(tile);
3044 acc.insert(ijk, child);
3045 return child->getNodeInfoAndCache(ijk, acc);
3047 return NodeInfoT{LEVEL, ChildT::dim(), tile->value, tile->value, tile->value, 0, tile->origin(), tile->origin() +
CoordType(ChildT::DIM)};
3049 return NodeInfoT{LEVEL, ChildT::dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
3053 template<
typename AccT>
3056 if (
const Tile* tile = this->probeTile(ijk)) {
3057 if (tile->isChild()) {
3058 const auto* child = this->getChild(tile);
3059 acc.insert(ijk, child);
3060 return child->getValueAndCache(ijk, acc);
3064 return DataType::mBackground;
3067 template<
typename AccT>
3070 const Tile* tile = this->probeTile(ijk);
3071 if (tile && tile->isChild()) {
3072 const auto* child = this->getChild(tile);
3073 acc.insert(ijk, child);
3074 return child->isActiveAndCache(ijk, acc);
3079 template<
typename AccT>
3082 if (
const Tile* tile = this->probeTile(ijk)) {
3083 if (tile->isChild()) {
3084 const auto* child = this->getChild(tile);
3085 acc.insert(ijk, child);
3086 return child->probeValueAndCache(ijk, v, acc);
3091 v = DataType::mBackground;
3095 template<
typename AccT>
3098 const Tile* tile = this->probeTile(ijk);
3099 if (tile && tile->isChild()) {
3100 const auto* child = this->getChild(tile);
3101 acc.insert(ijk, child);
3102 return child->probeLeafAndCache(ijk, acc);
3106 #endif // NANOVDB_NEW_ACCESSOR_METHODS 3108 template<
typename RayT,
typename AccT>
3109 __hostdev__ uint32_t getDimAndCache(
const CoordType& ijk,
const RayT& ray,
const AccT& acc)
const 3111 if (
const Tile* tile = this->probeTile(ijk)) {
3112 if (tile->isChild()) {
3113 const auto* child = this->getChild(tile);
3114 acc.insert(ijk, child);
3115 return child->getDimAndCache(ijk, ray, acc);
3117 return 1 << ChildT::TOTAL;
3119 return ChildNodeType::dim();
3122 template<
typename OpT,
typename AccT,
typename... ArgsT>
3125 getAndCache(
const CoordType& ijk,
const AccT& acc, ArgsT&&... args)
const 3127 if (
const Tile* tile = this->probeTile(ijk)) {
3128 if (tile->isChild()) {
3129 const ChildT* child = this->getChild(tile);
3130 acc.insert(ijk, child);
3131 return child->template getAndCache<OpT>(ijk, acc, args...);
3133 return OpT::get(*tile, args...);
3135 return OpT::get(*
this, args...);
3138 template<
typename OpT,
typename AccT,
typename... ArgsT>
3140 __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...))
3141 setAndCache(const
CoordType& ijk, const AccT& acc, ArgsT&&... args)
3143 if (
Tile* tile = DataType::probeTile(ijk)) {
3144 if (tile->isChild()) {
3145 ChildT* child = this->getChild(tile);
3146 acc.insert(ijk, child);
3147 return child->template setAndCache<OpT>(ijk, acc, args...);
3149 return OpT::set(*tile, args...);
3151 return OpT::set(*
this, args...);
3163 template<
typename ChildT, u
int32_t LOG2DIM>
3170 using MaskT =
typename ChildT::template MaskType<LOG2DIM>;
3171 static constexpr
bool FIXED_SIZE =
true;
3180 Tile& operator=(
const Tile&) =
delete;
3202 alignas(32) Tile mTable[1u << (3 * LOG2DIM)];
3212 template<
typename ValueT>
3216 mTable[n].value = v;
3223 return util::PtrAdd<ChildT>(
this, mTable[n].child);
3228 return util::PtrAdd<ChildT>(
this, mTable[n].child);
3234 return mTable[n].value;
3240 return mValueMask.isOn(n);
3245 template<
typename T>
3253 #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__) 3254 #pragma GCC diagnostic push 3255 #pragma GCC diagnostic ignored "-Wstringop-overflow" 3261 #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__) 3262 #pragma GCC diagnostic pop 3273 template<
// InternalNode template header and its compile-time geometry constants.
// By default each level doubles the log2 dimension of its child.
typename ChildT, u
int32_t Log2Dim = ChildT::LOG2DIM + 1>
3284 static constexpr
bool FIXED_SIZE = DataType::FIXED_SIZE;
3285 template<u
int32_t LOG2>
// LOG2DIM: log2 of tile count per axis at this level.
3290 static constexpr uint32_t LOG2DIM = Log2Dim;
// TOTAL: log2 of voxel count per axis including all descendant levels.
3291 static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL;
// DIM: voxel extent of this node along one axis.
3292 static constexpr uint32_t DIM = 1u << TOTAL;
// SIZE: number of tile entries in the node (table length).
3293 static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM);
// MASK: bit mask selecting a coordinate's offset within this node's domain.
3294 static constexpr uint32_t MASK = (1u << TOTAL) - 1u;
// LEVEL: tree depth of this node type (leaf = child level + 1 here).
3295 static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
// NUM_VALUES: total voxels covered by one node (64-bit to avoid overflow).
3296 static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL);
// Fragments of the node's iterator classes (child / off-value / on-value /
// dense). Each wraps a mask iterator (beginOn/beginOff) and dereferences
// through the parent's tile table. NOTE(review): extraction-damaged — the
// class names, constructors, and operator signatures were largely dropped.
3299 template <
typename ParentT>
// Child iterator: walks set bits of mChildMask.
3314 : BaseT(parent->mChildMask.beginOn())
3322 return *mParent->getChild(BaseT::pos());
3327 return mParent->getChild(BaseT::pos());
// origin() of the child the iterator currently points at.
3332 return (*this)->origin();
// Value iterator: walks cleared bits of mChildMask (tiles without children).
3356 : BaseT(parent->data()->mChildMask.beginOff())
// Active-value iterator: walks set bits of mValueMask.
3395 : BaseT(parent->data()->mValueMask.beginOn())
3430 , mParent(parent->data())
// Dense iterator probe: yields the child if present, else the tile value.
3437 const ChildT* child =
nullptr;
3438 if (mParent->
mChildMask.isOn(BaseT::pos())) {
3439 child = mParent->
getChild(BaseT::pos());
3441 value = mParent->
getValue(BaseT::pos());
3448 return mParent->
isActive(BaseT::pos());
// Global ijk coordinate of the iterator's current table slot.
3453 return mParent->offsetToGlobalCoord(BaseT::pos());
// First/last value in breadth order: descend into child 0 / SIZE-1 when one
// exists, otherwise read the inline tile value.
3510 return DataType::mChildMask.isOn(0) ? this->getChild(0)->getFirstValue() : DataType::getValue(0);
3517 return DataType::mChildMask.isOn(SIZE - 1) ? this->getChild(SIZE - 1)->getLastValue() : DataType::getValue(SIZE - 1);
// Legacy (pre-OpT) accessors, compiled only when NANOVDB_NEW_ACCESSOR_METHODS
// is NOT defined. Each maps ijk to a table slot, then reads the tile or
// recurses into the child.
3520 #ifdef NANOVDB_NEW_ACCESSOR_METHODS 3527 #else // NANOVDB_NEW_ACCESSOR_METHODS 3530 const uint32_t n = CoordToOffset(ijk);
3531 return DataType::mChildMask.isOn(n) ? this->getChild(n)->getValue(ijk) : DataType::getValue(n);
3535 const uint32_t n = CoordToOffset(ijk);
3536 return DataType::mChildMask.isOn(n) ? this->getChild(n)->isActive(ijk) : DataType::isActive(n);
// probeValue: writes the value into v and returns its active state.
3540 const uint32_t n = CoordToOffset(ijk);
3541 if (DataType::mChildMask.isOn(n))
3542 return this->getChild(n)->probeValue(ijk, v);
3543 v = DataType::getValue(n);
3544 return DataType::isActive(n);
// probeLeaf: descends toward the leaf containing ijk (tile case dropped here;
// presumably returns nullptr — TODO confirm against the full header).
3548 const uint32_t n = CoordToOffset(ijk);
3549 if (DataType::mChildMask.isOn(n))
3550 return this->getChild(n)->probeLeaf(ijk);
// probeChild: direct child lookup, nullptr when slot n holds a value tile
// (const and non-const overloads).
3554 #endif // NANOVDB_NEW_ACCESSOR_METHODS 3558 const uint32_t n = CoordToOffset(ijk);
3559 return DataType::mChildMask.isOn(n) ? this->getChild(n) :
nullptr;
3563 const uint32_t n = CoordToOffset(ijk);
3564 return DataType::mChildMask.isOn(n) ? this->getChild(n) :
nullptr;
// CoordToOffset: maps a global coordinate to a linear table offset. Each axis
// is masked to this node's domain, the child-level bits are dropped
// (>> ChildT::TOTAL), and the three LOG2DIM-bit fields are packed x-high,
// y-middle, z-low.
3570 return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) |
3571 (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) |
3572 ((ijk[2] & MASK) >> ChildT::TOTAL);
// OffsetToLocalCoord: exact inverse of the packing above — unpack the three
// LOG2DIM-bit fields of n back into a local (x,y,z) coordinate.
3579 const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
3580 return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1));
// localToGlobalCoord: scale a local tile coordinate up to voxel space and
// translate by this node's origin.
3586 ijk <<= ChildT::TOTAL;
3587 ijk += this->origin();
// offsetToGlobalCoord: composition of the two mappings above.
3593 this->localToGlobalCoord(ijk);
// Uncached functor dispatch: apply OpT::get at the tile covering ijk, or
// recurse into the child when one exists. NOTE(review): the get() signature
// line was dropped by extraction.
3600 template<
typename OpT,
typename... ArgsT>
3603 const uint32_t n = CoordToOffset(ijk);
3604 if (this->isChild(n))
3605 return this->getChild(n)->template get<OpT>(ijk, args...);
// Value-tile case: OpT receives this node plus the slot index n.
3606 return OpT::get(*
this, n, args...);
// Mutating variant; return type deduced from OpT::set via declval.
3609 template<
typename OpT,
typename... ArgsT>
3611 __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...))
3614 const uint32_t n = CoordToOffset(ijk);
3615 if (this->isChild(n))
3616 return this->getChild(n)->template set<OpT>(ijk, args...);
3617 return OpT::set(*
this, n, args...);
// Friend declarations — presumably ReadAccessor and Grid/Tree internals need
// access to private members (names dropped by extraction; TODO confirm).
3623 template<
typename,
int,
int,
int>
3628 template<
typename, u
int32_t>
// Legacy accessor-cached methods, only compiled when the new OpT-based
// accessor methods are disabled. All follow one pattern: map ijk to slot n;
// if the slot holds a value tile answer locally, else cache the child in acc
// and recurse.
3631 #ifndef NANOVDB_NEW_ACCESSOR_METHODS 3633 template<
typename AccT>
3636 const uint32_t n = CoordToOffset(ijk);
3637 if (DataType::mChildMask.isOff(n))
3638 return DataType::getValue(n);
3639 const ChildT* child = this->getChild(n);
3640 acc.insert(ijk, child);
3641 return child->getValueAndCache(ijk, acc);
3643 template<
typename AccT>
3646 const uint32_t n = CoordToOffset(ijk);
3647 if (DataType::mChildMask.isOff(n))
3648 return DataType::isActive(n);
3649 const ChildT* child = this->getChild(n);
3650 acc.insert(ijk, child);
3651 return child->isActiveAndCache(ijk, acc);
// probeValueAndCache: writes the value to v and returns its active state.
3653 template<
typename AccT>
3656 const uint32_t n = CoordToOffset(ijk);
3657 if (DataType::mChildMask.isOff(n)) {
3658 v = DataType::getValue(n);
3659 return DataType::isActive(n);
3661 const ChildT* child = this->getChild(n);
3662 acc.insert(ijk, child);
3663 return child->probeValueAndCache(ijk, v, acc);
// probeLeafAndCache: tile case's return statement was dropped by extraction
// (presumably nullptr — TODO confirm).
3665 template<
typename AccT>
3668 const uint32_t n = CoordToOffset(ijk);
3669 if (DataType::mChildMask.isOff(n))
3671 const ChildT* child = this->getChild(n);
3672 acc.insert(ijk, child);
3673 return child->probeLeafAndCache(ijk, acc);
// getNodeInfoAndCache: for a value tile, summarize THIS node (level, dim,
// min/max/avg/stddev, bbox); otherwise recurse into the cached child.
3675 template<
typename AccT>
3676 __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(
const CoordType& ijk,
const AccT& acc)
const 3678 using NodeInfoT =
typename AccT::NodeInfo;
3679 const uint32_t n = CoordToOffset(ijk);
3680 if (DataType::mChildMask.isOff(n)) {
3681 return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
3683 const ChildT* child = this->getChild(n);
3684 acc.insert(ijk, child);
3685 return child->getNodeInfoAndCache(ijk, acc);
// getDimAndCache: ray-traversal step size query. Returns the dimension of the
// finest node/tile covering ijk, caching any traversed child.
3687 #endif // NANOVDB_NEW_ACCESSOR_METHODS 3689 template<
typename RayT,
typename AccT>
3690 __hostdev__ uint32_t getDimAndCache(
const CoordType& ijk,
const RayT& ray,
const AccT& acc)
// Bit 0 of mFlags set triggers an early return whose statement was dropped
// by extraction (presumably this->dim() — TODO confirm against full header).
const 3692 if (DataType::mFlags & uint32_t(1u))
3696 const uint32_t n = CoordToOffset(ijk);
3697 if (DataType::mChildMask.isOn(n)) {
3698 const ChildT* child = this->getChild(n);
3699 acc.insert(ijk, child);
3700 return child->getDimAndCache(ijk, ray, acc);
// Value tile: the ray can step a whole child-node extent at once.
3702 return ChildNodeType::dim();
// Cached functor dispatch (internal-node level): same contract as the root
// version, but value tiles are addressed by slot index n rather than by Tile.
// NOTE(review): the getAndCache return-type line was dropped by extraction.
3705 template<
typename OpT,
typename AccT,
typename... ArgsT>
3708 getAndCache(
const CoordType& ijk,
const AccT& acc, ArgsT&&... args)
const 3710 const uint32_t n = CoordToOffset(ijk);
// Value tile: OpT reads this node at slot n — no recursion, nothing cached.
3711 if (DataType::mChildMask.isOff(n))
3712 return OpT::get(*
this, n, args...);
3713 const ChildT* child = this->getChild(n);
3714 acc.insert(ijk, child);
3715 return child->template getAndCache<OpT>(ijk, acc, args...);
// Mutating counterpart; return type deduced from OpT::set via declval.
3718 template<
typename OpT,
typename AccT,
typename... ArgsT>
3720 __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...))
3721 setAndCache(const
CoordType& ijk, const AccT& acc, ArgsT&&... args)
3723 const uint32_t n = CoordToOffset(ijk);
3724 if (DataType::mChildMask.isOff(n))
3725 return OpT::set(*
this, n, args...);
3726 ChildT* child = this->getChild(n);
3727 acc.insert(ijk, child);
3728 return child->template setAndCache<OpT>(ijk, acc, args...);
// Generic LeafData POD: uncompressed per-voxel value storage for a leaf node.
// NOTE(review): extraction-damaged — most member declarations between the
// visible lines were dropped.
3738 template<
typename ValueT,
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
// Layout guards: externally supplied coord/mask types must be bit-compatible
// with the native ones, since this struct is memory-mapped from files.
3741 static_assert(
sizeof(CoordT) ==
sizeof(Coord),
"Mismatching sizeof");
3742 static_assert(
sizeof(MaskT<LOG2DIM>) ==
sizeof(
Mask<LOG2DIM>),
"Mismatching sizeof");
3747 static constexpr
bool FIXED_SIZE =
true;
// Compressed bbox extent relative to the node origin (one byte per axis).
3750 uint8_t mBBoxDif[3];
// padding(): total struct size minus the sum of all member sizes (origin 12B,
// bbox 3B, flags 1B, mask, min/max + avg/stddev, and the voxel array).
3765 return sizeof(
LeafData) - (12 + 3 + 1 +
sizeof(MaskT<LOG2DIM>) + 2 * (
sizeof(ValueT) +
sizeof(
FloatType)) + (1u << (3 * LOG2DIM)) *
sizeof(ValueT));
// setValue: activate the voxel and store its value.
3775 mValueMask.setOn(offset);
3776 mValues[offset] = value;
3790 template<
typename T>
// Fill loop over the voxel array; the literal 512 assumes LOG2DIM==3
// (8^3 voxels) — TODO confirm this specialization's dimension.
3795 for (
auto *p = mValues, *q = p + 512; p != q; ++p)
// LeafFnBase: shared base for the quantized (FpX) leaf specializations.
// Stores the dequantization parameters (minimum + quantum) used by all
// derived getValue() implementations.
3809 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
// Same bit-compatibility guards as the generic LeafData.
3812 static_assert(
sizeof(CoordT) ==
sizeof(Coord),
"Mismatching sizeof");
3813 static_assert(
sizeof(MaskT<LOG2DIM>) ==
sizeof(
Mask<LOG2DIM>),
"Mismatching sizeof");
// Compressed bbox extent relative to the node origin.
3818 uint8_t mBBoxDif[3];
// padding(): base size minus members (12+3+1, mask, 2 floats, 4 uint16s).
3835 return sizeof(
LeafFnBase) - (12 + 3 + 1 +
sizeof(MaskT<LOG2DIM>) + 2 * 4 + 4 * 2);
// Quantization step: value range divided by the number of representable
// codes, i.e. (2^bitWidth - 1) levels between min and max.
3840 mQuantum = (max -
min) /
float((1 << bitWidth) - 1);
3870 template<
typename T>
// LeafData<Fp4>: 4-bit quantized leaf — two voxel codes packed per byte, so
// the code array is half the voxel count.
3879 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
3880 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
LeafData<
Fp4, CoordT, MaskT, LOG2DIM>
3886 static constexpr
bool FIXED_SIZE =
true;
// (8^LOG2DIM)/2 bytes of packed 4-bit codes, 32-byte aligned.
3887 alignas(32) uint8_t mCode[1u << (3 * LOG2DIM - 1)];
3892 static_assert(BaseT::padding() == 0,
"expected no padding in LeafFnBase");
3893 return sizeof(
LeafData) -
sizeof(
BaseT) - (1u << (3 * LOG2DIM - 1));
// getValue: extract the high or low nibble of byte i/2, then dequantize as
// code * quantum + minimum. Two equivalent decode variants are visible here;
// in the original they are presumably alternatives behind a dropped
// #if/#else — TODO confirm against the full header.
3900 const uint8_t c = mCode[i>>1];
3901 return ( (i&1) ? c >> 4 : c & uint8_t(15) )*BaseT::mQuantum + BaseT::mMinimum;
3903 return ((mCode[i >> 1] >> ((i & 1) << 2)) & uint8_t(15)) * BaseT::mQuantum + BaseT::mMinimum;
// LeafData<Fp8>: 8-bit quantized leaf — one byte code per voxel.
3916 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
3917 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
LeafData<
Fp8, CoordT, MaskT, LOG2DIM>
3923 static constexpr
bool FIXED_SIZE =
true;
// 8^LOG2DIM bytes of codes, 32-byte aligned.
3924 alignas(32) uint8_t mCode[1u << 3 * LOG2DIM];
3928 static_assert(BaseT::padding() == 0,
"expected no padding in LeafFnBase");
3929 return sizeof(
LeafData) -
sizeof(
BaseT) - (1u << 3 * LOG2DIM);
// Dequantize: code * quantum + minimum (parameters from LeafFnBase).
3935 return mCode[i] * BaseT::mQuantum + BaseT::mMinimum;
// LeafData<Fp16>: 16-bit quantized leaf — one uint16_t code per voxel.
3946 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
3947 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
LeafData<
Fp16, CoordT, MaskT, LOG2DIM>
3953 static constexpr
bool FIXED_SIZE =
true;
// 8^LOG2DIM two-byte codes, 32-byte aligned.
3954 alignas(32) uint16_t mCode[1u << 3 * LOG2DIM];
3959 static_assert(BaseT::padding() == 0,
"expected no padding in LeafFnBase");
// padding(): code array is 2 bytes per voxel, hence the factor 2.
3960 return sizeof(
LeafData) -
sizeof(
BaseT) - 2 * (1u << 3 * LOG2DIM);
// Dequantize: code * quantum + minimum (parameters from LeafFnBase).
3966 return mCode[i] * BaseT::mQuantum + BaseT::mMinimum;
// LeafData<FpN>: variable-bit-width quantized leaf. The per-leaf bit width is
// encoded in the top bits of mFlags (b = mFlags >> 5 selects 1/2/4/8/16 bits)
// and the code array lives immediately after the struct (this + 1), which is
// why FIXED_SIZE is false.
3978 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
3979 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
LeafData<
FpN, CoordT, MaskT, LOG2DIM>
3985 static constexpr
bool FIXED_SIZE =
false;
3988 static_assert(BaseT::padding() == 0,
"expected no padding in LeafFnBase");
// Branchless decode path: fetch the 16-bit word holding voxel i's code, shift
// it into place via the per-width lookup tables, then mask. NOTE(review):
// some lines of this path (final mask/return) were dropped by extraction.
3997 #ifdef NANOVDB_FPN_BRANCHLESS // faster 3998 const int b = BaseT::mFlags >> 5;
4000 uint16_t code =
reinterpret_cast<const uint16_t*
>(
this + 1)[i >> (4 - b)];
// shift[b]: index mask within a 16-bit word; mask[b]: code-value mask.
4001 const static uint8_t shift[5] = {15, 7, 3, 1, 0};
4002 const static uint16_t mask[5] = {1, 3, 15, 255, 65535};
4003 code >>= (i & shift[b]) << b;
// Alternative 32-bit-word variant of the same branchless decode.
4006 uint32_t code =
reinterpret_cast<const uint32_t*
>(
this + 1)[i >> (5 - b)];
4007 code >>= (i & ((32 >> b) - 1)) << b;
4008 code &= (1 << (1 << b)) - 1;
// Branched fallback: switch on the stored bit width and extract the code
// (case labels were dropped by extraction; cases are 1,2,4,8,16 bits).
4010 #else // use branched version (slow) 4012 auto* values =
reinterpret_cast<const uint8_t*
>(
this + 1);
4013 switch (BaseT::mFlags >> 5) {
4015 code = float((values[i >> 3] >> (i & 7)) & uint8_t(1));
4018 code = float((values[i >> 2] >> ((i & 3) << 1)) & uint8_t(3));
4021 code = float((values[i >> 1] >> ((i & 1) << 2)) & uint8_t(15));
4024 code = float(values[i]);
4027 code = float(reinterpret_cast<const uint16_t*>(values)[i]);
// Final dequantization, common to both paths.
4030 return float(code) * BaseT::mQuantum + BaseT::mMinimum;
// LeafData<bool>: boolean leaf — values stored as a bit mask (mValues.set),
// not as an array of bools.
4043 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
4044 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT)
LeafData<
bool, CoordT, MaskT, LOG2DIM>
// Bit-compatibility guards (struct is memory-mapped).
4046 static_assert(
sizeof(CoordT) ==
sizeof(Coord),
"Mismatching sizeof");
4047 static_assert(
sizeof(MaskT<LOG2DIM>) ==
sizeof(
Mask<LOG2DIM>),
"Mismatching sizeof");
4052 static constexpr
bool FIXED_SIZE =
true;
4055 uint8_t mBBoxDif[3];
// Explicit tail padding to keep the on-disk/in-memory layout stable.
4059 uint64_t mPadding[2];
// setValue: mark the voxel active and record its boolean in the value mask.
4071 mValueMask.setOn(offset);
4072 mValues.set(offset, v);
4080 template<
typename T>
// LeafData specialization with no value payload (presumably the ValueMask
// leaf, which encodes occupancy only — TODO confirm; the specialization
// argument line was dropped by extraction).
4093 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
// Bit-compatibility guards (struct is memory-mapped).
4096 static_assert(
sizeof(CoordT) ==
sizeof(Coord),
"Mismatching sizeof");
4097 static_assert(
sizeof(MaskT<LOG2DIM>) ==
sizeof(
Mask<LOG2DIM>),
"Mismatching sizeof");
4102 static constexpr
bool FIXED_SIZE =
true;
4105 uint8_t mBBoxDif[3];
// Explicit tail padding for stable layout.
4108 uint64_t mPadding[2];
// padding(): members are origin 12B, bbox 3B, flags 1B, mask, 2x8B padding.
4114 return sizeof(
LeafData) - (12u + 3u + 1u +
sizeof(MaskT<LOG2DIM>) + 2 * 8u);
4129 template<
typename T>
// LeafIndexBase: shared base for index-typed leaves, which store offsets into
// external value arrays instead of voxel values. NOTE(review): the struct
// name appears only in the padding() expression; member declarations between
// the visible lines were dropped by extraction.
4142 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
// Bit-compatibility guards (struct is memory-mapped).
4145 static_assert(
sizeof(CoordT) ==
sizeof(Coord),
"Mismatching sizeof");
4146 static_assert(
sizeof(MaskT<LOG2DIM>) ==
sizeof(
Mask<LOG2DIM>),
"Mismatching sizeof");
4150 static constexpr
bool FIXED_SIZE =
true;
4153 uint8_t mBBoxDif[3];
// padding(): origin 12B, bbox 3B, flags 1B, mask, plus two 8-byte members.
4159 return sizeof(
LeafIndexBase) - (12u + 3u + 1u +
sizeof(MaskT<LOG2DIM>) + 2 * 8u);
4170 template<
typename T>
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>
4204 template<
typename CoordT,
template<u
int32_t>
class MaskT, uint32_t LOG2DIM>