#ifndef NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED
#define NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED
#define NANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL // "NanoVDB0" in hex - little endian (uint64_t)
#define NANOVDB_MAGIC_GRID   0x314244566f6e614eUL // "NanoVDB1" in hex - little endian (uint64_t)
#define NANOVDB_MAGIC_FILE   0x324244566f6e614eUL // "NanoVDB2" in hex - little endian (uint64_t)
#define NANOVDB_MAGIC_NODE   0x334244566f6e614eUL // "NanoVDB3" in hex - little endian (uint64_t)
#define NANOVDB_MAGIC_MASK   0x00FFFFFFFFFFFFFFUL // use this mask to remove the number
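// A minimal usage sketch (editorial illustration, not part of the original header):
// any of the magic numbers above can be recognized by masking off the trailing
// version digit with NANOVDB_MAGIC_MASK before comparing, e.g.
//
//   bool looksLikeNanoVDB(uint64_t magic)
//   {
//       return (magic & NANOVDB_MAGIC_MASK) == (NANOVDB_MAGIC_NUMBER & NANOVDB_MAGIC_MASK);
//   }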
#define NANOVDB_MAJOR_VERSION_NUMBER 32 // reflects changes to the ABI and hence also the file format
#define NANOVDB_MINOR_VERSION_NUMBER 6 //  reflects changes to the API but not the ABI
#define NANOVDB_PATCH_VERSION_NUMBER 0 //  reflects changes that do not affect the ABI or API
#define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
#define NANOVDB_USE_SINGLE_ROOT_KEY
#define NANOVDB_NEW_ACCESSOR_METHODS
#define NANOVDB_FPN_BRANCHLESS
#define NANOVDB_DATA_ALIGNMENT 32
#if !defined(NANOVDB_ALIGN)
#define NANOVDB_ALIGN(n) alignas(n)
#endif // !defined(NANOVDB_ALIGN)
#ifdef __CUDACC_RTC__

typedef signed char        int8_t;
typedef short              int16_t;
typedef int                int32_t;
typedef long long          int64_t;
typedef unsigned char      uint8_t;
typedef unsigned int       uint32_t;
typedef unsigned short     uint16_t;
typedef unsigned long long uint64_t;

#define NANOVDB_ASSERT(x)

#define UINT64_C(x) (x##ULL)

#else // !__CUDACC_RTC__
#ifdef NANOVDB_USE_IOSTREAMS
#include <fstream> // for read/writeUncompressedGrids
#endif
// All asserts can be disabled here, even for debug builds
#if 1
#define NANOVDB_ASSERT(x) assert(x)
#else
#define NANOVDB_ASSERT(x)
#endif
#if defined(NANOVDB_USE_INTRINSICS) && defined(_MSC_VER)
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
#endif

#endif // __CUDACC_RTC__
#if defined(__CUDACC__) || defined(__HIP__)
#define __hostdev__ __host__ __device__ // Runs on the CPU and GPU, called from the CPU or the GPU
#else
#define __hostdev__ // Runs on the CPU and GPU, called from the CPU or the GPU
#define __global__ //  Runs on the GPU, called from the CPU or the GPU
#define __device__ //  Runs on the GPU, called from the GPU
#define __host__ //    Runs on the CPU, called from the CPU
#endif // if defined(__CUDACC__) || defined(__HIP__)
#if defined(_MSC_VER) && defined(__CUDACC__)
#define NANOVDB_HOSTDEV_DISABLE_WARNING __pragma("hd_warning_disable")
#elif defined(__GNUC__) && defined(__CUDACC__)
#define NANOVDB_HOSTDEV_DISABLE_WARNING _Pragma("hd_warning_disable")
#else
#define NANOVDB_HOSTDEV_DISABLE_WARNING
#endif
#define NANOVDB_OFFSETOF(CLASS, MEMBER) ((int)(size_t)((char*)&((CLASS*)0)->MEMBER - (char*)0))
#ifndef __CUDACC_RTC__
    static const char* LUT[] = {"?", "float", "double", "int16", "int32", "int64", "Vec3f", "Vec3d", "Mask", "Half",
                                "uint32", "bool", "RGBA8", "Float4", "Float8", "Float16", "FloatN", "Vec4f", "Vec4d",
                                "Index", "OnIndex", "IndexMask", "OnIndexMask", "PointIndex", "Vec3u8", "Vec3u16", "End"};
    static_assert(sizeof(LUT) / sizeof(char*) - 1 == int(GridType::End), "Unexpected size of LUT");
    return LUT[static_cast<int>(gridType)];
#ifndef __CUDACC_RTC__
    static const char* LUT[] = {"?", "SDF", "FOG", "MAC", "PNTIDX", "PNTDAT", "TOPO", "VOX", "INDEX", "TENSOR", "END"};
    static_assert(sizeof(LUT) / sizeof(char*) - 1 == int(GridClass::End), "Unexpected size of LUT");
    return LUT[static_cast<int>(gridClass)];
#ifndef __CUDACC_RTC__
    static const char* LUT[] = {"has long grid name",
                                "has bbox",
                                "has min/max",
                                "has average",
                                "has standard deviation",
                                "is breadth-first",
                                "end"};
    static_assert(1 << (sizeof(LUT) / sizeof(char*) - 1) == int(GridFlags::End), "Unexpected size of LUT");
    return LUT[static_cast<int>(gridFlags)];
template<typename T0, typename T1, typename... T>
template<typename T0, typename T1>
    static constexpr bool value = false;
template<bool, typename T = void>
template<typename T>
template<bool, typename T = void>
    static constexpr bool value = false;
    static constexpr bool value = false;
template<typename T>
template<typename T>
template<typename T>
template<typename T>
template<typename T, typename ReferenceT>
template<typename T, typename ReferenceT>
template<typename AnyType, template<typename...> class TemplateType>
template<typename... Args, template<typename...> class TemplateType>
__hostdev__ inline static bool isAligned(const void* p)
__hostdev__ inline static bool isValid(const void* p)
__hostdev__ inline static uint64_t alignmentPadding(const void* p)
template<typename T>
    return reinterpret_cast<T*>((uint8_t*)p + alignmentPadding(p));
template<typename T>
    return reinterpret_cast<const T*>((const uint8_t*)p + alignmentPadding(p));
template<typename T1, typename T2>
__hostdev__ inline static int64_t PtrDiff(const T1* p, const T2* q)
    return reinterpret_cast<const char*>(p) - reinterpret_cast<const char*>(q);
template<typename DstT, typename SrcT>
    return reinterpret_cast<DstT*>(reinterpret_cast<char*>(p) + offset);
template<typename DstT, typename SrcT>
    return reinterpret_cast<const DstT*>(reinterpret_cast<const char*>(p) + offset);
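// Usage sketch (illustrative only): PtrDiff and PtrAdd implement the relative,
// offset-based addressing used throughout this file, which keeps grids relocatable:
//
//   int64_t offset = PtrDiff(child, parent);          // signed byte offset
//   auto*   same   = PtrAdd<ChildT>(parent, offset);  // recovers the child pointer
//
// Here "child", "parent", and "ChildT" are placeholders for any pair of pointers
// into the same grid buffer.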
__hostdev__ inline static void* memcpy64(void* dst, const void* src, size_t word_count)
{
    auto*       d = reinterpret_cast<uint64_t*>(dst), *e = d + word_count;
    const auto* s = reinterpret_cast<const uint64_t*>(src);
    while (d != e) *d++ = *s++;
    return dst;
}
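// Note: memcpy64 copies whole 64-bit words, so dst and src must both be 8-byte
// aligned and word_count counts words, not bytes. For example, GridData below is
// copied with memcpy64(this, &other, 84), i.e. 84 words = 672 bytes = sizeof(GridData).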
switch (blindClass) {
    : mData(major << 21 | minor << 10 | patch)
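// Bit layout of the packed version word: major in bits [21,32), minor in bits
// [10,21), and patch in bits [0,10). A decoding sketch (illustrative):
//
//   uint32_t major = mData >> 21;                       // 11 bits
//   uint32_t minor = (mData >> 10) & ((1u << 11) - 1u); // 11 bits
//   uint32_t patch =  mData        & ((1u << 10) - 1u); // 10 bits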
#ifndef __CUDACC_RTC__
    char* buffer = (char*)malloc(4 + 1 + 4 + 1 + 4 + 1);
    return 3.141592653589793238462643383279502884e+00;
    return 3.141592653589793238462643383279502884e+00F;
    return 3.141592653589793238462643383279502884e+00;
    return 3.141592653589793238462643383279502884e+00L;
template<typename T>
template<typename T>
#if defined(__CUDA_ARCH__) || defined(__HIP__)
struct Maximum<uint32_t>
struct Maximum<float>
struct Maximum<double>
template<typename T>
template<typename Type>
template<typename Type>
    return (a < b) ? a : b;
    return int32_t(fminf(float(a), float(b)));
    return uint32_t(fminf(float(a), float(b)));
template<typename Type>
    return (a > b) ? a : b;
    return int32_t(fmaxf(float(a), float(b)));
    return uint32_t(fmaxf(float(a), float(b)));
    return Max(Min(x, b), a);
    return Max(Min(x, b), a);
    return x - floorf(x);
    return x - floor(x);
    return int32_t(floorf(x));
    return int32_t(floor(x));
    return int32_t(ceilf(x));
    return int32_t(ceil(x));
template<typename T>
template<typename T>
template<typename T>
template<typename T>
    return x < 0 ? -x : x;
template<typename CoordT, typename RealT, template<typename> class Vec3T>
template<typename CoordT, template<typename> class Vec3T>
    return CoordT(int32_t(rintf(xyz[0])), int32_t(rintf(xyz[1])), int32_t(rintf(xyz[2])));
template<typename CoordT, template<typename> class Vec3T>
    return CoordT(int32_t(floor(xyz[0] + 0.5)), int32_t(floor(xyz[1] + 0.5)), int32_t(floor(xyz[2] + 0.5)));
template<typename CoordT, typename RealT, template<typename> class Vec3T>
template<typename T>
    return ((T(0) < x) ? T(1) : T(0)) - ((x < T(0)) ? T(1) : T(0));
template<typename Vec3T>
    static const int hashTable[8] = {2, 1, 9, 1, 2, 9, 0, 0};
    const int        hashKey = ((v[0] < v[1]) << 2) + ((v[0] < v[2]) << 1) + (v[1] < v[2]);
    return hashTable[hashKey];
    if (v[0] < v[1] && v[0] < v[2])
template<typename Vec3T>
    static const int hashTable[8] = {2, 1, 9, 1, 2, 9, 0, 0};
    const int        hashKey = ((v[0] > v[1]) << 2) + ((v[0] > v[2]) << 1) + (v[1] > v[2]);
    return hashTable[hashKey];
    if (v[0] > v[1] && v[0] > v[2])
template<uint64_t wordSize>
    const uint64_t r = byteCount % wordSize;
    return r ? byteCount - r + wordSize : byteCount;
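// Worked example: with wordSize = NANOVDB_DATA_ALIGNMENT = 32, AlignUp<32>(1) == 32
// and AlignUp<32>(672) == 672, i.e. byteCount is rounded up to the next multiple
// of the word size (values already aligned are returned unchanged).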
    : mVec{ptr[0], ptr[1], ptr[2]}
template<typename CoordT>
    static_assert(sizeof(Coord) == sizeof(CoordT), "Mis-matched sizeof");
    return mVec[0] < rhs[0] ? true
         : mVec[0] > rhs[0] ? false
         : mVec[1] < rhs[1] ? true
         : mVec[1] > rhs[1] ? false
         : mVec[2] < rhs[2] ? true : false;
    return mVec[0] < rhs[0] ? true
         : mVec[0] > rhs[0] ? false
         : mVec[1] < rhs[1] ? true
         : mVec[1] > rhs[1] ? false
         : mVec[2] <= rhs[2] ? true : false;
    if (other[0] < mVec[0])
    if (other[1] < mVec[1])
    if (other[2] < mVec[2])
    if (other[0] > mVec[0])
    if (other[1] > mVec[1])
    if (other[2] > mVec[2])
#if defined(__CUDACC__) // the following functions only run on the GPU!
    atomicMin(&mVec[0], other[0]);
    atomicMin(&mVec[1], other[1]);
    atomicMin(&mVec[2], other[2]);
    atomicMax(&mVec[0], other[0]);
    atomicMax(&mVec[1], other[1]);
    atomicMax(&mVec[2], other[2]);
    return Coord(mVec[0] + dx, mVec[1] + dy, mVec[2] + dz);
    return (a[0] < b[0] || a[1] < b[1] || a[2] < b[2]);
template<typename Vec3T>
template<int Log2N = 3 + 4 + 5>
__hostdev__ uint32_t hash() const { return ((1 << Log2N) - 1) & (mVec[0] * 73856093 ^ mVec[1] * 19349669 ^ mVec[2] * 83492791); }
           (uint8_t(bool(mVec[1] & (1u << 31))) << 1) |
           (uint8_t(bool(mVec[2] & (1u << 31))) << 2); }
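// Usage sketch (illustrative): the default Log2N = 3 + 4 + 5 = 12 matches the sum
// of the LOG2DIMs of the three node levels, so hash() yields a 4096-entry slot,
// e.g. for a direct-mapped node cache:
//
//   uint32_t slot = ijk.hash<12>(); // slot in [0, 4096)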
template<typename T>
template<template<class> class Vec3T, class T2>
    : mVec{T(v[0]), T(v[1]), T(v[2])}
template<typename T2>
    : mVec{T(v[0]), T(v[1]), T(v[2])}
    : mVec{T(ijk[0]), T(ijk[1]), T(ijk[2])}
template<template<class> class Vec3T, class T2>
template<typename Vec3T>
__hostdev__ T dot(const Vec3T& v) const { return mVec[0] * v[0] + mVec[1] * v[1] + mVec[2] * v[2]; }
template<typename Vec3T>
    return Vec3(mVec[1] * v[2] - mVec[2] * v[1],
                mVec[2] * v[0] - mVec[0] * v[2],
                mVec[0] * v[1] - mVec[1] * v[0]);
    return mVec[0] * mVec[0] + mVec[1] * mVec[1] + mVec[2] * mVec[2];
    mVec[0] += T(ijk[0]);
    mVec[1] += T(ijk[1]);
    mVec[2] += T(ijk[2]);
    mVec[0] -= T(ijk[0]);
    mVec[1] -= T(ijk[1]);
    mVec[2] -= T(ijk[2]);
    if (other[0] < mVec[0])
    if (other[1] < mVec[1])
    if (other[2] < mVec[2])
    if (other[0] > mVec[0])
    if (other[1] > mVec[1])
    if (other[2] > mVec[2])
    return mVec[0] < mVec[1] ? (mVec[0] < mVec[2] ? mVec[0] : mVec[2]) : (mVec[1] < mVec[2] ? mVec[1] : mVec[2]);
    return mVec[0] > mVec[1] ? (mVec[0] > mVec[2] ? mVec[0] : mVec[2]) : (mVec[1] > mVec[2] ? mVec[1] : mVec[2]);
    return Coord(mVec[0], mVec[1], mVec[2]);
template<typename T1, typename T2>
    return Vec3<T2>(scalar * vec[0], scalar * vec[1], scalar * vec[2]);
template<typename T1, typename T2>
    return Vec3<T2>(scalar / vec[0], scalar / vec[1], scalar / vec[2]);
    return Vec3f(float(mVec[0]), float(mVec[1]), float(mVec[2]));
    return Vec3d(double(mVec[0]), double(mVec[1]), double(mVec[2]));
template<typename T>
template<typename T2>
    : mVec{T(v[0]), T(v[1]), T(v[2]), T(v[3])}
template<template<class> class Vec4T, class T2>
    : mVec{T(v[0]), T(v[1]), T(v[2]), T(v[3])}
__hostdev__ bool operator==(const Vec4& rhs) const { return mVec[0] == rhs[0] && mVec[1] == rhs[1] && mVec[2] == rhs[2] && mVec[3] == rhs[3]; }
__hostdev__ bool operator!=(const Vec4& rhs) const { return mVec[0] != rhs[0] || mVec[1] != rhs[1] || mVec[2] != rhs[2] || mVec[3] != rhs[3]; }
template<template<class> class Vec4T, class T2>
template<typename Vec4T>
__hostdev__ T dot(const Vec4T& v) const { return mVec[0] * v[0] + mVec[1] * v[1] + mVec[2] * v[2] + mVec[3] * v[3]; }
    return mVec[0] * mVec[0] + mVec[1] * mVec[1] + mVec[2] * mVec[2] + mVec[3] * mVec[3];
    if (other[0] < mVec[0])
    if (other[1] < mVec[1])
    if (other[2] < mVec[2])
    if (other[3] < mVec[3])
    if (other[0] > mVec[0])
    if (other[1] > mVec[1])
    if (other[2] > mVec[2])
    if (other[3] > mVec[3])
template<typename T1, typename T2>
    return Vec4<T2>(scalar * vec[0], scalar * vec[1], scalar * vec[2], scalar * vec[3]);
template<typename T1, typename T2>
    return Vec4<T2>(scalar / vec[0], scalar / vec[1], scalar / vec[2], scalar / vec[3]);
    : mData{{0, 0, 0, 0}}
    static_assert(sizeof(uint32_t) == sizeof(Rgba8), "Unexpected sizeof");
    : mData{{r, g, b, a}}
    : mData{{v, v, v, v}}
    : mData{{static_cast<uint8_t>(0.5f + r * 255.0f),
             static_cast<uint8_t>(0.5f + g * 255.0f),
             static_cast<uint8_t>(0.5f + b * 255.0f),
             static_cast<uint8_t>(0.5f + a * 255.0f)}}
    : Rgba8(rgb[0], rgb[1], rgb[2])
    : Rgba8(rgba[0], rgba[1], rgba[2], rgba[3])
__hostdev__ bool operator<(const Rgba8& rhs) const { return mData.packed < rhs.mData.packed; }
    return 0.0000153787005f * (float(mData.c[0]) * mData.c[0] +
                               float(mData.c[1]) * mData.c[1] +
                               float(mData.c[2]) * mData.c[2]);
__hostdev__ float          asFloat(int n) const { return 0.003921569f * float(mData.c[n]); }
__hostdev__ const uint8_t& operator[](int n) const { return mData.c[n]; }
__hostdev__ uint8_t&       operator[](int n) { return mData.c[n]; }
__hostdev__ const uint8_t& r() const { return mData.c[0]; }
__hostdev__ const uint8_t& g() const { return mData.c[1]; }
__hostdev__ const uint8_t& b() const { return mData.c[2]; }
__hostdev__ const uint8_t& a() const { return mData.c[3]; }
    return Vec3f(this->asFloat(0), this->asFloat(1), this->asFloat(2));
    return Vec4f(this->asFloat(0), this->asFloat(1), this->asFloat(2), this->asFloat(3));
template<typename T>
    static const int  Rank = 0;
    static const bool IsScalar = true;
    static const bool IsVector = false;
    static const int  Size = 1;
template<typename T>
    static const int  Rank = 1;
    static const bool IsScalar = false;
    static const bool IsVector = true;
template<typename T, int = sizeof(typename TensorTraits<T>::ElementType)>
template<typename T>
template<typename BuildT>
template<typename BuildT>
    return defaultClass;
template<typename Vec3T>
    return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[1], static_cast<float>(xyz[2]) * mat[2])),
                 fmaf(static_cast<float>(xyz[0]), mat[3], fmaf(static_cast<float>(xyz[1]), mat[4], static_cast<float>(xyz[2]) * mat[5])),
                 fmaf(static_cast<float>(xyz[0]), mat[6], fmaf(static_cast<float>(xyz[1]), mat[7], static_cast<float>(xyz[2]) * mat[8])));
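// Example (illustrative): mat is stored in row-major order, so matMult computes
// mat * xyz. With the identity matrix the input is returned unchanged:
//
//   const float mat[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
//   Vec3f v = matMult(mat, Vec3f(1.0f, 2.0f, 3.0f)); // == (1, 2, 3)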
template<typename Vec3T>
    return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[1], static_cast<double>(xyz[2]) * mat[2])),
                 fma(static_cast<double>(xyz[0]), mat[3], fma(static_cast<double>(xyz[1]), mat[4], static_cast<double>(xyz[2]) * mat[5])),
                 fma(static_cast<double>(xyz[0]), mat[6], fma(static_cast<double>(xyz[1]), mat[7], static_cast<double>(xyz[2]) * mat[8])));
template<typename Vec3T>
    return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[1], fmaf(static_cast<float>(xyz[2]), mat[2], vec[0]))),
                 fmaf(static_cast<float>(xyz[0]), mat[3], fmaf(static_cast<float>(xyz[1]), mat[4], fmaf(static_cast<float>(xyz[2]), mat[5], vec[1]))),
                 fmaf(static_cast<float>(xyz[0]), mat[6], fmaf(static_cast<float>(xyz[1]), mat[7], fmaf(static_cast<float>(xyz[2]), mat[8], vec[2]))));
template<typename Vec3T>
    return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[1], fma(static_cast<double>(xyz[2]), mat[2], vec[0]))),
                 fma(static_cast<double>(xyz[0]), mat[3], fma(static_cast<double>(xyz[1]), mat[4], fma(static_cast<double>(xyz[2]), mat[5], vec[1]))),
                 fma(static_cast<double>(xyz[0]), mat[6], fma(static_cast<double>(xyz[1]), mat[7], fma(static_cast<double>(xyz[2]), mat[8], vec[2]))));
template<typename Vec3T>
    return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[3], static_cast<float>(xyz[2]) * mat[6])),
                 fmaf(static_cast<float>(xyz[0]), mat[1], fmaf(static_cast<float>(xyz[1]), mat[4], static_cast<float>(xyz[2]) * mat[7])),
                 fmaf(static_cast<float>(xyz[0]), mat[2], fmaf(static_cast<float>(xyz[1]), mat[5], static_cast<float>(xyz[2]) * mat[8])));
template<typename Vec3T>
    return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[3], static_cast<double>(xyz[2]) * mat[6])),
                 fma(static_cast<double>(xyz[0]), mat[1], fma(static_cast<double>(xyz[1]), mat[4], static_cast<double>(xyz[2]) * mat[7])),
                 fma(static_cast<double>(xyz[0]), mat[2], fma(static_cast<double>(xyz[1]), mat[5], static_cast<double>(xyz[2]) * mat[8])));
template<typename Vec3T>
    return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[3], fmaf(static_cast<float>(xyz[2]), mat[6], vec[0]))),
                 fmaf(static_cast<float>(xyz[0]), mat[1], fmaf(static_cast<float>(xyz[1]), mat[4], fmaf(static_cast<float>(xyz[2]), mat[7], vec[1]))),
                 fmaf(static_cast<float>(xyz[0]), mat[2], fmaf(static_cast<float>(xyz[1]), mat[5], fmaf(static_cast<float>(xyz[2]), mat[8], vec[2]))));
template<typename Vec3T>
    return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[3], fma(static_cast<double>(xyz[2]), mat[6], vec[0]))),
                 fma(static_cast<double>(xyz[0]), mat[1], fma(static_cast<double>(xyz[1]), mat[4], fma(static_cast<double>(xyz[2]), mat[7], vec[1]))),
                 fma(static_cast<double>(xyz[0]), mat[2], fma(static_cast<double>(xyz[1]), mat[5], fma(static_cast<double>(xyz[2]), mat[8], vec[2]))));
template<typename Vec3T>
    mCoord[0].minComponent(xyz);
    mCoord[1].maxComponent(xyz);
    mCoord[0].minComponent(bbox[0]);
    mCoord[1].maxComponent(bbox[1]);
    mCoord[0].maxComponent(bbox[0]);
    mCoord[1].minComponent(bbox[1]);
template<typename Vec3T>
    using BaseT::mCoord;
    : BBox(bbox[0], bbox[1])
                                       mCoord[0][1] >= mCoord[1][1] ||
                                       mCoord[0][2] >= mCoord[1][2]; }
__hostdev__ operator bool() const { return mCoord[0][0] < mCoord[1][0] &&
                                           mCoord[0][1] < mCoord[1][1] &&
                                           mCoord[0][2] < mCoord[1][2]; }
    return p[0] > mCoord[0][0] && p[1] > mCoord[0][1] && p[2] > mCoord[0][2] &&
           p[0] < mCoord[1][0] && p[1] < mCoord[1][1] && p[2] < mCoord[1][2];
template<typename CoordT>
    using BaseT::mCoord;
        if (mPos[2] < mBBox[1][2]) {
        } else if (mPos[1] < mBBox[1][1]) {
            mPos[2] = mBBox[0][2];
        } else if (mPos[0] <= mBBox[1][0]) {
            mPos[2] = mBBox[0][2];
            mPos[1] = mBBox[0][1];
        return mPos == rhs.mPos;
        return mPos != rhs.mPos;
        return mPos < rhs.mPos;
        return mPos <= rhs.mPos;
__hostdev__ Iterator end() const { return Iterator{*this, CoordT(mCoord[1][0] + 1, mCoord[0][1], mCoord[0][2])}; }
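// Iteration sketch (illustrative): begin()/end() traverse every coordinate in the
// bounding box, with the last (k) component varying fastest:
//
//   for (auto it = bbox.begin(); it != bbox.end(); ++it) {
//       const CoordT& ijk = *it; // visit voxel at ijk
//   }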
template<typename SplitT>
    : BaseT(other.mCoord[0], other.mCoord[1])
    mCoord[1][n] = (mCoord[0][n] + mCoord[1][n]) >> 1;
    other.mCoord[0][n] = mCoord[1][n] + 1;
    return BBox(min, min.offsetBy(dim - 1));
    return BBox(CoordT(min), CoordT(max));
                                       mCoord[0][1] < mCoord[1][1] &&
                                       mCoord[0][2] < mCoord[1][2]; }
                                       mCoord[0][1] > mCoord[1][1] ||
                                       mCoord[0][2] > mCoord[1][2]; }
__hostdev__ operator bool() const { return mCoord[0][0] <= mCoord[1][0] &&
                                           mCoord[0][1] <= mCoord[1][1] &&
                                           mCoord[0][2] <= mCoord[1][2]; }
    auto d = this->dim();
    return uint64_t(d[0]) * uint64_t(d[1]) * uint64_t(d[2]);
    return !(CoordT::lessThan(b.min(), this->min()) || CoordT::lessThan(this->max(), b.max()));
    return !(CoordT::lessThan(this->max(), b.min()) || CoordT::lessThan(b.max(), this->min()));
template<typename RealT = double>
                Vec3<RealT>(RealT(mCoord[1][0] + 1), RealT(mCoord[1][1] + 1), RealT(mCoord[1][2] + 1)));
    return BBox(mCoord[0].offsetBy(-padding), mCoord[1].offsetBy(padding));
template<typename Map>
    bbox.expand(map.applyMap(Vec3d(mCoord[0][0], mCoord[0][1], mCoord[1][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[0][0], mCoord[1][1], mCoord[0][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[1][0], mCoord[0][1], mCoord[0][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[1][0], mCoord[1][1], mCoord[0][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[1][0], mCoord[0][1], mCoord[1][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[0][0], mCoord[1][1], mCoord[1][2])));
    bbox.expand(map.applyMap(Vec3d(mCoord[1][0], mCoord[1][1], mCoord[1][2])));
#if defined(__CUDACC__) // the following functions only run on the GPU!
    mCoord[0].minComponentAtomic(ijk);
    mCoord[1].maxComponentAtomic(ijk);
    mCoord[0].minComponentAtomic(bbox[0]);
    mCoord[1].maxComponentAtomic(bbox[1]);
    mCoord[0].maxComponentAtomic(bbox[0]);
    mCoord[1].minComponentAtomic(bbox[1]);
__hostdev__ static inline uint32_t FindLowestOn(uint32_t v)
#if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS)
    return __ffs(v) - 1;
#elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS)
    unsigned long index;
    _BitScanForward(&index, v);
    return static_cast<uint32_t>(index);
#elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS)
    return static_cast<uint32_t>(__builtin_ctzl(v));
#else
    static const unsigned char DeBruijn[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
#if defined(_MSC_VER) && !defined(__NVCC__)
#pragma warning(push)
#pragma warning(disable : 4146)
#endif
    return DeBruijn[uint32_t((v & -v) * 0x077CB531U) >> 27];
#if defined(_MSC_VER) && !defined(__NVCC__)
#pragma warning(pop)
#endif
__hostdev__ static inline uint32_t FindHighestOn(uint32_t v)
#if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS)
    return sizeof(uint32_t) * 8 - 1 - __clz(v);
#elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS)
    unsigned long index;
    _BitScanReverse(&index, v);
    return static_cast<uint32_t>(index);
#elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS)
    return sizeof(unsigned long) * 8 - 1 - __builtin_clzl(v);
#else
    static const unsigned char DeBruijn[32] = {
        0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
        8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31};
    v |= v >> 1; // round v down to one less than the next power of two
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return DeBruijn[uint32_t(v * 0x07C4ACDDU) >> 27];
__hostdev__ static inline uint32_t FindLowestOn(uint64_t v)
#if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS)
    return __ffsll(static_cast<unsigned long long int>(v)) - 1;
#elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS)
    unsigned long index;
    _BitScanForward64(&index, v);
    return static_cast<uint32_t>(index);
#elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS)
    return static_cast<uint32_t>(__builtin_ctzll(v));
#else
    static const unsigned char DeBruijn[64] = {
        0,  1,  2,  53, 3,  7,  54, 27, 4,  38, 41, 8,  34, 55, 48, 28,
        62, 5,  39, 46, 44, 42, 22, 9,  24, 35, 59, 56, 49, 18, 29, 11,
        63, 52, 6,  26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
        51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12,
    };
#if defined(_MSC_VER) && !defined(__NVCC__)
#pragma warning(push)
#pragma warning(disable : 4146)
#endif
    return DeBruijn[uint64_t((v & -v) * UINT64_C(0x022FDD63CC95386D)) >> 58];
#if defined(_MSC_VER) && !defined(__NVCC__)
#pragma warning(pop)
#endif
__hostdev__ static inline uint32_t FindHighestOn(uint64_t v)
#if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS)
    return sizeof(unsigned long) * 8 - 1 - __clzll(static_cast<unsigned long long int>(v));
#elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS)
    unsigned long index;
    _BitScanReverse64(&index, v);
    return static_cast<uint32_t>(index);
#elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS)
    return sizeof(unsigned long) * 8 - 1 - __builtin_clzll(v);
#else
    const uint32_t* p = reinterpret_cast<const uint32_t*>(&v);
    return p[1] ? 32u + FindHighestOn(p[1]) : FindHighestOn(p[0]);
#if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS)
#elif defined(_MSC_VER) && defined(_M_X64) && (_MSC_VER >= 1928) && defined(NANOVDB_USE_INTRINSICS)
    return uint32_t(__popcnt64(v));
#elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS)
    return __builtin_popcountll(v);
#else // use software implementation
    v = v - ((v >> 1) & uint64_t(0x5555555555555555));
    v = (v & uint64_t(0x3333333333333333)) + ((v >> 2) & uint64_t(0x3333333333333333));
    return (((v + (v >> 4)) & uint64_t(0xF0F0F0F0F0F0F0F)) * uint64_t(0x101010101010101)) >> 56;
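// The fallback above is the classic SWAR popcount: first pairwise 1-bit sums, then
// 2-bit sums, then nibble sums, and finally a byte-wise multiply that accumulates
// all byte counts into the top byte. Sanity check (illustrative):
// CountOn(0xFFull) == 8 and CountOn(~0ull) == 64.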
    for (auto bit : list)
template<typename MaskT>
    for (auto mask : list)
    for (auto bit : list)
template<typename MaskT>
    for (auto mask : list)
    for (auto bit : list)
    for (auto bit : list)
template<typename MaskT>
template<typename MaskT>
template<typename MaskT>
    for (auto mask : list)
template<typename MaskT>
    for (auto mask : list)
template<typename MaskT>
template<typename MaskT>
template<typename MaskT>
template<typename MaskT>
    for (auto mask : list)
template<typename MaskT>
    for (auto mask : list)
template<uint32_t LOG2DIM>
    static constexpr uint32_t SIZE = 1U << (3 * LOG2DIM);
    uint32_t n = i >> 6, sum = CountOn(mWords[n] & ((uint64_t(1) << (i & 63u)) - 1u));
    for (const uint64_t* w = mWords; n--; ++w)
        mPos = mParent->findNext<On>(mPos + 1);
    const Mask* mParent;
    const uint64_t v = on ? ~uint64_t(0) : uint64_t(0);
        mWords[i] = other.mWords[i];
template<typename MaskT = Mask>
    static_assert(sizeof(Mask) == sizeof(MaskT), "Mismatching sizeof");
    static_assert(WORD_COUNT == MaskT::WORD_COUNT, "Mismatching word count");
    static_assert(LOG2DIM == MaskT::LOG2DIM, "Mismatching LOG2DIM");
    auto* src = reinterpret_cast<const uint64_t*>(&other);
        if (mWords[i] != other.mWords[i])
__hostdev__ bool isOn(uint32_t n) const { return 0 != (mWords[n >> 6] & (uint64_t(1) << (n & 63))); }
__hostdev__ bool isOff(uint32_t n) const { return 0 == (mWords[n >> 6] & (uint64_t(1) << (n & 63))); }
        if (mWords[i] != ~uint64_t(0))
        if (mWords[i] != uint64_t(0))
#if defined(__CUDACC__) // the following functions only run on the GPU!
    atomicOr(reinterpret_cast<unsigned long long int*>(this) + (n >> 6), 1ull << (n & 63));
__device__ inline void setOffAtomic(uint32_t n)
    atomicAnd(reinterpret_cast<unsigned long long int*>(this) + (n >> 6), ~(1ull << (n & 63)));
__device__ inline void setAtomic(uint32_t n, bool on)
    on ? this->setOnAtomic(n) : this->setOffAtomic(n);
#if 1 // switch between branchless and branched implementations
    auto& word = mWords[n >> 6];
    n &= 63; // bit index within the 64-bit word
    word &= ~(uint64_t(1) << n);
    word |= uint64_t(on) << n;
        mWords[i] = ~uint64_t(0);
        mWords[i] = uint64_t(0);
    const uint64_t v = on ? ~uint64_t(0) : uint64_t(0);
    for (auto* w = mWords; n--; ++w)
    uint64_t*       w1 = mWords;
    const uint64_t* w2 = other.mWords;
    for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2)
    uint64_t*       w1 = mWords;
    const uint64_t* w2 = other.mWords;
    for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2)
    uint64_t*       w1 = mWords;
    const uint64_t* w2 = other.mWords;
    for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2)
    uint64_t*       w1 = mWords;
    const uint64_t* w2 = other.mWords;
    for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2)
    const uint64_t* w = mWords;
    return n < WORD_COUNT ? (n << 6) + FindLowestOn(ON ? *w : ~*w) : SIZE;
    uint32_t n = start >> 6;
    uint32_t m = start & 63u;
    uint64_t b = ON ? mWords[n] : ~mWords[n];
    if (b & (uint64_t(1u) << m))
    b &= ~uint64_t(0u) << m;
        b = ON ? mWords[n] : ~mWords[n];
    return b ? (n << 6) + FindLowestOn(b) : SIZE;
    uint32_t n = start >> 6;
    uint32_t m = start & 63u;
    uint64_t b = ON ? mWords[n] : ~mWords[n];
    if (b & (uint64_t(1u) << m))
    b &= (uint64_t(1u) << m) - 1u;
        b = ON ? mWords[--n] : ~mWords[--n];
    return b ? (n << 6) + FindHighestOn(b) : SIZE;
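// Usage sketch (illustrative): the templated find methods drive this class's
// iterators, e.g. visiting all set bits in a 512-bit leaf mask:
//
//   Mask<3> mask;
//   mask.setOn(7);
//   mask.setOn(511);
//   for (uint32_t n = mask.findFirst<true>(); n < Mask<3>::SIZE; n = mask.findNext<true>(n + 1)) {
//       // visits n = 7, then n = 511
//   }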
    : mMatF{1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}
    , mInvMatF{1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}
    , mVecF{0.0f, 0.0f, 0.0f}
    , mMatD{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}
    , mInvMatD{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}
    , mVecD{0.0, 0.0, 0.0}
    , mMatD{s, 0.0, 0.0, 0.0, s, 0.0, 0.0, 0.0, s}
    , mInvMatD{1.0 / s, 0.0, 0.0, 0.0, 1.0 / s, 0.0, 0.0, 0.0, 1.0 / s}
template<typename MatT, typename Vec3T>
void set(const MatT& mat, const MatT& invMat, const Vec3T& translate, double taper = 1.0);
template<typename Mat4T>
void set(const Mat4T& mat, const Mat4T& invMat, double taper = 1.0) { this->set(mat, invMat, mat[3], taper); }
template<typename Vec3T>
void set(double scale, const Vec3T& translation, double taper = 1.0);
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename MatT, typename Vec3T>
    mTaperF = static_cast<float>(taper);
    for (int i = 0; i < 3; ++i) {
        *vd++ = translate[i];
        *vf++ = static_cast<float>(translate[i]);
        for (int j = 0; j < 3; ++j) {
            *mid++ = invMat[j][i];
            *mf++ = static_cast<float>(mat[j][i]);
            *mif++ = static_cast<float>(invMat[j][i]);
template<typename Vec3T>
    const double mat[3][3] = {{dx, 0.0, 0.0},
    const double idx = 1.0 / dx;
    const double invMat[3][3] = {{idx, 0.0, 0.0},
    this->set(mat, invMat, trans, taper);
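// Construction sketch (illustrative): a uniform scale-and-translate map with voxel
// size 0.5 and translation (1,2,3), applied to an index-space point:
//
//   Map map;
//   map.set(0.5, Vec3d(1.0, 2.0, 3.0));     // world = 0.5 * index + (1,2,3)
//   Vec3d world = map.applyMap(Vec3d(0.0)); // == (1, 2, 3)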
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridBlindMetaData
    static const int MaxNameSize = 256;
    int64_t          mDataOffset;
    uint64_t         mValueCount;
    uint32_t         mValueSize;
    char             mName[MaxNameSize];
    GridBlindMetaData(const GridBlindMetaData&) = delete;
    const GridBlindMetaData& operator=(const GridBlindMetaData&) = delete;
    __hostdev__ void setBlindData(void* blindData) { mDataOffset = PtrDiff(blindData, this); }
    __hostdev__ const void* blindData() const { return PtrAdd<void>(this, mDataOffset); }
    template<typename BlindDataT>
    __hostdev__ const BlindDataT* getBlindData() const
        return mDataType == mapToGridType<BlindDataT>() ? PtrAdd<BlindDataT>(this, mDataOffset) : nullptr;
        auto check = [&]() -> bool {
            default: return true; }
        return AlignUp<NANOVDB_DATA_ALIGNMENT>(mValueCount * mValueSize);
template<typename GridOrTreeOrRootT, int LEVEL>
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::LeafNodeType;
    using type = typename GridOrTreeOrRootT::LeafNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::LeafNodeType;
    using type = const typename GridOrTreeOrRootT::LeafNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
    using type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
    using type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
    using type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
    using type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::RootNodeType;
    using type = typename GridOrTreeOrRootT::RootNodeType;
template<typename GridOrTreeOrRootT>
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::RootNodeType;
    using type = const typename GridOrTreeOrRootT::RootNodeType;
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
template<typename BuildT>
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridData
    static const int MaxNameSize = 256;
    uint32_t         mGridIndex;
    uint32_t         mGridCount;
    char             mGridName[MaxNameSize];
    int64_t          mBlindMetadataOffset;
    uint32_t         mBlindMetadataCount;
    uint64_t         mData1, mData2;
    static_assert(8 * 84 == sizeof(GridData), "GridData has unexpected size");
    memcpy64(this, &other, 84);
                          uint64_t   gridSize = 0u,
                          const Map& map = Map(),
#ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS
        mChecksum = ~uint64_t(0);
        mGridSize = gridSize;
        mGridName[0] = '\0';
        mWorldBBox = BBox<Vec3d>();
        mVoxelSize = map.getVoxelSize();
        mGridClass = gridClass;
        mGridType = gridType;
        mBlindMetadataOffset = mGridSize;
        mBlindMetadataCount = 0u;
    if (test) test = mVersion.isCompatible();
    if (test) test = mGridCount > 0u && mGridIndex < mGridCount;
    char *dst = mGridName, *end = dst + MaxNameSize;
    while (*src != '\0' && dst < end - 1)
    return *src == '\0';
template<typename Vec3T>
__hostdev__ Vec3T applyMap(const Vec3T& xyz) const { return mMap.applyMap(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyInverseMap(const Vec3T& xyz) const { return mMap.applyInverseMap(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyJacobian(const Vec3T& xyz) const { return mMap.applyJacobian(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyInverseJacobian(const Vec3T& xyz) const { return mMap.applyInverseJacobian(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyIJT(const Vec3T& xyz) const { return mMap.applyIJT(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyMapF(const Vec3T& xyz) const { return mMap.applyMapF(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyInverseMapF(const Vec3T& xyz) const { return mMap.applyInverseMapF(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyJacobianF(const Vec3T& xyz) const { return mMap.applyJacobianF(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyInverseJacobianF(const Vec3T& xyz) const { return mMap.applyInverseJacobianF(xyz); }
template<typename Vec3T>
__hostdev__ Vec3T applyIJTF(const Vec3T& xyz) const { return mMap.applyIJTF(xyz); }
__hostdev__ uint8_t* treePtr() { return reinterpret_cast<uint8_t*>(this + 1); }
__hostdev__ const uint8_t* treePtr() const { return reinterpret_cast<const uint8_t*>(this + 1); }
template<uint32_t LEVEL>
    static_assert(LEVEL >= 0 && LEVEL <= 3, "invalid LEVEL template parameter");
    auto* treeData = this->treePtr();
    auto  nodeOffset = *reinterpret_cast<const uint64_t*>(treeData + 8 * LEVEL);
    return nodeOffset ? PtrAdd<uint8_t>(treeData, nodeOffset) : nullptr;
template<uint32_t LEVEL>
__hostdev__ uint8_t* nodePtr() { return const_cast<uint8_t*>(const_cast<const GridData*>(this)->template nodePtr<LEVEL>()); }
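// Access sketch (illustrative): nodes are stored breadth-first, so nodePtr<LEVEL>()
// returns the first node at a given level (3 = root, 2 = upper, 1 = lower, 0 = leaf)
// or nullptr if that level is empty:
//
//   if (const uint8_t* firstLeaf = gridData->nodePtr<0>()) { /* use first leaf */ }
//
// where "gridData" is a placeholder for a valid const GridData pointer.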
__hostdev__ const GridBlindMetaData* blindMetaData(uint32_t n) const
    return PtrAdd<GridBlindMetaData>(this, mBlindMetadataOffset) + n;
    for (uint32_t i = 0; i < mBlindMetadataCount; ++i) {
        const auto* metaData = this->blindMetaData(i);
            return metaData->template getBlindData<const char>();
__hostdev__ static uint64_t memUsage() { return sizeof(GridData); }
__hostdev__ const BBox<Vec3d>& worldBBox() const { return mWorldBBox; }
    if (const uint8_t* root = this->nodePtr<3>()) {
        return *(const uint32_t*)(root + sizeof(CoordBBox));
__hostdev__ bool isEmpty() const { return this->rootTableSize() == 0u; }
__hostdev__ bool isRootConnected() const { return *(const uint64_t*)((const char*)(this + 1) + 24) == 64u; }
template<typename BuildT, int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1>
template<typename BuildT>
template<typename TreeT>
template<typename T = BuildType>
template<typename T = BuildType>
__hostdev__ const TreeT& tree() const { return *reinterpret_cast<const TreeT*>(this->treePtr()); }
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename Vec3T>
template<typename NodeT>
__hostdev__ bool isSequential() const { return UpperNodeType::FIXED_SIZE && LowerNodeType::FIXED_SIZE && LeafNodeType::FIXED_SIZE && this->isBreadthFirst(); }
    printf("\nnanovdb::Grid::blindData is unsafe and hence deprecated! Please use nanovdb::Grid::getBlindData instead.\n\n");
    return this->blindMetaData(n).blindData();
template<typename BlindDataT>
    if (n >= DataType::mBlindMetadataCount)
        return nullptr;
    return this->blindMetaData(n).template getBlindData<BlindDataT>();
template<typename BlindDataT>
    if (n >= DataType::mBlindMetadataCount)
        return nullptr;
    return const_cast<BlindDataT*>(this->blindMetaData(n).template getBlindData<BlindDataT>());
template<typename TreeT>
    for (uint32_t i = 0, n = this->blindDataCount(); i < n; ++i) {
        if (this->blindMetaData(i).mSemantic == semantic)
template<typename TreeT>
    auto test = [&](int n) {
        const char* str = this->blindMetaData(n).mName;
        for (int i = 0; i < GridBlindMetaData::MaxNameSize; ++i) {
            if (name[i] != str[i])
            if (name[i] == '\0' && str[i] == '\0')
    for (int i = 0, n = this->blindDataCount(); i < n; ++i)
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) TreeData
    int64_t  mNodeOffset[4];
    uint32_t mNodeCount[3];
    uint32_t mTileCount[3];
    uint64_t mVoxelCount;
    static_assert(8 * 8 == sizeof(TreeData), "TreeData has unexpected size");
    memcpy64(this, &other, 8);
__hostdev__ void setRoot(const void* root) { mNodeOffset[3] = root ? PtrDiff(root, this) : 0; }
__hostdev__ uint8_t* getRoot() { return mNodeOffset[3] ? PtrAdd<uint8_t>(this, mNodeOffset[3]) : nullptr; }
__hostdev__ const uint8_t* getRoot() const { return mNodeOffset[3] ? PtrAdd<uint8_t>(this, mNodeOffset[3]) : nullptr; }
template<typename NodeT>
__hostdev__ void setFirstNode(const NodeT* node) { mNodeOffset[NodeT::LEVEL] = node ? PtrDiff(node, this) : 0; }
__hostdev__ bool isEmpty() const { return mNodeOffset[3] ? *PtrAdd<uint32_t>(this, mNodeOffset[3] + sizeof(BBox<Coord>)) == 0 : true; }
__hostdev__ bool isRootNext() const { return mNodeOffset[3] ? mNodeOffset[3] == sizeof(TreeData) : false; }
template<typename GridT>
    using Type = typename GridT::TreeType;
    using type = typename GridT::TreeType;
template<typename GridT>
    using Type = const typename GridT::TreeType;
    using type = const typename GridT::TreeType;
template<typename RootT>
    static_assert(RootT::LEVEL == 3, "Tree depth is not supported");
    static_assert(RootT::ChildNodeType::LOG2DIM == 5, "Tree configuration is not supported");
    static_assert(RootT::ChildNodeType::ChildNodeType::LOG2DIM == 4, "Tree configuration is not supported");
    static_assert(RootT::LeafNodeType::LOG2DIM == 3, "Tree configuration is not supported");
    using Node2 = typename RootT::ChildNodeType;
    using Node1 = typename Node2::ChildNodeType;
    RootT* ptr = reinterpret_cast<RootT*>(DataType::getRoot());
    const RootT* ptr = reinterpret_cast<const RootT*>(DataType::getRoot());
    return DataType::mTileCount[level - 1];
template<typename NodeT>
    static_assert(NodeT::LEVEL < 3, "Invalid NodeT");
    return DataType::mNodeCount[NodeT::LEVEL];
    return DataType::mNodeCount[level];
    return DataType::mNodeCount[0] + DataType::mNodeCount[1] + DataType::mNodeCount[2];
template<typename NodeT>
    const int64_t offset = DataType::mNodeOffset[NodeT::LEVEL];
    return offset ? PtrAdd<NodeT>(this, offset) : nullptr;
template<typename NodeT>
    const int64_t offset = DataType::mNodeOffset[NodeT::LEVEL];
    return offset ? PtrAdd<NodeT>(this, offset) : nullptr;
template<typename OpT, typename... ArgsT>
    return this->root().template get<OpT>(ijk, args...);
template<typename OpT, typename... ArgsT>
    return this->root().template set<OpT>(ijk, args...);
template<typename RootT>
    min = this->root().minimum();
    max = this->root().maximum();
template<typename ChildT>
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) RootData
    using CoordT = typename ChildT::CoordType;
    static constexpr bool FIXED_SIZE = false;
#ifdef NANOVDB_USE_SINGLE_ROOT_KEY
    using KeyT = uint64_t;
    template<typename CoordType>
    __hostdev__ static KeyT CoordToKey(const CoordType& ijk)
        static_assert(sizeof(CoordT) == sizeof(CoordType), "Mismatching sizeof");
        static_assert(32 - ChildT::TOTAL <= 21, "Cannot use 64 bit root keys");
        return (KeyT(uint32_t(ijk[2]) >> ChildT::TOTAL)) |
               (KeyT(uint32_t(ijk[1]) >> ChildT::TOTAL) << 21) |
               (KeyT(uint32_t(ijk[0]) >> ChildT::TOTAL) << 42);
    __hostdev__ static CoordT KeyToCoord(const KeyT& key)
        static constexpr uint64_t MASK = (1u << 21) - 1;
        return CoordT(((key >> 42) & MASK) << ChildT::TOTAL,
                      ((key >> 21) & MASK) << ChildT::TOTAL,
                      (key & MASK) << ChildT::TOTAL);
#else
    using KeyT = CoordT;
    __hostdev__ static KeyT CoordToKey(const CoordT& ijk) { return ijk & ~ChildT::MASK; }
    __hostdev__ static CoordT KeyToCoord(const KeyT& key) { return key; }
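// Round-trip sketch (illustrative): in the single-root-key branch CoordToKey packs
// the three upper-node indices into 21-bit fields (k in bits [0,21), j in [21,42),
// i in [42,63)), and KeyToCoord recovers the node origin, i.e. the coordinate
// rounded down to a multiple of 1 << ChildT::TOTAL:
//
//   auto   key = CoordToKey(ijk);
//   CoordT org = KeyToCoord(key); // per component: ijk & ~((1 << ChildT::TOTAL) - 1)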
    uint32_t mTableSize;
    return sizeof(RootData) - (24 + 4 + 3 * sizeof(ValueT) + 2 * sizeof(StatsT));
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) Tile
    template<typename CoordType>
        key = CoordToKey(k);
        child = PtrDiff(ptr, data);
    template<typename CoordType, typename ValueType>
        key = CoordToKey(k);
    __hostdev__ bool isChild() const { return child != 0; }
    __hostdev__ bool isValue() const { return child == 0; }
    __hostdev__ bool isActive() const { return child == 0 && state; }
    __hostdev__ CoordT origin() const { return KeyToCoord(key); }
    return reinterpret_cast<const Tile*>(this + 1) + n;
    return reinterpret_cast<Tile*>(this + 1) + n;
#if 1 // switch between linear and binary search
    const auto key = CoordToKey(ijk);
    for (Tile *p = reinterpret_cast<Tile*>(this + 1), *q = p + mTableSize; p < q; ++p)
#else // do not enable binary search if tiles are not guaranteed to be sorted!!!!!!
    int32_t low = 0, high = mTableSize;
    while (low != high) {
        int mid = low + ((high - low) >> 1);
        const Tile* tile = &tiles[mid];
        if (tile->key == key) {
        } else if (tile->key < key) {
__hostdev__ inline const Tile* probeTile(const CoordT& ijk) const
    return const_cast<RootData*>(this)->probeTile(ijk);
    return PtrAdd<ChildT>(this, tile->child);
__hostdev__ const ChildT* getChild(const Tile* tile) const
    return PtrAdd<ChildT>(this, tile->child);
__hostdev__ const StatsT& average() const { return mAverage; }
__hostdev__ const StatsT& stdDeviation() const { return mStdDevi; }
RootData() = delete;
RootData(const RootData&) = delete;
RootData& operator=(const RootData&) = delete;
~RootData() = delete;
template<typename ChildT>
    using Tile = typename DataType::Tile;
    static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE;
    static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
    template<typename RootT>
        return this->tile()->origin();
        return this->tile()->origin();
4383 :
BaseT(parent->
data(), parent->tileCount())
4386 while (*
this && !this->tile()->isChild())
4392 return *BaseT::mData->getChild(this->tile());
4397 return BaseT::mData->getChild(this->tile());
4403 while (*
this && this->tile()->isValue())
    template<typename RootT>
        : BaseT(parent->data(), parent->tileCount())
        while (*this && this->tile()->isChild())
        return this->tile()->value;
        return this->tile()->state;
        while (*this && this->tile()->isChild())
    template<typename RootT>
        : BaseT(parent->data(), parent->tileCount())
        while (*this && !this->tile()->isActive())
        return this->tile()->value;
        while (*this && !this->tile()->isActive())
    template<typename RootT>
        : BaseT(parent->data(), parent->tileCount())
        NodeT* child = nullptr;
        auto*  t = this->tile();
            child = BaseT::mData->getChild(t);
        return this->tile()->state;
#ifdef NANOVDB_NEW_ACCESSOR_METHODS
#else // NANOVDB_NEW_ACCESSOR_METHODS
    if (const Tile* tile = DataType::probeTile(ijk)) {
        return tile->isChild() ? this->getChild(tile)->getValue(ijk) : tile->value;
    return DataType::mBackground;
__hostdev__ bool isActive(const CoordType& ijk) const
    if (const Tile* tile = DataType::probeTile(ijk)) {
        return tile->isChild() ? this->getChild(tile)->isActive(ijk) : tile->state;
    if (const Tile* tile = DataType::probeTile(ijk)) {
        if (tile->isChild()) {
            const auto* child = this->getChild(tile);
            return child->probeValue(ijk, v);
    v = DataType::mBackground;
__hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const
    const Tile* tile = DataType::probeTile(ijk);
    if (tile && tile->isChild()) {
        const auto* child = this->getChild(tile);
        return child->probeLeaf(ijk);
#endif // NANOVDB_NEW_ACCESSOR_METHODS
    const Tile* tile = DataType::probeTile(ijk);
    return tile && tile->isChild() ? this->getChild(tile) : nullptr;
    const Tile* tile = DataType::probeTile(ijk);
    return tile && tile->isChild() ? this->getChild(tile) : nullptr;
template<typename OpT, typename... ArgsT>
    if (const Tile* tile = this->probeTile(ijk)) {
        if (tile->isChild())
            return this->getChild(tile)->template get<OpT>(ijk, args...);
template<typename OpT, typename... ArgsT>
    if (Tile* tile = DataType::probeTile(ijk)) {
        if (tile->isChild())
            return this->getChild(tile)->template set<OpT>(ijk, args...);
        return OpT::set(*tile, args...);
    return OpT::set(*this, args...);
    static_assert(sizeof(typename DataType::Tile) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(RootData::Tile) is misaligned");
    template<typename, int, int, int>
#ifndef NANOVDB_NEW_ACCESSOR_METHODS
    template<typename AccT>
    __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const
        using NodeInfoT = typename AccT::NodeInfo;
        if (const Tile* tile = this->probeTile(ijk)) {
            if (tile->isChild()) {
                const auto* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->getNodeInfoAndCache(ijk, acc);
            return NodeInfoT{LEVEL, ChildT::dim(), tile->value, tile->value, tile->value, 0, tile->origin(), tile->origin() + CoordType(ChildT::DIM)};
        return NodeInfoT{LEVEL, ChildT::dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
    template<typename AccT>
        if (const Tile* tile = this->probeTile(ijk)) {
            if (tile->isChild()) {
                const auto* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->getValueAndCache(ijk, acc);
        return DataType::mBackground;
    template<typename AccT>
    __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const
        const Tile* tile = this->probeTile(ijk);
        if (tile && tile->isChild()) {
            const auto* child = this->getChild(tile);
            acc.insert(ijk, child);
            return child->isActiveAndCache(ijk, acc);
    template<typename AccT>
        if (const Tile* tile = this->probeTile(ijk)) {
            if (tile->isChild()) {
                const auto* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->probeValueAndCache(ijk, v, acc);
        v = DataType::mBackground;
    template<typename AccT>
    __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const
        const Tile* tile = this->probeTile(ijk);
        if (tile && tile->isChild()) {
            const auto* child = this->getChild(tile);
            acc.insert(ijk, child);
            return child->probeLeafAndCache(ijk, acc);
#endif // NANOVDB_NEW_ACCESSOR_METHODS
    template<typename RayT, typename AccT>
    __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const
        if (const Tile* tile = this->probeTile(ijk)) {
            if (tile->isChild()) {
                const auto* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->getDimAndCache(ijk, ray, acc);
            return 1 << ChildT::TOTAL;
        return ChildNodeType::dim();
    template<typename OpT, typename AccT, typename... ArgsT>
    getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
        if (const Tile* tile = this->probeTile(ijk)) {
            if (tile->isChild()) {
                const ChildT* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->template getAndCache<OpT>(ijk, acc, args...);
    template<typename OpT, typename AccT, typename... ArgsT>
    setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
        if (Tile* tile = DataType::probeTile(ijk)) {
            if (tile->isChild()) {
                ChildT* child = this->getChild(tile);
                acc.insert(ijk, child);
                return child->template setAndCache<OpT>(ijk, acc, args...);
            return OpT::set(*tile, args...);
        return OpT::set(*this, args...);
template<typename ChildT, uint32_t LOG2DIM>
struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) InternalData
    using CoordT = typename ChildT::CoordType;
    using MaskT = typename ChildT::template MaskType<LOG2DIM>;
    static constexpr bool FIXED_SIZE = true;
        Tile(const Tile&) = delete;
        return sizeof(InternalData) - (24u + 8u + 2 * (sizeof(MaskT) + sizeof(ValueT) + sizeof(StatsT)) + (1u << (3 * LOG2DIM)) * (sizeof(ValueT) > 8u ? sizeof(ValueT) : 8u));
    alignas(32) Tile mTable[1u << (3 * LOG2DIM)];
    __hostdev__ static uint64_t memUsage() { return sizeof(InternalData); }
        mTable[n].child = PtrDiff(ptr, this);
    template<typename ValueT>
        mTable[n].value = v;
        return PtrAdd<ChildT>(this, mTable[n].child);
    __hostdev__ const ChildT* getChild(uint32_t n) const
        return PtrAdd<ChildT>(this, mTable[n].child);
        return mTable[n].value;
    __hostdev__ bool isChild(uint32_t n) const { return mChildMask.isOn(n); }
    template<typename T>
    __hostdev__ const StatsT& average() const { return mAverage; }
    __hostdev__ const StatsT& stdDeviation() const { return mStdDevi; }
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__)
#pragma GCC diagnostic pop
#endif
    InternalData() = delete;
    InternalData(const InternalData&) = delete;
    InternalData& operator=(const InternalData&) = delete;
    ~InternalData() = delete;
template<typename ChildT, uint32_t Log2Dim = ChildT::LOG2DIM + 1>
    static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE;
    template<uint32_t LOG2>
    static constexpr uint32_t LOG2DIM = Log2Dim;
    static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL;
    static constexpr uint32_t DIM = 1u << TOTAL;
    static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM);
    static constexpr uint32_t MASK = (1u << TOTAL) - 1u;
    static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
    static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL);
    template<typename ParentT>
        : BaseT(parent->mChildMask.beginOn())
        return *mParent->getChild(BaseT::pos());
        return mParent->getChild(BaseT::pos());
        return (*this)->origin();
        : BaseT(parent->data()->mChildMask.beginOff())
        return mParent->data()->getValue(BaseT::pos());
        return mParent->offsetToGlobalCoord(BaseT::pos());
        return mParent->data()->isActive(BaseT::mPos);
        return mParent->data()->getValue(BaseT::pos());
        return mParent->offsetToGlobalCoord(BaseT::pos());
        , mParent(parent->data())
        const ChildT* child = nullptr;
        if (mParent->mChildMask.isOn(BaseT::pos())) {
            child = mParent->getChild(BaseT::pos());
            value = mParent->getValue(BaseT::pos());
        return mParent->isActive(BaseT::pos());
        return mParent->offsetToGlobalCoord(BaseT::pos());
    return DataType::mChildMask.isOn(0) ? this->getChild(0)->getFirstValue() : DataType::getValue(0);
#ifdef NANOVDB_NEW_ACCESSOR_METHODS
#else // NANOVDB_NEW_ACCESSOR_METHODS
    const uint32_t n = CoordToOffset(ijk);
    return DataType::mChildMask.isOn(n) ? this->getChild(n)->getValue(ijk) : DataType::getValue(n);
__hostdev__ bool isActive(const CoordType& ijk) const
    const uint32_t n = CoordToOffset(ijk);
    return DataType::mChildMask.isOn(n) ? this->getChild(n)->isActive(ijk) : DataType::isActive(n);
    const uint32_t n = CoordToOffset(ijk);
    if (DataType::mChildMask.isOn(n))
        return this->getChild(n)->probeValue(ijk, v);
    return DataType::isActive(n);
__hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const
    const uint32_t n = CoordToOffset(ijk);
    if (DataType::mChildMask.isOn(n))
        return this->getChild(n)->probeLeaf(ijk);
#endif // NANOVDB_NEW_ACCESSOR_METHODS
    const uint32_t n = CoordToOffset(ijk);
    return DataType::mChildMask.isOn(n) ? this->getChild(n) : nullptr;
    const uint32_t n = CoordToOffset(ijk);
    return DataType::mChildMask.isOn(n) ? this->getChild(n) : nullptr;
    return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) |
           (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) |
            ((ijk[2] & MASK) >> ChildT::TOTAL);
    const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
    return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1));
    ijk <<= ChildT::TOTAL;
    ijk += this->origin();
    this->localToGlobalCoord(ijk);
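// Round-trip sketch (illustrative): CoordToOffset linearizes a global coordinate
// into the node's table index, and OffsetToLocalCoord inverts it back to local
// coordinates in [0, 1 << LOG2DIM) per component:
//
//   const uint32_t n   = CoordToOffset(ijk);    // n in [0, SIZE)
//   const Coord    loc = OffsetToLocalCoord(n); // node-local coordinate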
template<typename OpT, typename... ArgsT>
    const uint32_t n = CoordToOffset(ijk);
    if (this->isChild(n))
        return this->getChild(n)->template get<OpT>(ijk, args...);
template<typename OpT, typename... ArgsT>
    const uint32_t n = CoordToOffset(ijk);
    if (this->isChild(n))
        return this->getChild(n)->template set<OpT>(ijk, args...);
    return OpT::set(*this, n, args...);
template<typename, int, int, int>
template<typename, uint32_t>
5312 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
5314 template<
typename AccT>
5317 const uint32_t n = CoordToOffset(ijk);
5318 if (DataType::mChildMask.isOff(n))
5320 const ChildT* child = this->getChild(n);
5321 acc.insert(ijk, child);
5322 return child->getValueAndCache(ijk, acc);
5324 template<typename AccT>
5325 __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const
5327 const uint32_t n = CoordToOffset(ijk);
5328 if (DataType::mChildMask.isOff(n))
5329 return DataType::isActive(n);
5330 const ChildT* child = this->getChild(n);
5331 acc.insert(ijk, child);
5332 return child->isActiveAndCache(ijk, acc);
5334 template<typename AccT>
5337 const uint32_t n = CoordToOffset(ijk);
5338 if (DataType::mChildMask.isOff(n)) {
5340 return DataType::isActive(n);
5342 const ChildT* child = this->getChild(n);
5343 acc.insert(ijk, child);
5344 return child->probeValueAndCache(ijk, v, acc);
5346 template<typename AccT>
5347 __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const
5349 const uint32_t n = CoordToOffset(ijk);
5350 if (DataType::mChildMask.isOff(n))
5352 const ChildT* child = this->getChild(n);
5353 acc.insert(ijk, child);
5354 return child->probeLeafAndCache(ijk, acc);
5356 template<typename AccT>
5357 __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const
5359 using NodeInfoT = typename AccT::NodeInfo;
5360 const uint32_t n = CoordToOffset(ijk);
5361 if (DataType::mChildMask.isOff(n)) {
5362 return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
5364 const ChildT* child = this->getChild(n);
5365 acc.insert(ijk, child);
5366 return child->getNodeInfoAndCache(ijk, acc);
5368 #endif // NANOVDB_NEW_ACCESSOR_METHODS
5370 template<typename RayT, typename AccT>
5371 __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const
5377 const uint32_t n = CoordToOffset(ijk);
5378 if (DataType::mChildMask.isOn(n)) {
5379 const ChildT* child = this->getChild(n);
5380 acc.insert(ijk, child);
5381 return child->getDimAndCache(ijk, ray, acc);
5383 return ChildNodeType::dim();
5386 template<typename OpT, typename AccT, typename... ArgsT>
5389 getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const
5391 const uint32_t n = CoordToOffset(ijk);
5392 if (DataType::mChildMask.isOff(n))
5394 const ChildT* child = this->getChild(n);
5395 acc.insert(ijk, child);
5396 return child->template getAndCache<OpT>(ijk, acc, args...);
5399 template<typename OpT, typename AccT, typename... ArgsT>
5402 setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args)
5404 const uint32_t n = CoordToOffset(ijk);
5405 if (DataType::mChildMask.isOff(n))
5406 return OpT::set(*this, n, args...);
5407 ChildT* child = this->getChild(n);
5408 acc.insert(ijk, child);
5409 return child->template setAndCache<OpT>(ijk, acc, args...);
5419 template<typename ValueT, typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5420 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData
5422 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5423 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5428 static constexpr bool FIXED_SIZE = true;
5446 return sizeof(LeafData) - (12 + 3 + 1 + sizeof(MaskT<LOG2DIM>) + 2 * (sizeof(ValueT) + sizeof(FloatType)) + (1u << (3 * LOG2DIM)) * sizeof(ValueT));
5450 __hostdev__ static bool hasStats() { return true; }
5471 template<typename T>
5476 for (auto *p = mValues, *q = p + 512; p != q; ++p)
5490 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5491 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafFnBase
5493 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5494 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5505 uint16_t mMin, mMax, mAvg, mDev;
5507 __hostdev__ static uint64_t memUsage() { return sizeof(LeafFnBase); }
5509 __hostdev__ static bool hasStats() { return true; }
5516 return sizeof(LeafFnBase) - (12 + 3 + 1 + sizeof(MaskT<LOG2DIM>) + 2 * 4 + 4 * 2);
5521 mQuantum = (max - min) / float((1 << bitWidth) - 1);
5540 __hostdev__ void setMin(float min) { mMin = uint16_t((min - mMinimum) / mQuantum + 0.5f); }
5543 __hostdev__ void setMax(float max) { mMax = uint16_t((max - mMinimum) / mQuantum + 0.5f); }
5546 __hostdev__ void setAvg(float avg) { mAvg = uint16_t((avg - mMinimum) / mQuantum + 0.5f); }
5551 template<typename T>
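A minimal sketch (not part of NanoVDB.h) of the affine quantization that LeafFnBase implements: the step size is (max - min) / (2^bitWidth - 1), codes are computed by rounding (v - min) / quantum as in the setMin/setMax/setAvg methods above, and decoding is code * quantum + min. All names below are local stand-ins.

#include <cstdint>
float quantizeRoundTripDemo(float v, float mn, float mx, uint32_t bitWidth)
{
    const float quantum = (mx - mn) / float((1u << bitWidth) - 1u); // step size
    const uint32_t code = uint32_t((v - mn) / quantum + 0.5f);      // round to nearest code
    return code * quantum + mn; // reconstruction; error is at most quantum / 2
}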
5560 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5561 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp4, CoordT, MaskT, LOG2DIM>
5562 : public LeafFnBase<CoordT, MaskT, LOG2DIM>
5564 using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>;
5567 static constexpr bool FIXED_SIZE = true;
5568 alignas(32) uint8_t mCode[1u << (3 * LOG2DIM - 1)];
5573 static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase");
5574 return sizeof(LeafData) - sizeof(BaseT) - (1u << (3 * LOG2DIM - 1));
5577 __hostdev__ static constexpr uint8_t bitWidth() { return 4u; }
5581 const uint8_t c = mCode[i >> 1];
5582 return ((i & 1) ? c >> 4 : c & uint8_t(15)) * BaseT::mQuantum + BaseT::mMinimum;
5584 return ((mCode[i >> 1] >> ((i & 1) << 2)) & uint8_t(15)) * BaseT::mQuantum + BaseT::mMinimum;
5597 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5598 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp8, CoordT, MaskT, LOG2DIM>
5599 : public LeafFnBase<CoordT, MaskT, LOG2DIM>
5601 using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>;
5604 static constexpr bool FIXED_SIZE = true;
5605 alignas(32) uint8_t mCode[1u << 3 * LOG2DIM];
5609 static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase");
5610 return sizeof(LeafData) - sizeof(BaseT) - (1u << 3 * LOG2DIM);
5613 __hostdev__ static constexpr uint8_t bitWidth() { return 8u; }
5616 return mCode[i] * BaseT::mQuantum + BaseT::mMinimum;
5627 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5628 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp16, CoordT, MaskT, LOG2DIM>
5629 : public LeafFnBase<CoordT, MaskT, LOG2DIM>
5631 using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>;
5634 static constexpr bool FIXED_SIZE = true;
5635 alignas(32) uint16_t mCode[1u << 3 * LOG2DIM];
5640 static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase");
5641 return sizeof(LeafData) - sizeof(BaseT) - 2 * (1u << 3 * LOG2DIM);
5644 __hostdev__ static constexpr uint8_t bitWidth() { return 16u; }
5647 return mCode[i] * BaseT::mQuantum + BaseT::mMinimum;
5659 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5660 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<FpN, CoordT, MaskT, LOG2DIM>
5661 : public LeafFnBase<CoordT, MaskT, LOG2DIM>
5664 using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>;
5666 static constexpr bool FIXED_SIZE = false;
5669 static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase");
5674 __hostdev__ size_t memUsage() const { return sizeof(*this) + this->bitWidth() * 64; }
5675 __hostdev__ static size_t memUsage(uint32_t bitWidth) { return 96u + bitWidth * 64; }
5678 #ifdef NANOVDB_FPN_BRANCHLESS // faster
5681 uint16_t code = reinterpret_cast<const uint16_t*>(this + 1)[i >> (4 - b)];
5682 const static uint8_t shift[5] = {15, 7, 3, 1, 0};
5683 const static uint16_t mask[5] = {1, 3, 15, 255, 65535};
5684 code >>= (i & shift[b]) << b;
5687 uint32_t code = reinterpret_cast<const uint32_t*>(this + 1)[i >> (5 - b)];
5688 code >>= (i & ((32 >> b) - 1)) << b;
5689 code &= (1 << (1 << b)) - 1;
5691 #else // use branched version (slow)
5693 auto* values = reinterpret_cast<const uint8_t*>(this + 1);
5696 code = float((values[i >> 3] >> (i & 7)) & uint8_t(1));
5699 code = float((values[i >> 2] >> ((i & 3) << 1)) & uint8_t(3));
5702 code = float((values[i >> 1] >> ((i & 1) << 2)) & uint8_t(15));
5708 code = float(reinterpret_cast<const uint16_t*>(values)[i]);
5711 return float(code) * BaseT::mQuantum + BaseT::mMinimum;
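A minimal sketch (not part of NanoVDB.h) of the branchless bit extraction used above, where b = log2(bitWidth). A 32-bit word packs 32 >> b codes, so code i lives in word i >> (5 - b), starting at bit (i & ((32 >> b) - 1)) << b. Assumes little-endian code packing; the function name is a local stand-in.

#include <cstdint>
uint32_t extractCodeDemo(const uint32_t* words, uint32_t i, uint32_t b)
{
    uint32_t code = words[i >> (5 - b)];    // the word holding code i
    code >>= (i & ((32u >> b) - 1u)) << b;  // move the code down to bit 0
    return code & ((1u << (1u << b)) - 1u); // mask away the codes above it
}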
5724 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5725 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<bool, CoordT, MaskT, LOG2DIM>
5727 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5728 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5733 static constexpr bool FIXED_SIZE = true;
5742 __hostdev__ static constexpr uint32_t padding() { return sizeof(LeafData) - 12u - 3u - 1u - 2 * sizeof(MaskT<LOG2DIM>) - 16u; }
5744 __hostdev__ static bool hasStats() { return false; }
5761 template<typename T>
5774 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5775 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueMask, CoordT, MaskT, LOG2DIM>
5777 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5778 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5783 static constexpr bool FIXED_SIZE = true;
5792 __hostdev__ static bool hasStats() { return false; }
5795 return sizeof(LeafData) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u);
5810 template<typename T>
5823 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5824 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafIndexBase
5826 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5827 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5831 static constexpr bool FIXED_SIZE = true;
5840 return sizeof(LeafIndexBase) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u);
5842 __hostdev__ static uint64_t memUsage() { return sizeof(LeafIndexBase); }
5851 template<typename T>
5858 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5859 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueIndex, CoordT, MaskT, LOG2DIM>
5860 : public LeafIndexBase<CoordT, MaskT, LOG2DIM>
5862 using BaseT = LeafIndexBase<CoordT, MaskT, LOG2DIM>;
5865 __hostdev__ static uint32_t valueCount() { return uint32_t(512); }
5884 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5885 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueOnIndex, CoordT, MaskT, LOG2DIM>
5886 : public LeafIndexBase<CoordT, MaskT, LOG2DIM>
5888 using BaseT = LeafIndexBase<CoordT, MaskT, LOG2DIM>;
5902 uint32_t n = i >> 6;
5904 if (!(w & mask)) return uint64_t(0);
5906 if (n--) sum += BaseT::mPrefixSum >> (9u * n) & 511u;
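A minimal sketch (not part of NanoVDB.h) of how ValueOnIndex maps an active voxel to its value index above: mPrefixSum packs, in 9-bit fields, running counts of active bits in the preceding 64-bit mask words, and a popcount of the partial word adds the active bits below position i. __builtin_popcountll is the GCC/Clang intrinsic; all other names are local stand-ins.

#include <cstdint>
uint64_t activeIndexDemo(const uint64_t words[8], uint64_t prefixSum, uint64_t offset, uint32_t i)
{
    uint32_t n = i >> 6; // index of the 64-bit mask word containing voxel i
    const uint64_t w = words[n], bit = uint64_t(1) << (i & 63u);
    if (!(w & bit)) return 0u; // inactive voxels have no index
    uint64_t sum = offset + uint64_t(__builtin_popcountll(w & (bit - 1u)));
    if (n--) sum += (prefixSum >> (9u * n)) & 511u; // cumulative count of the preceding words
    return sum;
}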
5919 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5920 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueIndexMask, CoordT, MaskT, LOG2DIM>
5921 : public LeafData<ValueIndex, CoordT, MaskT, LOG2DIM>
5930 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5931 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueOnIndexMask, CoordT, MaskT, LOG2DIM>
5932 : public LeafData<ValueOnIndex, CoordT, MaskT, LOG2DIM>
5935 MaskT<LOG2DIM> mMask;
5943 template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
5944 struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Point, CoordT, MaskT, LOG2DIM>
5946 static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof");
5947 static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof");
5952 static constexpr bool FIXED_SIZE = true;
5961 alignas(32) uint16_t mValues[1u << 3 * LOG2DIM];
5969 return sizeof(LeafData) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u + (1u << 3 * LOG2DIM) * 2u);
5996 template<typename T>
6009 template<typename BuildT,
6010          typename CoordT = Coord,
6011          template<uint32_t> class MaskT = Mask,
6012          uint32_t Log2Dim = 3>
6018 static constexpr uint32_t TOTAL = 0;
6019 static constexpr uint32_t DIM = 1;
6023 using DataType = LeafData<BuildT, CoordT, MaskT, Log2Dim>;
6028 static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE;
6029 template<uint32_t LOG2>
6055 return mParent->getValue(BaseT::pos());
6060 return mParent->offsetToGlobalCoord(BaseT::pos());
6088 return mParent->getValue(BaseT::pos());
6093 return mParent->offsetToGlobalCoord(BaseT::pos());
6109 , mPos(1u << 3 * Log2Dim)
6122 return mParent->getValue(mPos);
6127 return mParent->offsetToGlobalCoord(mPos);
6132 return mParent->isActive(mPos);
6134 __hostdev__ operator bool() const { return mPos < (1u << 3 * Log2Dim); }
6152 static constexpr uint32_t LOG2DIM = Log2Dim;
6153 static constexpr uint32_t TOTAL = LOG2DIM;
6154 static constexpr uint32_t DIM = 1u << TOTAL;
6155 static constexpr uint32_t SIZE = 1u << 3 * LOG2DIM;
6156 static constexpr uint32_t MASK = (1u << LOG2DIM) - 1u;
6157 static constexpr uint32_t LEVEL = 0;
6158 static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL);
6194 const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
6195 return CoordT(n >> 2 * LOG2DIM, m >> LOG2DIM, m & MASK);
6203 return OffsetToLocalCoord(n) + this->origin();
6213 if (this->hasBBox()) {
6276 const uint32_t n = CoordToOffset(ijk);
6286 return ((ijk[0] & MASK) << (2 * LOG2DIM)) | ((ijk[1] & MASK) << LOG2DIM) | (ijk[2] & MASK);
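A minimal sketch (not part of NanoVDB.h) of the leaf-level mapping above for the default Log2Dim = 3 (an 8x8x8 leaf): unlike the internal nodes there is no child shift, so the low three bits of each axis pack directly into a 9-bit offset. The function name is a local stand-in.

#include <cstdint>
uint32_t leafOffsetDemo(int i, int j, int k)
{
    constexpr uint32_t LOG2DIM = 3, MASK = (1u << LOG2DIM) - 1u; // MASK = 7
    return ((i & MASK) << (2 * LOG2DIM)) | ((j & MASK) << LOG2DIM) | (k & MASK);
}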
6298 template<typename OpT, typename... ArgsT>
6304 template<typename OpT, typename... ArgsT>
6310 template<typename OpT, typename... ArgsT>
6313 return OpT::set(*this, CoordToOffset(ijk), args...);
6316 template<typename OpT, typename... ArgsT>
6319 return OpT::set(*this, n, args...);
6325 template<typename, int, int, int>
6330 template<typename, uint32_t>
6333 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
6335 template<typename AccT>
6339 template<typename AccT>
6340 __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType&, const AccT&) const
6342 using NodeInfoT = typename AccT::NodeInfo;
6343 return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]};
6346 template<typename AccT>
6347 __hostdev__ bool isActiveAndCache(const CoordT& ijk, const AccT&) const { return this->isActive(ijk); }
6349 template<typename AccT>
6350 __hostdev__ bool probeValueAndCache(const CoordT& ijk, ValueType& v, const AccT&) const { return this->probeValue(ijk, v); }
6352 template<typename AccT>
6353 __hostdev__ const LeafNode* probeLeafAndCache(const CoordT&, const AccT&) const { return this; }
6356 template<typename RayT, typename AccT>
6357 __hostdev__ uint32_t getDimAndCache(const CoordT&, const RayT&, const AccT&) const
6363 return ChildNodeType::dim();
6366 template<typename OpT, typename AccT, typename... ArgsT>
6369 getAndCache(const CoordType& ijk, const AccT&, ArgsT&&... args) const
6374 template<typename OpT, typename AccT, typename... ArgsT>
6377 setAndCache(const CoordType& ijk, const AccT&, ArgsT&&... args)
6379 return OpT::set(*this, CoordToOffset(ijk), args...);
6386 template<typename ValueT, typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM>
6389 static_assert(LOG2DIM == 3, "LeafNode::updateBBox: only supports LOGDIM = 3!");
6394 auto update = [&](uint32_t min, uint32_t max, int axis) {
6400 uint32_t Xmin = word64 ? 0u : 8u, Xmax = Xmin;
6401 for (int i = 1; i < 8; ++i) {
6410 update(Xmin, Xmax, 0);
6411 update(FindLowestOn(word64) >> 3, FindHighestOn(word64) >> 3, 1);
6412 const uint32_t *p = reinterpret_cast<const uint32_t*>(&word64), word32 = p[0] | p[1];
6413 const uint16_t *q = reinterpret_cast<const uint16_t*>(&word32), word16 = q[0] | q[1];
6414 const uint8_t *b = reinterpret_cast<const uint8_t*>(&word16), byte = b[0] | b[1];
6416 update(FindLowestOn(static_cast<uint32_t>(byte)), FindHighestOn(static_cast<uint32_t>(byte)), 2);
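A minimal sketch (not part of NanoVDB.h) of the word-folding trick in updateBBox above. Each 64-bit occupancy word covers an 8x8 y-z slice of the leaf; OR-folding it down to a single byte collapses y and leaves the occupancy profile along z, whose lowest and highest set bits bound the z extent. __builtin_ctz and __builtin_clz are GCC/Clang intrinsics standing in for FindLowestOn and FindHighestOn.

#include <cstdint>
void zExtentDemo(uint64_t word64, uint32_t& zMin, uint32_t& zMax)
{
    const uint32_t word32 = uint32_t(word64) | uint32_t(word64 >> 32);
    const uint16_t word16 = uint16_t(word32) | uint16_t(word32 >> 16);
    const uint8_t  byte   = uint8_t(word16)  | uint8_t(word16 >> 8);
    zMin = uint32_t(__builtin_ctz(byte));       // first occupied z (byte must be non-zero)
    zMax = 31u - uint32_t(__builtin_clz(byte)); // last occupied z
}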
6425 template<typename BuildT>
6427 template<typename BuildT>
6429 template<typename BuildT>
6431 template<typename BuildT>
6433 template<typename BuildT>
6435 template<typename BuildT>
6439 template<typename BuildT, int LEVEL>
6443 template<typename BuildT>
6449 template<typename BuildT>
6455 template<typename BuildT>
6461 template<typename BuildT>
6532 template<typename BuildT>
6542 mutable const RootT* mRoot;
6548 static const int CacheLevels = 0;
6549 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
6570 : ReadAccessor(grid.tree().root())
6576 : ReadAccessor(tree.root())
6590 #ifdef NANOVDB_NEW_ACCESSOR_METHODS
6593 return this->template get<GetValue<BuildT>>(ijk);
6602 #else // NANOVDB_NEW_ACCESSOR_METHODS
6605 return mRoot->getValueAndCache(ijk, *this);
6609 return this->getValue(CoordType(i, j, k));
6617 return this->getValue(CoordType(i, j, k));
6620 __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
6622 return mRoot->getNodeInfoAndCache(ijk, *this);
6625 __hostdev__ bool isActive(const CoordType& ijk) const
6627 return mRoot->isActiveAndCache(ijk, *this);
6632 return mRoot->probeValueAndCache(ijk, v, *this);
6635 __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
6637 return mRoot->probeLeafAndCache(ijk, *this);
6639 #endif // NANOVDB_NEW_ACCESSOR_METHODS
6640 template<typename RayT>
6643 return mRoot->getDimAndCache(ijk, ray, *this);
6645 template<typename OpT, typename... ArgsT>
6648 return mRoot->template get<OpT>(ijk, args...);
6651 template<typename OpT, typename... ArgsT>
6654 return const_cast<RootT*>(mRoot)->template set<OpT>(ijk, args...);
6661 template<typename, uint32_t>
6663 template<typename, typename, template<uint32_t> class, uint32_t>
6667 template<typename NodeT>
6672 template<typename BuildT, int LEVEL0>
6675 static_assert(LEVEL0 >= 0 && LEVEL0 <= 2, "LEVEL0 should be 0, 1, or 2");
6689 mutable CoordT mKey;
6690 mutable const RootT* mRoot;
6691 mutable const NodeT* mNode;
6698 static const int CacheLevels = 1;
6699 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
6700 using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
6712 : ReadAccessor(grid.tree().root())
6718 : ReadAccessor(tree.root())
6738 return (ijk[0] & int32_t(~NodeT::MASK)) == mKey[0] &&
6739 (ijk[1] & int32_t(~NodeT::MASK)) == mKey[1] &&
6740 (ijk[2] & int32_t(~NodeT::MASK)) == mKey[2];
6743 #ifdef NANOVDB_NEW_ACCESSOR_METHODS
6746 return this->template get<GetValue<BuildT>>(ijk);
6755 #else // NANOVDB_NEW_ACCESSOR_METHODS
6758 if (this->isCached(ijk))
6759 return mNode->getValueAndCache(ijk, *this);
6760 return mRoot->getValueAndCache(ijk, *this);
6764 return this->getValue(CoordType(i, j, k));
6772 return this->getValue(CoordType(i, j, k));
6775 __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
6777 if (this->isCached(ijk))
6778 return mNode->getNodeInfoAndCache(ijk, *this);
6779 return mRoot->getNodeInfoAndCache(ijk, *this);
6782 __hostdev__ bool isActive(const CoordType& ijk) const
6784 if (this->isCached(ijk))
6785 return mNode->isActiveAndCache(ijk, *this);
6786 return mRoot->isActiveAndCache(ijk, *this);
6791 if (this->isCached(ijk))
6792 return mNode->probeValueAndCache(ijk, v, *this);
6793 return mRoot->probeValueAndCache(ijk, v, *this);
6796 __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
6798 if (this->isCached(ijk))
6799 return mNode->probeLeafAndCache(ijk, *this);
6800 return mRoot->probeLeafAndCache(ijk, *this);
6802 #endif // NANOVDB_NEW_ACCESSOR_METHODS
6803 template<typename RayT>
6806 if (this->isCached(ijk))
6807 return mNode->getDimAndCache(ijk, ray, *this);
6808 return mRoot->getDimAndCache(ijk, ray, *this);
6811 template<typename OpT, typename... ArgsT>
6814 if (this->isCached(ijk))
6815 return mNode->template getAndCache<OpT>(ijk, *this, args...);
6816 return mRoot->template getAndCache<OpT>(ijk, *this, args...);
6819 template<typename OpT, typename... ArgsT>
6822 if (this->isCached(ijk))
6823 return const_cast<NodeT*>(mNode)->template setAndCache<OpT>(ijk, *this, args...);
6824 return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...);
6831 template<typename, uint32_t>
6833 template<typename, typename, template<uint32_t> class, uint32_t>
6839 mKey = ijk & ~NodeT::MASK;
6844 template<typename OtherNodeT>
6849 template<typename BuildT, int LEVEL0, int LEVEL1>
6852 static_assert(LEVEL0 >= 0 && LEVEL0 <= 2, "LEVEL0 must be 0, 1, 2");
6853 static_assert(LEVEL1 >= 0 && LEVEL1 <= 2, "LEVEL1 must be 0, 1, 2");
6854 static_assert(LEVEL0 < LEVEL1, "Level 0 must be lower than level 1");
6867 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY // 44 bytes total
6868 mutable CoordT mKey;
6869 #else // 68 bytes total
6870 mutable CoordT mKeys[2];
6872 mutable const RootT* mRoot;
6873 mutable const Node1T* mNode1;
6874 mutable const Node2T* mNode2;
6881 static const int CacheLevels = 2;
6882 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
6883 using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
6887 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
6900 : ReadAccessor(grid.tree().root())
6906 : ReadAccessor(tree.root())
6913 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
6929 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
6930 __hostdev__ bool isCached1(CoordValueType dirty) const
6934 if (dirty & int32_t(~Node1T::MASK)) {
6940 __hostdev__ bool isCached2(CoordValueType dirty) const
6944 if (dirty & int32_t(~Node2T::MASK)) {
6950 __hostdev__ CoordValueType computeDirty(const CoordType& ijk) const
6952 return (ijk[0] ^ mKey[0]) | (ijk[1] ^ mKey[1]) | (ijk[2] ^ mKey[2]);
6957 return (ijk[0] & int32_t(~Node1T::MASK)) == mKeys[0][0] &&
6958 (ijk[1] & int32_t(~Node1T::MASK)) == mKeys[0][1] &&
6959 (ijk[2] & int32_t(~Node1T::MASK)) == mKeys[0][2];
6963 return (ijk[0] & int32_t(~Node2T::MASK)) == mKeys[1][0] &&
6964 (ijk[1] & int32_t(~Node2T::MASK)) == mKeys[1][1] &&
6965 (ijk[2] & int32_t(~Node2T::MASK)) == mKeys[1][2];
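A minimal sketch (not part of NanoVDB.h) of the cache test used above: a node whose dimension is MASK + 1 covers exactly the coordinates that agree on all bits above log2(DIM), so clearing the low bits of a query coordinate yields a key that is identical for every coordinate inside the cached node. The function name is a local stand-in.

#include <cstdint>
bool sameNodeDemo(const int32_t ijk[3], const int32_t key[3], int32_t mask)
{
    return (ijk[0] & ~mask) == key[0] && // e.g. mask = 4095 for an upper internal node
           (ijk[1] & ~mask) == key[1] &&
           (ijk[2] & ~mask) == key[2];
}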
6969 #ifdef NANOVDB_NEW_ACCESSOR_METHODS
6972 return this->template get<GetValue<BuildT>>(ijk);
6981 #else // NANOVDB_NEW_ACCESSOR_METHODS
6985 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
6986 const CoordValueType dirty = this->computeDirty(ijk);
6990 if (this->isCached1(dirty)) {
6991 return mNode1->getValueAndCache(ijk, *this);
6992 } else if (this->isCached2(dirty)) {
6993 return mNode2->getValueAndCache(ijk, *this);
6995 return mRoot->getValueAndCache(ijk, *this);
7003 return this->getValue(CoordType(i, j, k));
7007 return this->getValue(CoordType(i, j, k));
7009 __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
7011 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7012 const CoordValueType dirty = this->computeDirty(ijk);
7016 if (this->isCached1(dirty)) {
7017 return mNode1->getNodeInfoAndCache(ijk, *this);
7018 } else if (this->isCached2(dirty)) {
7019 return mNode2->getNodeInfoAndCache(ijk, *this);
7021 return mRoot->getNodeInfoAndCache(ijk, *this);
7024 __hostdev__ bool isActive(const CoordType& ijk) const
7026 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7027 const CoordValueType dirty = this->computeDirty(ijk);
7031 if (this->isCached1(dirty)) {
7032 return mNode1->isActiveAndCache(ijk, *this);
7033 } else if (this->isCached2(dirty)) {
7034 return mNode2->isActiveAndCache(ijk, *this);
7036 return mRoot->isActiveAndCache(ijk, *this);
7041 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7042 const CoordValueType dirty = this->computeDirty(ijk);
7046 if (this->isCached1(dirty)) {
7047 return mNode1->probeValueAndCache(ijk, v, *this);
7048 } else if (this->isCached2(dirty)) {
7049 return mNode2->probeValueAndCache(ijk, v, *this);
7051 return mRoot->probeValueAndCache(ijk, v, *this);
7054 __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
7056 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7057 const CoordValueType dirty = this->computeDirty(ijk);
7061 if (this->isCached1(dirty)) {
7062 return mNode1->probeLeafAndCache(ijk, *this);
7063 } else if (this->isCached2(dirty)) {
7064 return mNode2->probeLeafAndCache(ijk, *this);
7066 return mRoot->probeLeafAndCache(ijk, *this);
7068 #endif // NANOVDB_NEW_ACCESSOR_METHODS
7070 template<typename RayT>
7073 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7074 const CoordValueType dirty = this->computeDirty(ijk);
7078 if (this->isCached1(dirty)) {
7079 return mNode1->getDimAndCache(ijk, ray, *this);
7080 } else if (this->isCached2(dirty)) {
7081 return mNode2->getDimAndCache(ijk, ray, *this);
7083 return mRoot->getDimAndCache(ijk, ray, *this);
7086 template<typename OpT, typename... ArgsT>
7089 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7090 const CoordValueType dirty = this->computeDirty(ijk);
7094 if (this->isCached1(dirty)) {
7095 return mNode1->template getAndCache<OpT>(ijk, *this, args...);
7096 } else if (this->isCached2(dirty)) {
7097 return mNode2->template getAndCache<OpT>(ijk, *this, args...);
7099 return mRoot->template getAndCache<OpT>(ijk, *this, args...);
7102 template<typename OpT, typename... ArgsT>
7105 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7106 const CoordValueType dirty = this->computeDirty(ijk);
7110 if (this->isCached1(dirty)) {
7111 return const_cast<Node1T*>(mNode1)->template setAndCache<OpT>(ijk, *this, args...);
7112 } else if (this->isCached2(dirty)) {
7113 return const_cast<Node2T*>(mNode2)->template setAndCache<OpT>(ijk, *this, args...);
7115 return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...);
7122 template<typename, uint32_t>
7124 template<typename, typename, template<uint32_t> class, uint32_t>
7130 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7133 mKeys[0] = ijk & ~Node1T::MASK;
7139 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7142 mKeys[1] = ijk & ~Node2T::MASK;
7146 template<typename OtherNodeT>
7151 template<typename BuildT>
7167 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY // 44 bytes total
7168 mutable CoordT mKey;
7169 #else // 68 bytes total
7170 mutable CoordT mKeys[3];
7172 mutable const RootT* mRoot;
7173 mutable const void* mNode[3];
7180 static const int CacheLevels = 3;
7181 #ifndef NANOVDB_NEW_ACCESSOR_METHODS
7182 using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo;
7186 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7192 , mNode{nullptr, nullptr, nullptr}
7198 : ReadAccessor(grid.tree().root())
7204 : ReadAccessor(tree.root())
7218 template<typename NodeT>
7223 return reinterpret_cast<const T*>(mNode[NodeT::LEVEL]);
7230 static_assert(LEVEL >= 0 && LEVEL <= 2, "ReadAccessor::getNode: Invalid node type");
7231 return reinterpret_cast<const T*>(mNode[LEVEL]);
7237 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7242 mNode[0] = mNode[1] = mNode[2] = nullptr;
7245 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7246 template<typename NodeT>
7247 __hostdev__ bool isCached(CoordValueType dirty) const
7249 if (!mNode[NodeT::LEVEL])
7251 if (dirty & int32_t(~NodeT::MASK)) {
7252 mNode[NodeT::LEVEL] = nullptr;
7258 __hostdev__ CoordValueType computeDirty(const CoordType& ijk) const
7260 return (ijk[0] ^ mKey[0]) | (ijk[1] ^ mKey[1]) | (ijk[2] ^ mKey[2]);
7263 template<typename NodeT>
7266 return (ijk[0] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][0] &&
7267 (ijk[1] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][1] &&
7268 (ijk[2] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][2];
7272 #ifdef NANOVDB_NEW_ACCESSOR_METHODS
7275 return this->template get<GetValue<BuildT>>(ijk);
7284 #else // NANOVDB_NEW_ACCESSOR_METHODS
7288 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7289 const CoordValueType dirty = this->computeDirty(ijk);
7293 if (this->isCached<LeafT>(dirty)) {
7294 return ((LeafT*)mNode[0])->getValue(ijk);
7295 } else if (this->isCached<NodeT1>(dirty)) {
7296 return ((NodeT1*)mNode[1])->getValueAndCache(ijk, *this);
7297 } else if (this->isCached<NodeT2>(dirty)) {
7298 return ((NodeT2*)mNode[2])->getValueAndCache(ijk, *this);
7300 return mRoot->getValueAndCache(ijk, *this);
7308 return this->getValue(CoordType(i, j, k));
7312 return this->getValue(CoordType(i, j, k));
7315 __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const
7317 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7318 const CoordValueType dirty = this->computeDirty(ijk);
7322 if (this->isCached<LeafT>(dirty)) {
7323 return ((LeafT*)mNode[0])->getNodeInfoAndCache(ijk, *this);
7324 } else if (this->isCached<NodeT1>(dirty)) {
7325 return ((NodeT1*)mNode[1])->getNodeInfoAndCache(ijk, *this);
7326 } else if (this->isCached<NodeT2>(dirty)) {
7327 return ((NodeT2*)mNode[2])->getNodeInfoAndCache(ijk, *this);
7329 return mRoot->getNodeInfoAndCache(ijk, *this);
7332 __hostdev__ bool isActive(const CoordType& ijk) const
7334 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7335 const CoordValueType dirty = this->computeDirty(ijk);
7339 if (this->isCached<LeafT>(dirty)) {
7340 return ((LeafT*)mNode[0])->isActive(ijk);
7341 } else if (this->isCached<NodeT1>(dirty)) {
7342 return ((NodeT1*)mNode[1])->isActiveAndCache(ijk, *this);
7343 } else if (this->isCached<NodeT2>(dirty)) {
7344 return ((NodeT2*)mNode[2])->isActiveAndCache(ijk, *this);
7346 return mRoot->isActiveAndCache(ijk, *this);
7351 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7352 const CoordValueType dirty = this->computeDirty(ijk);
7356 if (this->isCached<LeafT>(dirty)) {
7357 return ((LeafT*)mNode[0])->probeValue(ijk, v);
7358 } else if (this->isCached<NodeT1>(dirty)) {
7359 return ((NodeT1*)mNode[1])->probeValueAndCache(ijk, v, *this);
7360 } else if (this->isCached<NodeT2>(dirty)) {
7361 return ((NodeT2*)mNode[2])->probeValueAndCache(ijk, v, *this);
7363 return mRoot->probeValueAndCache(ijk, v, *this);
7365 __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const
7367 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7368 const CoordValueType dirty = this->computeDirty(ijk);
7372 if (this->isCached<LeafT>(dirty)) {
7373 return ((LeafT*)mNode[0]);
7374 } else if (this->isCached<NodeT1>(dirty)) {
7375 return ((NodeT1*)mNode[1])->probeLeafAndCache(ijk, *this);
7376 } else if (this->isCached<NodeT2>(dirty)) {
7377 return ((NodeT2*)mNode[2])->probeLeafAndCache(ijk, *this);
7379 return mRoot->probeLeafAndCache(ijk, *this);
7381 #endif // NANOVDB_NEW_ACCESSOR_METHODS
7383 template<typename OpT, typename... ArgsT>
7386 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7387 const CoordValueType dirty = this->computeDirty(ijk);
7391 if (this->isCached<LeafT>(dirty)) {
7392 return ((const LeafT*)mNode[0])->template getAndCache<OpT>(ijk, *this, args...);
7393 } else if (this->isCached<NodeT1>(dirty)) {
7394 return ((const NodeT1*)mNode[1])->template getAndCache<OpT>(ijk, *this, args...);
7395 } else if (this->isCached<NodeT2>(dirty)) {
7396 return ((const NodeT2*)mNode[2])->template getAndCache<OpT>(ijk, *this, args...);
7398 return mRoot->template getAndCache<OpT>(ijk, *this, args...);
7401 template<typename OpT, typename... ArgsT>
7404 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7405 const CoordValueType dirty = this->computeDirty(ijk);
7409 if (this->isCached<LeafT>(dirty)) {
7410 return ((LeafT*)mNode[0])->template setAndCache<OpT>(ijk, *this, args...);
7411 } else if (this->isCached<NodeT1>(dirty)) {
7412 return ((NodeT1*)mNode[1])->template setAndCache<OpT>(ijk, *this, args...);
7413 } else if (this->isCached<NodeT2>(dirty)) {
7414 return ((NodeT2*)mNode[2])->template setAndCache<OpT>(ijk, *this, args...);
7416 return ((RootT*)mRoot)->template setAndCache<OpT>(ijk, *this, args...);
7419 template<typename RayT>
7422 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7423 const CoordValueType dirty = this->computeDirty(ijk);
7427 if (this->isCached<LeafT>(dirty)) {
7428 return ((LeafT*)mNode[0])->getDimAndCache(ijk, ray, *this);
7429 } else if (this->isCached<NodeT1>(dirty)) {
7430 return ((NodeT1*)mNode[1])->getDimAndCache(ijk, ray, *this);
7431 } else if (this->isCached<NodeT2>(dirty)) {
7432 return ((NodeT2*)mNode[2])->getDimAndCache(ijk, ray, *this);
7434 return mRoot->getDimAndCache(ijk, ray, *this);
7441 template<typename, uint32_t>
7443 template<typename, typename, template<uint32_t> class, uint32_t>
7447 template<typename NodeT>
7450 #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY
7453 mKeys[NodeT::LEVEL] = ijk & ~NodeT::MASK;
7455 mNode[NodeT::LEVEL] = node;
7473 template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float>
7479 template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float>
7485 template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float>
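A minimal usage sketch (not part of NanoVDB.h) for the createAccessor overloads declared above; grid is a hypothetical, already loaded NanoGrid<float>:

// auto acc0 = nanovdb::createAccessor<>(grid);        // no node caching
// auto acc1 = nanovdb::createAccessor<0>(grid);       // cache leaf nodes only
// auto acc3 = nanovdb::createAccessor<0, 1, 2>(grid); // cache all three node levels
// const float v = acc3.getValue(nanovdb::Coord(1, 2, 3));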
7502 uint32_t mRootTableSize, mPadding{0};
7505 template<typename T>
7508 mGridData = *grid.data();
7509 mTreeData = *grid.tree().data();
7510 mIndexBBox = grid.indexBBox();
7511 mRootTableSize = grid.tree().root().getTableSize();
7515 static_assert(8 * 96 == sizeof(GridMetaData), "GridMetaData has unexpected size");
7517 memcpy64(this, gridData, 96);
7519 mGridData = *gridData;
7520 mTreeData = *reinterpret_cast<const TreeData*>(gridData->treePtr());
7521 mIndexBBox = gridData->indexBBox();
7522 mRootTableSize = gridData->rootTableSize();
7534 return gridData->isRootConnected();
7538 template<typename T>
7578 template<typename AttT, typename BuildT = uint32_t>
7587 : AccT(grid.tree().root())
7589 , mData(grid.template getBlindData<AttT>(0))
7605 const uint64_t count = mGrid.blindMetaData(0u).mValueCount;
7607 end = begin + count;
7615 auto* leaf = this->probeLeaf(ijk);
7616 if (leaf == nullptr) {
7619 begin = mData + leaf->minimum();
7620 end = begin + leaf->maximum();
7621 return leaf->maximum();
7627 begin = end = nullptr;
7628 if (auto* leaf = this->probeLeaf(ijk)) {
7630 if (leaf->isActive(offset)) {
7631 begin = mData + leaf->minimum();
7632 end = begin + leaf->getValue(offset);
7634 begin += leaf->getValue(offset - 1);
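A minimal usage sketch (not part of NanoVDB.h) of the voxelPoints logic above; pointGrid and ijk are hypothetical:

// nanovdb::PointAccessor<nanovdb::Vec3f> acc(pointGrid);
// const nanovdb::Vec3f *begin = nullptr, *end = nullptr;
// const auto count = acc.voxelPoints(ijk, begin, end); // zero if the voxel holds no points
// for (auto* p = begin; p != end; ++p) { /* consume *p */ }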
7641 template<typename AttT>
7650 : AccT(grid.tree().root())
7652 , mData(grid.template getBlindData<AttT>(0))
7672 const uint64_t count = mGrid.blindMetaData(0u).mValueCount;
7674 end = begin + count;
7682 auto* leaf = this->probeLeaf(ijk);
7683 if (leaf == nullptr)
7685 begin = mData + leaf->offset();
7686 end = begin + leaf->pointCount();
7687 return leaf->pointCount();
7693 if (auto* leaf = this->probeLeaf(ijk)) {
7695 if (leaf->isActive(n)) {
7696 begin = mData + leaf->first(n);
7697 end = mData + leaf->last(n);
7701 begin = end = nullptr;
7709 template<typename ChannelT, typename IndexT = ValueIndex>
7726 : BaseT(grid.tree().root())
7732 this->setChannel(channelID);
7737 : BaseT(grid.tree().root())
7739 , mChannel(channelPtr)
7770 return mChannel = const_cast<ChannelT*>(mGrid.template getBlindData<ChannelT>(channelID));
7786 const bool isActive = BaseT::probeValue(ijk, idx);
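A minimal usage sketch (not part of NanoVDB.h) of a ChannelAccessor, which resolves a coordinate to a linear index in an IndexGrid and reads the value from the selected blind-data channel; indexGrid is hypothetical:

// nanovdb::ChannelAccessor<float> acc(indexGrid, 0u); // attach to channel 0
// const float v = acc(nanovdb::Coord(1, 2, 3));       // index lookup + channel read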
7793 template<typename T>
7802 struct MiniGridHandle {
7807 BufferType(BufferType &&other) : data(other.data), size(other.size) {other.data=nullptr; other.size=0;}
7808 ~BufferType() {std::free(data);}
7809 BufferType& operator=(const BufferType &other) = delete;
7810 BufferType& operator=(BufferType &&other){data=other.data; size=other.size; other.data=nullptr; other.size=0; return *this;}
7811 static BufferType create(size_t n, BufferType* dummy = nullptr) {return BufferType(n);}
7813 MiniGridHandle(BufferType &&buf) : buffer(std::move(buf)) {}
7869 uint32_t nodeCount[4];
7870 uint32_t tileCount[3];
7877 #if !defined(__CUDA_ARCH__) && !defined(__HIP__)
7881 static const char* LUT[] = {"NONE", "ZIP", "BLOSC", "END"};
7882 static_assert(sizeof(LUT) / sizeof(char*) - 1 == int(Codec::END), "Unexpected size of LUT");
7883 return LUT[static_cast<int>(codec)];
7907 template<typename StreamT>
7913 #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS
7918 const char* gridName = gridData->gridName();
7919 uint32_t nameSize = 1;
7920 for (const char* p = gridName; *p != '\0'; ++p) ++nameSize;
7921 const TreeData* treeData = (const TreeData*)gridData->treePtr();
7922 FileMetaData meta{gridData->mGridSize, gridData->mGridSize, 0u, treeData->mVoxelCount,
7923 gridData->mGridType, gridData->mGridClass, gridData->mWorldBBox,
7924 treeData->bbox(), gridData->mVoxelSize, nameSize,
7925 {treeData->mNodeCount[0], treeData->mNodeCount[1], treeData->mNodeCount[2], 1u},
7926 {treeData->mTileCount[0], treeData->mTileCount[1], treeData->mTileCount[2]},
7930 os.write(gridName, nameSize);
7932 os.write((const char*)gridData, gridData->mGridSize);
7937 template<typename GridHandleT, template<typename...> class VecT>
7940 #ifdef NANOVDB_USE_IOSTREAMS // use this to switch between std::ofstream or FILE implementations
7945 StreamT(const char* name) { fptr = fopen(name, "wb"); }
7946 ~StreamT() { fclose(fptr); }
7947 void write(const char* data, size_t n) { fwrite(data, 1, n, fptr); }
7948 bool is_open() const { return fptr != NULL; }
7951 if (!os.is_open()) {
7952 fprintf(stderr, "nanovdb::writeUncompressedGrids: Unable to open file \"%s\" for output\n", fileName);
7955 for (auto& h : handles) {
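A minimal usage sketch (not part of NanoVDB.h) for writeUncompressedGrids above; the handles vector is hypothetical:

// std::vector<nanovdb::GridHandle<>> handles; // filled elsewhere
// nanovdb::writeUncompressedGrids("grids.nvdb", handles); // raw defaults to false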
7965 template<typename GridHandleT, typename StreamT, template<typename...> class VecT>
7968 VecT<GridHandleT> handles;
7970 is.read((char*)&data, sizeof(GridData));
7971 if (data.isValid()) {
7972 uint64_t size = data.mGridSize, sum = 0u;
7973 while(data.mGridIndex + 1u < data.mGridCount) {
7974 is.skip(data.mGridSize - sizeof(GridData));
7975 is.read((char*)&data, sizeof(GridData));
7976 sum += data.mGridSize;
7978 is.skip(-int64_t(sum + sizeof(GridData)));
7979 auto buffer = GridHandleT::BufferType::create(size + sum, &pool);
7981 handles.emplace_back(std::move(buffer));
7983 is.skip(-sizeof(GridData));
7985 while(is.read((char*)&head, sizeof(FileHeader))) {
7987 fprintf(stderr, "nanovdb::readUncompressedGrids: invalid magic number = \"%s\"\n", (const char*)&(head.magic));
7990 fprintf(stderr, "nanovdb::readUncompressedGrids: invalid major version = \"%s\"\n", head.version.c_str());
7993 fprintf(stderr, "nanovdb::readUncompressedGrids: invalid codec = \"%s\"\n", toStr(head.codec));
7997 for (uint16_t i = 0; i < head.gridCount; ++i) {
8002 handles.emplace_back(std::move(buffer));
8010 template<typename GridHandleT, template<typename...> class VecT>
8013 #ifdef NANOVDB_USE_IOSTREAMS // use this to switch between std::ifstream or FILE implementations
8016 void skip(int64_t off) { this->seekg(off, std::ios_base::cur); }
8021 StreamT(const char* name) { fptr = fopen(name, "rb"); }
8022 ~StreamT() { fclose(fptr); }
8023 bool read(char* data, size_t n) {
8024 size_t m = fread(data, 1, n, fptr);
8028 bool is_open() const { return fptr != NULL; }
8031 StreamT is(fileName);
8032 if (!is.is_open()) {
8033 fprintf(stderr, "nanovdb::readUncompressedGrids: Unable to open file \"%s\" for input\n", fileName);
8036 return readUncompressedGrids<GridHandleT, StreamT, VecT>(is, buffer);
8039 #endif // if !defined(__CUDA_ARCH__) && !defined(__HIP__)
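A minimal usage sketch (not part of NanoVDB.h) for reading the grids back, assuming the VecT template parameter defaults to std::vector:

// auto handles = nanovdb::readUncompressedGrids<nanovdb::GridHandle<>>("grids.nvdb");
// const auto* grid = handles[0].grid<float>(); // nullptr if the stored type differs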
8048 template<typename BuildT>
8058 template<typename BuildT>
8061 static_assert(!BuildTraits<BuildT>::is_special, "SetValue does not support special value types");
8070 template<typename BuildT>
8073 static_assert(!BuildTraits<BuildT>::is_special, "SetVoxel does not support special value types");
8084 template<typename BuildT>
8096 template<typename BuildT>
8108 template<typename BuildT>
8120 template<typename BuildT>
8132 template<typename BuildT>
8144 template<typename BuildT>
8150 v = root.mBackground;
8156 return tile.state > 0u;
8160 v = node.mTable[n].value;
8161 return node.mValueMask.isOn(n);
8165 v = node.mTable[n].value;
8166 return node.mValueMask.isOn(n);
8170 v = leaf.getValue(n);
8171 return leaf.mValueMask.isOn(n);
8177 template<typename BuildT>
8199 return NodeInfo{2u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()};
8203 return NodeInfo{1u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()};
8207 return NodeInfo{0u, leaf.dim(), leaf.minimum(), leaf.maximum(), leaf.average(), leaf.stdDeviation(), leaf.bbox()};
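A minimal usage sketch (not part of NanoVDB.h) of the functor-based access path that the GetValue/SetValue/SetVoxel/ProbeValue/GetNodeInfo structs above enable, assuming NANOVDB_NEW_ACCESSOR_METHODS is defined; grid and ijk are hypothetical:

// auto acc = grid->getAccessor();
// const float v = acc.template get<nanovdb::GetValue<float>>(ijk); // same result as acc.getValue(ijk)
// acc.template set<nanovdb::SetVoxel<float>>(ijk, 1.0f);           // writes the voxel value only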
8213 #endif // end of NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED
#define NANOVDB_MAGIC_NUMBER
__hostdev__ const MaskType< LOG2DIM > & valueMask() const
Return a const reference to the bit mask of active voxels in this internal node.
__hostdev__ BBox(BBox &other, const SplitT &)
__hostdev__ Iterator end() const
__hostdev__ Vec4 & operator*=(const T &s)
__hostdev__ bool isSequential() const
return true if the specified node type is layed out breadth-first in memory and has a fixed size...
typename UpperNodeType::ChildNodeType LowerNodeType
__hostdev__ FloatType variance() const
Return the variance of all the active values encoded in this internal node and any of its child nodes...
static __hostdev__ uint32_t voxelCount()
Return the total number of voxels (e.g. values) encoded in this leaf node.
LeafData< BuildT, Coord, Mask, 3 > DataType
__hostdev__ bool isCached(const CoordType &ijk) const
static __hostdev__ Coord max()
__hostdev__ CoordType getCoord() const
__hostdev__ ReadAccessor(const RootT &root)
Constructor from a root node.
typename RootT::ValueType ValueType
__hostdev__ uint32_t getDim(const CoordType &ijk, const RayT &ray) const
__hostdev__ const Vec3T & min() const
auto data() FMT_NOEXCEPT-> T *
__hostdev__ const TreeType & tree() const
Return a const reference to the tree of the IndexGrid.
__hostdev__ void setValue(const CoordT &ijk, const ValueType &v)
Sets the value at the specified location and activate its state.
__hostdev__ bool isOff() const
Return true if none of the bits are set in this Mask.
Trait use to remove reference, i.e. "&", qualifier from a type. Default implementation is just a pass...
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args) const
__hostdev__ BBox< CoordT > bbox() const
Return the bounding box in index space of active values in this leaf node.
void writeUncompressedGrids(const char *fileName, const VecT< GridHandleT > &handles, bool raw=false)
write multiple NanoVDB grids to a single file, without compression.
A simple vector class with three components, similar to openvdb::math::Vec3.
static __hostdev__ uint64_t memUsage(uint32_t tableSize)
Return the expected memory footprint in bytes with the specified number of tiles. ...
typedef int(APIENTRYP RE_PFNGLXSWAPINTERVALSGIPROC)(int)
__hostdev__ int32_t z() const
__hostdev__ DenseIter(RootT *parent)
__hostdev__ ValueOnIterator beginValueOn() const
__hostdev__ Vec4 operator/(const Vec4 &v) const
__hostdev__ bool isMaskOn(std::initializer_list< MaskT > list) const
return true if any of the masks in the list are on
typename T::ValueType ElementType
GridBlindDataClass
Blind-data Classes that are currently supported by NanoVDB.
__hostdev__ uint32_t pos() const
__hostdev__ bool hasLongGridName() const
__hostdev__ uint64_t pointCount() const
__hostdev__ const NodeTrait< RootT, LEVEL >::type * getFirstNode() const
return a const pointer to the first node of the specified level
__hostdev__ Vec3T applyJacobianF(const Vec3T &ijk) const
Apply the linear forward 3x3 transformation to an input 3d vector using 32bit floating point arithmet...
GLenum GLuint GLenum GLsizei const GLchar * buf
__hostdev__ ReadAccessor(const TreeT &tree)
Constructor from a tree.
__hostdev__ ChildIter(RootT *parent)
__hostdev__ ValueType getValue(const CoordType &ijk) const
Return the value of the given voxel (regardless of state or location in the tree.) ...
__hostdev__ bool isActive(const CoordType &ijk) const
__hostdev__ Vec3T indexToWorldDirF(const Vec3T &dir) const
transformation from index space direction to world space direction
__hostdev__ const DataType * data() const
__hostdev__ void setOn(uint32_t offset)
__hostdev__ int findBlindDataForSemantic(GridBlindDataSemantic semantic) const
Return the index of the first blind data with specified semantic if found, otherwise -1...
__hostdev__ bool isCached(const CoordType &ijk) const
cvex test(vector P=0;int unbound=3;export float s=0;export vector Cf=0;)
__hostdev__ void setMask(MaskT mask, bool on)
__hostdev__ void extrema(ValueType &min, ValueType &max) const
Sets the extrema values of all the active values in this tree, i.e. in all nodes of the tree...
typename DataType::Tile Tile
__hostdev__ GridClass mapToGridClass(GridClass defaultClass=GridClass::Unknown)
Maps from a templated build type to a GridClass enum.
__hostdev__ uint64_t leafPoints(const Coord &ijk, const AttT *&begin, const AttT *&end) const
Return the number of points in the leaf node containing the coordinate ijk. If this return value is l...
__hostdev__ Vec3< float > asVec3s() const
Return a single precision floating-point vector of this coordinate.
__hostdev__ Vec4(T x, T y, T z, T w)
__hostdev__ const ValueType & minimum() const
Return a const reference to the minimum active value encoded in this internal node and any of its chi...
__hostdev__ Mask & operator-=(const Mask &other)
Bitwise difference.
#define NANOVDB_MAJOR_VERSION_NUMBER
__hostdev__ LeafNodeType * getFirstLeaf()
Template specializations of getFirstNode.
__hostdev__ const T * asPointer() const
return a const raw constant pointer to array of three vector components
__hostdev__ AccessorType getAccessor() const
__hostdev__ Rgba8()
Default ctor initializes all channels to zero.
__hostdev__ const TreeT & tree() const
Return a const reference to the tree.
__hostdev__ uint32_t pos() const
static __hostdev__ auto set(NanoRoot< BuildT > &, const ValueT &)
__hostdev__ BaseBBox & translate(const Vec3T &xyz)
__hostdev__ ValueType getValue(const CoordType &ijk) const
Return the value of the given voxel.
__hostdev__ ValueType min() const
Return the smallest vector component.
__hostdev__ bool isActive(const CoordType &ijk) const
__hostdev__ BaseIter(DataT *data=nullptr, uint32_t n=0)
__hostdev__ const FloatType & stdDeviation() const
Return a const reference to the standard deviation of all the active values encoded in this root node...
typename DataType::ValueType ValueType
__hostdev__ ValueType getValue(int i, int j, int k) const
const typename GridOrTreeOrRootT::LeafNodeType type
__hostdev__ CoordType getOrigin() const
__hostdev__ Vec3(const Coord &ijk)
__hostdev__ BaseBBox & intersect(const BaseBBox &bbox)
Intersect this bounding box with the given bounding box.
Bit-mask to encode active states and facilitate sequential iterators and a fast codec for I/O compres...
Signed (i, j, k) 32-bit integer coordinate class, similar to openvdb::math::Coord.
__hostdev__ uint32_t getMajor() const
typename DataType::BuildType BuildType
__hostdev__ void setOff()
Set all bits off.
__hostdev__ Vec3T & max()
typename DataType::BuildT BuildType
Metafunction used to determine if the first template parameter is a specialization of the class templ...
Trait used to transfer the const-ness of a reference type to another type.
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
return the state and updates the value of the specified voxel
__hostdev__ BlindDataT * getBlindData(uint32_t n)
__hostdev__ Mask()
Initialize all bits to zero.
__hostdev__ Vec4 & operator=(const Vec4T< T2 > &rhs)
gridName(grid.gridName())
__hostdev__ bool operator==(const Vec3 &rhs) const
__hostdev__ auto set(const uint32_t n, ArgsT &&...args)
OIIO_NAMESPACE_BEGIN typedef std::ifstream ifstream
__hostdev__ const Vec3d & voxelSize() const
Return a vector of the axial voxel sizes.
const typename GridOrTreeOrRootT::RootNodeType type
Dummy type for a 8bit quantization of float point values.
__hostdev__ bool isInside(const CoordT &p) const
__hostdev__ ChannelT & operator()(const Coord &ijk) const
__hostdev__ bool isCached1(const CoordType &ijk) const
__hostdev__ ReadAccessor(const GridT &grid)
Constructor from a grid.
__hostdev__ const NanoGrid< IndexT > & grid() const
Return a const reference to the IndexGrid.
__hostdev__ CoordT Round(const Vec3T< RealT > &xyz)
typename DataType::StatsT FloatType
__hostdev__ Coord round() const
GridType
List of types that are currently supported by NanoVDB.
__hostdev__ uint64_t checksum() const
Return checksum of the grid buffer.
Struct to derive node type from its level in a given grid, tree or root while preserving constness...
__hostdev__ Vec4 & maxComponent(const Vec4 &other)
Perform a component-wise maximum with the other Coord.
IMATH_HOSTDEVICE constexpr int floor(T x) IMATH_NOEXCEPT
typename GridT::TreeType Type
__hostdev__ uint32_t id() const
typename DataType::FloatType FloatType
Trait use to remove pointer, i.e. "*", qualifier from a type. Default implementation is just a pass-t...
__hostdev__ ValueIterator & operator++()
__hostdev__ void setOff()
GridClass
Classes (superset of OpenVDB) that are currently supported by NanoVDB.
__hostdev__ Coord offsetBy(ValueType n) const
auto printf(const S &fmt, const T &...args) -> int
__hostdev__ Vec3T matMultT(const float *mat, const Vec3T &xyz)
Multiply the transposed of a 3x3 matrix and a 3d vector using 32bit floating point arithmetics...
__hostdev__ NodeT & operator*() const
GridFlags
Grid flags which indicate what extra information is present in the grid buffer.
UT_StringArray JOINTS head
__hostdev__ DenseIter operator++(int)
void set(const Mat4T &mat, const Mat4T &invMat, double taper=1.0)
Initialize the member data from 4x4 matrices.
NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ uint32_t findNext(uint32_t start) const
__hostdev__ void setBitOn(std::initializer_list< uint8_t > list)
#define NANOVDB_PATCH_VERSION_NUMBER
__hostdev__ const LeafNodeType * probeLeaf(const CoordType &ijk) const
__hostdev__ DataType * data()
__hostdev__ Vec3T applyInverseMap(const Vec3T &xyz) const
Apply the inverse affine mapping to a vector using 64bit floating point arithmetics.
__hostdev__ const T & operator[](int i) const
__hostdev__ Coord(ValueType i, ValueType j, ValueType k)
Initializes coordinate to the given signed integers.
__hostdev__ Version version() const
Node caching at all (three) tree levels.
GLsizei const GLfloat * value
__hostdev__ ValueType getFirstValue() const
If the first entry in this node's table is a tile, return the tile's value. Otherwise, return the result of calling getFirstValue() on the child.
__hostdev__ ValueIterator(const LeafNode *parent)
__hostdev__ Vec3< T2 > operator/(T1 scalar, const Vec3< T2 > &vec)
__hostdev__ const uint32_t & activeTileCount(uint32_t level) const
Return the total number of active tiles at the specified level of the tree.
__hostdev__ ValueType getValue(const CoordType &ijk) const
Leaf nodes of the VDB tree. (defaults to 8x8x8 = 512 voxels)
Return the pointer to the leaf node that contains Coord. Implements Tree::probeLeaf(Coord) ...
__hostdev__ void localToGlobalCoord(Coord &ijk) const
Converts (in place) a local index coordinate to a global index coordinate.
__hostdev__ bool isActive() const
const GLuint GLenum const void * binary
NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ uint32_t findFirst() const
__hostdev__ bool is_divisible() const
__hostdev__ uint32_t operator*() const
__hostdev__ uint32_t gridCount() const
Return total number of grids in the buffer.
__hostdev__ ValueOnIter(RootT *parent)
typename remove_const< T >::type type
__hostdev__ void set(bool on)
Set all bits off.
typename RootType::LeafNodeType LeafNodeType
static constexpr bool is_special
__hostdev__ bool operator!=(const Coord &rhs) const
typename RootT::CoordType CoordType
__hostdev__ ValueIterator()
__hostdev__ const NodeTrait< RootT, 1 >::type * getFirstLower() const
vfloat4 sqrt(const vfloat4 &a)
GLdouble GLdouble GLdouble z
__hostdev__ BaseBBox & expand(const BaseBBox &bbox)
Expand this bounding box to enclose the given bounding box.
__hostdev__ bool isInside(const Vec3T &p) const
__hostdev__ BaseBBox & expand(const Vec3T &xyz)
Expand this bounding box to enclose point xyz.
__hostdev__ ValueType getValue(uint32_t offset) const
Return the voxel value at the given offset.
const typename GridT::TreeType type
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
Return true if this tree is empty, i.e. contains no values or nodes.
const typename GridOrTreeOrRootT::RootNodeType Type
__hostdev__ bool operator==(const Coord &rhs) const
defines a tree type from a grid type while preserving constness
__hostdev__ bool operator<=(const Iterator &rhs) const
static constexpr bool is_onindex
__hostdev__ uint32_t nodeCount() const
__hostdev__ int32_t y() const
__hostdev__ bool hasOverlap(const BBox &b) const
Return true if the given bounding box overlaps with this bounding box.
__hostdev__ bool isActive(const CoordType &ijk) const
const typename GridT::TreeType Type
Dummy type for a voxel whose value equals an offset into an external value array of active values...
Like ValueOnIndex but with a mutable mask.
Highest level of the data structure. Contains a tree and a world->index transform (that currently onl...
__hostdev__ bool getAvg() const
__hostdev__ uint32_t getPatch() const
__hostdev__ uint64_t first(uint32_t i) const
__hostdev__ const CoordT & operator*() const
__hostdev__ T length() const
__hostdev__ Vec4 operator-() const
__hostdev__ Vec3 & minComponent(const Vec3 &other)
Perform a component-wise minimum with the other Coord.
GLboolean GLboolean GLboolean GLboolean a
__hostdev__ uint32_t getDim(const CoordType &ijk, const RayT &ray) const
__hostdev__ OffIterator beginOff() const
__hostdev__ bool isIndex(GridType gridType)
Return true if the GridType maps to a special index type (not a POD integer type).
static __hostdev__ BBox createCube(const CoordT &min, typename CoordT::ValueType dim)
__hostdev__ ValueType getValue(const CoordType &ijk) const
Return the value of the given voxel.
__hostdev__ bool isPointIndex() const
static __hostdev__ double value()
__hostdev__ ValueType operator*() const
__hostdev__ Mask & operator|=(const Mask &other)
Bitwise union.
GLuint GLsizei GLsizei * length
__hostdev__ void setMax(const bool &)
__hostdev__ Coord & minComponent(const Coord &other)
Perform a component-wise minimum with the other Coord.
__hostdev__ void setMaskOff(std::initializer_list< MaskT > list)
__hostdev__ Coord operator>>(IndexType n) const
__hostdev__ ReadAccessor(const TreeT &tree)
Constructor from a tree.
const char * c_str() const
returns a c-string of the semantic version, i.e. major.minor.patch
__hostdev__ const DataType * data() const
typename GridT::TreeType type
__hostdev__ bool isMaskOff(MaskT mask) const
Implements Tree::probeLeaf(Coord)
ImageBuf OIIO_API min(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
Maps one type (e.g. the build types above) to other (actual) types.
__hostdev__ uint64_t getIndex(const Coord &ijk) const
Return the linear offset into a channel that maps to the specified coordinate.
__hostdev__ void setOff(uint32_t n)
Set the specified bit off.
**But if you need a or simply need to know when the task has note that the like this
__hostdev__ const DataType * data() const
static __hostdev__ auto set(NanoLeaf< BuildT > &leaf, uint32_t n, const ValueT &v)
__hostdev__ BitFlags & operator=(Type n)
required for backwards compatibility
__hostdev__ ValueType getValue(const CoordType &ijk) const
__hostdev__ CoordType getCoord() const
static constexpr uint32_t WORD_COUNT
__hostdev__ ValueType operator*() const
__hostdev__ bool isFogVolume() const
__hostdev__ T Sign(const T &x)
Return the sign of the given value as an integer (either -1, 0 or 1).
__hostdev__ bool isValueOn() const
__hostdev__ void initBit(std::initializer_list< uint8_t > list)
__hostdev__ int age() const
Returns the difference between major version of this instance and NANOVDB_MAJOR_VERSION_NUMBER.
typename BuildT::BuildType BuildType
__hostdev__ const uint64_t * words() const
Trait use to const from type. Default implementation is just a pass-through.
__hostdev__ CoordType getCoord() const
__hostdev__ const T & operator[](int i) const
static __hostdev__ uint32_t wordCount()
Return the number of machine words used by this Mask.
__hostdev__ Vec3T applyInverseMapF(const Vec3T &xyz) const
Apply the inverse affine mapping to a vector using 32bit floating point arithmetics.
__hostdev__ ValueOnIter & operator++()
__hostdev__ ValueType getValue(int i, int j, int k) const
__hostdev__ void setValue(uint32_t offset, bool v)
__hostdev__ const NanoGrid< BuildT > & grid() const
static __hostdev__ bool lessThan(const Coord &a, const Coord &b)
__hostdev__ void setOn(uint32_t n)
Set the specified bit on.
static constexpr bool is_indexmask
Visits all tile values in this node, i.e. both inactive and active tiles.
Visits all inactive values in a leaf node.
Return point to the lower internal node where Coord maps to one of its values, i.e. terminates.
__hostdev__ const BBoxType & bbox() const
Return a const reference to the index bounding box of all the active values in this tree...
__hostdev__ DenseIterator(uint32_t pos=Mask::SIZE)
GLdouble GLdouble GLdouble q
__hostdev__ DenseIterator(const InternalNode *parent)
__hostdev__ bool isOn() const
__hostdev__ const ValueType & operator[](IndexType i) const
Return a const reference to the given Coord component.
__hostdev__ ChildIterator beginChild()
__hostdev__ Vec3 operator/(const Vec3 &v) const
static ElementType scalar(const T &v)
__hostdev__ bool isActive(const CoordType &ijk) const
OIIO_FORCEINLINE vbool4 insert(const vbool4 &a, bool val)
Helper: substitute val for a[i].
__hostdev__ bool isValid(const GridBlindDataClass &blindClass, const GridBlindDataSemantic &blindSemantics, const GridType &blindType)
return true if the combination of GridBlindDataClass, GridBlindDataSemantic and GridType is valid...
__hostdev__ Vec4 & normalize()
__hostdev__ bool hasBBox() const
__hostdev__ Vec3T matMult(const float *mat, const Vec3T &xyz)
Multiply a 3x3 matrix and a 3d vector using 32bit floating point arithmetics.
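A minimal sketch of this matMult overload, assuming row-major storage of the 3x3 matrix; the matrix values are examples only:

    #include <nanovdb/NanoVDB.h>

    // y = M * x with M stored row-major as 9 floats
    nanovdb::Vec3f scaleDemo()
    {
        const float mat[9] = {1.f, 0.f, 0.f,   // row 0
                              0.f, 2.f, 0.f,   // row 1
                              0.f, 0.f, 3.f};  // row 2
        return nanovdb::matMult(mat, nanovdb::Vec3f(1.f, 1.f, 1.f)); // (1, 2, 3)
    }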
__hostdev__ const RootT & root() const
__hostdev__ const ValueType & maximum() const
Return a const reference to the maximum active value encoded in this internal node and any of its chi...
Codec
Define compression codecs.
typename match_const< DataType, RootT >::type DataT
Dummy type for a variable bit quantization of floating point values.
__hostdev__ Iterator(uint32_t pos, const Mask *parent)
__hostdev__ Vec3T applyIJTF(const Vec3T &xyz) const
Trait used to identify template parameters that are pointers.
__hostdev__ void setValueOnly(uint32_t offset, uint16_t value)
const typename GridOrTreeOrRootT::LeafNodeType Type
__hostdev__ const Vec3T & operator[](int i) const
__hostdev__ const LeafT * probeLeaf(const CoordType &ijk) const
__hostdev__ const FloatType & stdDeviation() const
Return a const reference to the standard deviation of all the active values encoded in this internal ...
#define NANOVDB_HOSTDEV_DISABLE_WARNING
__hostdev__ bool getDev() const
__hostdev__ void setDev(const bool &)
typename RootT::CoordType CoordType
__hostdev__ uint32_t nodeCount(int level) const
__hostdev__ Vec4(const Vec4< T2 > &v)
static __hostdev__ uint32_t dim()
__hostdev__ ValueType operator*() const
__hostdev__ DenseIterator & operator++()
NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ uint32_t CountOn(uint64_t v)
static __hostdev__ BBox createCube(const Coord &min, typename Coord::ValueType dim)
#define NANOVDB_ASSERT(x)
__hostdev__ const NodeT * getNode() const
Return a const pointer to the cached node of the specified type.
__hostdev__ Mask & operator&=(const Mask &other)
Bitwise intersection.
__hostdev__ auto getNodeInfo(const CoordType &ijk) const
__hostdev__ float Clamp(float x, float a, float b)
__hostdev__ BBox(const BaseBBox< Coord > &bbox)
__hostdev__ const RootT & root() const
__hostdev__ ValueType getLastValue() const
Return the last value in this leaf node.
__hostdev__ OnIterator beginOn() const
__hostdev__ ValueType getValue(const CoordType &ijk) const
__hostdev__ const Map & map() const
Return a const reference to the Map for this grid.
__hostdev__ Vec3d getVoxelSize() const
Return a voxel's size in each coordinate direction, measured at the origin.
__hostdev__ bool hasStdDeviation() const
__hostdev__ uint64_t memUsage() const
Return the actual memory footprint of this root node.
__hostdev__ uint32_t hash() const
Return a hash key derived from the existing coordinates.
typename RootNodeType::ChildNodeType UpperNodeType
__hostdev__ float getValue(uint32_t i) const
__hostdev__ ConstValueOnIterator cbeginValueOn() const
__hostdev__ ValueType maximum() const
Return a const reference to the maximum active value encoded in this leaf node.
__hostdev__ void setValueOnly(const CoordT &ijk, const ValueType &v)
Define static boolean tests for template build types.
__hostdev__ CoordType getCoord() const
__hostdev__ void setBitOff(uint8_t bit)
__hostdev__ uint32_t totalNodeCount() const
__hostdev__ Vec3T worldToIndexF(const Vec3T &xyz) const
world to index space transformation
ReadAccessor< ValueT, LEVEL0, LEVEL1, LEVEL2 > createAccessor(const NanoGrid< ValueT > &grid)
Free-standing function for convenient creation of a ReadAccessor with optional and customizable node ...
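A minimal sketch (assuming a valid float grid) of this free-standing factory; the default template arguments are assumed to enable node caching at all three tree levels:

    #include <nanovdb/NanoVDB.h>

    float sampleOrigin(const nanovdb::NanoGrid<float>& grid)
    {
        auto acc = nanovdb::createAccessor(grid); // node-caching ReadAccessor
        return acc.getValue(nanovdb::Coord(0, 0, 0));
    }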
__hostdev__ Vec4 operator/(const T &s) const
__hostdev__ uint32_t getDim(const CoordType &ijk, const RayT &ray) const
__hostdev__ const MaskType< LOG2DIM > & getValueMask() const
__hostdev__ void clear()
Reset this accessor to its initial state, i.e. with an empty cache.
__hostdev__ Coord offsetToGlobalCoord(uint32_t n) const
typename GridOrTreeOrRootT::RootNodeType type
__hostdev__ const GridClass & gridClass() const
__hostdev__ bool operator==(const Mask &other) const
Iterator & operator=(const Iterator &)=default
Visits child nodes of this node only.
__hostdev__ uint64_t voxelPoints(const Coord &ijk, const AttT *&begin, const AttT *&end) const
get iterators over attributes to points at a specific voxel location
#define NANOVDB_DATA_ALIGNMENT
__hostdev__ CoordT getCoord() const
__hostdev__ bool operator<(const Iterator &rhs) const
__hostdev__ Vec3 operator+(const Vec3 &v) const
__hostdev__ ConstValueIterator cbeginValueAll() const
static T scalar(const T &s)
__hostdev__ ValueType minimum() const
Return a const reference to the minimum active value encoded in this leaf node.
__hostdev__ bool isActive(const CoordType &ijk) const
__hostdev__ const uint32_t & getTableSize() const
__hostdev__ NodeTrait< RootT, LEVEL >::type * getFirstNode()
return a pointer to the first node at the specified level
__hostdev__ int32_t x() const
__hostdev__ bool isLevelSet() const
typename NanoLeaf< BuildT >::ValueType ValueType
void set(const MatT &mat, const MatT &invMat, const Vec3T &translate, double taper=1.0)
Initialize the member data from 3x3 or 4x4 matrices.
Internal nodes of a VDB tree.
__hostdev__ const LeafT * probeLeaf(const CoordType &ijk) const
typename UpperNodeType::ChildNodeType LowerNodeType
LeafData()=delete
This class cannot be constructed or deleted.
__hostdev__ ValueIterator beginValue()
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType type
__hostdev__ BBox(const Coord &min, const Coord &max)
__hostdev__ void set(uint32_t n, bool on)
Set the specified bit on or off.
__hostdev__ int32_t & z()
Class to access values in channels at a specific voxel location.
PointAccessor(const NanoGrid< BuildT > &grid)
static __hostdev__ uint32_t dim()
Return the dimension, in index space, of this leaf node (typically 8 as for openvdb leaf nodes!) ...
__hostdev__ int findBlindData(const char *name) const
Return the index of the first blind data with specified name if found, otherwise -1.
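A hedged sketch combining findBlindData with blindData (both listed here) to resolve a named blind-data block to its raw pointer; the name is supplied by the caller:

    #include <nanovdb/NanoVDB.h>

    const void* lookupBlindData(const nanovdb::NanoGrid<float>& grid, const char* name)
    {
        const int i = grid.findBlindData(name); // -1 if no block has that name
        return i < 0 ? nullptr : grid.blindData(static_cast<uint32_t>(i));
    }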
__hostdev__ uint64_t offset() const
__hostdev__ const RootT & root() const
__hostdev__ void toggle()
Toggle the state of all bits in the mask.
__hostdev__ void toggle(uint32_t n)
__hostdev__ DenseIterator beginDense()
typename RootNodeType::ChildNodeType UpperNodeType
__hostdev__ Vec3T indexToWorldDir(const Vec3T &dir) const
transformation from index space direction to world space direction
__hostdev__ ValueType getValue(int i, int j, int k) const
__hostdev__ CoordT offsetToGlobalCoord(uint32_t n) const
__hostdev__ Vec3 cross(const Vec3T &v) const
__hostdev__ Type Min(Type a, Type b)
__hostdev__ Vec4 & operator/=(const T &s)
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args) const
__hostdev__ FloatType variance() const
Return the variance of all the active values encoded in this leaf node.
GridBlindDataSemantic
Blind-data Semantics that are currently understood by NanoVDB.
BitFlags(std::initializer_list< uint8_t > list)
__hostdev__ Coord(ValueType *ptr)
__hostdev__ ValueType operator()(const CoordType &ijk) const
__hostdev__ T * asPointer()
return a non-const raw pointer to the array of three vector components
typename GridOrTreeOrRootT::RootNodeType Type
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType Type
__hostdev__ ValueType operator()(const CoordType &ijk) const
__hostdev__ CoordType getCoord() const
__hostdev__ ValueIterator cbeginValueAll() const
__hostdev__ Vec3 & operator+=(const Coord &ijk)
__hostdev__ bool isCached2(const CoordType &ijk) const
__hostdev__ void setMask(uint32_t offset, bool v)
__hostdev__ Vec3< double > asVec3d() const
Return a double precision floating-point vector of this coordinate.
__hostdev__ TileT * tile() const
__hostdev__ ValueOnIterator()
static __hostdev__ auto set(NanoLower< BuildT > &node, uint32_t n, const ValueT &v)
__hostdev__ Vec3T indexToWorldGrad(const Vec3T &grad) const
transform the gradient from index space to world space.
__hostdev__ bool isOff(uint32_t n) const
Return true if the given bit is NOT set.
__hostdev__ bool operator!=(const Vec3 &rhs) const
__hostdev__ const MaskType< LOG2DIM > & getValueMask() const
__hostdev__ ValueOffIterator()
__hostdev__ Vec3 & operator-=(const Coord &ijk)
__hostdev__ enable_if<!is_same< MaskT, Mask >::value, Mask & >::type operator=(const MaskT &other)
Assignment operator that works with openvdb::util::NodeMask.
RootData< ChildT > DataType
__hostdev__ Vec3 operator*(const T &s) const
__hostdev__ bool isMaskOn(uint32_t offset) const
typename RootType::LeafNodeType LeafNodeType
__hostdev__ ReadAccessor(const TreeT &tree)
Constructor from a tree.
typename ChildT::LeafNodeType LeafNodeType
__hostdev__ ValueType getValue(const CoordT &ijk) const
Return the voxel value at the given coordinate.
__hostdev__ ReadAccessor(const GridT &grid)
Constructor from a grid.
Visits active tile values of this node only.
__hostdev__ ValueType operator()(int i, int j, int k) const
__hostdev__ void setMin(const bool &)
__hostdev__ const LeafNodeType * getFirstLeaf() const
__hostdev__ CoordType getOrigin() const
Return a pointer to the upper internal node where the Coord maps to one of its values, i.e. where traversal terminates.
__hostdev__ bool isActive() const
__hostdev__ Mask(const Mask &other)
Copy constructor.
__hostdev__ CoordT RoundDown(const Vec3T< RealT > &xyz)
__hostdev__ Vec4 operator-(const Vec4 &v) const
__hostdev__ void clear()
Reset this accessor to its initial state, i.e. with an empty cache.
__hostdev__ bool hasMinMax() const
__hostdev__ bool isActive() const
Return true if any of the voxel value are active in this leaf node.
#define NANOVDB_MAGIC_GRID
BitFlags(std::initializer_list< MaskT > list)
__hostdev__ ValueIter operator++(int)
InternalData< ChildT, Log2Dim > DataType
__hostdev__ Coord offsetBy(ValueType dx, ValueType dy, ValueType dz) const
__hostdev__ int32_t & y()
__hostdev__ uint64_t memUsage() const
return memory usage in bytes for the leaf node
__hostdev__ ConstDenseIterator cbeginChildAll() const
__hostdev__ Vec3T applyJacobian(const Vec3T &ijk) const
Apply the linear forward 3x3 transformation to an input 3d vector using 64bit floating point arithmet...
__hostdev__ DenseIterator beginAll() const
__hostdev__ uint64_t leafPoints(const Coord &ijk, const AttT *&begin, const AttT *&end) const
Return the number of points in the leaf node containing the coordinate ijk. If this return value is l...
__hostdev__ Iterator & operator++()
__hostdev__ CoordType getOrigin() const
__hostdev__ const char * shortGridName() const
Return a c-string with the name of this grid, truncated to 255 characters.
__hostdev__ void setBit(uint8_t bit, bool on)
__hostdev__ ValueType max() const
Return the largest vector component.
__hostdev__ Vec4 operator+(const Vec4 &v) const
static constexpr bool is_float
__hostdev__ uint32_t countOn() const
Return the total number of set bits in this Mask.
__hostdev__ bool getMax() const
__hostdev__ ChannelT & operator()(int i, int j, int k) const
__hostdev__ DenseIterator operator++(int)
typename DataType::ValueT ValueType
typename BuildT::RootType RootType
__hostdev__ void setOrigin(const T &ijk)
__hostdev__ void localToGlobalCoord(Coord &ijk) const
modifies local coordinates to global coordinates of a tile or child node
static __hostdev__ BBox createCube(typename CoordT::ValueType min, typename CoordT::ValueType max)
__hostdev__ void setOn()
Set all bits on.
__hostdev__ ValueType getValue(const CoordType &ijk) const
__hostdev__ ValueOnIterator beginValueOn() const
__hostdev__ uint64_t AlignUp(uint64_t byteCount)
round up byteCount to the nearest multiple of wordSize, e.g. to align to a machine word: AlignUp<sizeof(size_t)>(n)
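A minimal sketch of AlignUp, here rounding a byte count up to the 32-byte NANOVDB_DATA_ALIGNMENT boundary, so e.g. 100 bytes become 128:

    #include <cstdint>
    #include <nanovdb/NanoVDB.h>

    uint64_t alignedBufferSize(uint64_t byteCount)
    {
        return nanovdb::AlignUp<NANOVDB_DATA_ALIGNMENT>(byteCount);
    }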
__hostdev__ Vec3 operator+(const Coord &ijk) const
Dummy type for indexing points into voxels.
__hostdev__ Vec3T worldToIndexDir(const Vec3T &dir) const
transformation from world space direction to index space direction
__hostdev__ bool isOn() const
Return true if all the bits are set in this Mask.
__hostdev__ Vec3T applyMap(const Vec3T &ijk) const
Apply the forward affine transformation to a vector using 64bit floating point arithmetics.
Defines an affine transform and its inverse represented as a 3x3 matrix and a vec3 translation...
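A hedged sketch of the forward/inverse pair on the Map class described here, using the uniform-scale-plus-translation constructor; the numbers are examples only:

    #include <nanovdb/NanoVDB.h>

    void mapDemo()
    {
        nanovdb::Map map(0.5, nanovdb::Vec3d(1.0, 2.0, 3.0)); // scale s, translation t
        nanovdb::Vec3d world = map.applyMap(nanovdb::Vec3d(8.0, 0.0, 0.0)); // index -> world
        nanovdb::Vec3d index = map.applyInverseMap(world); // world -> index, back to (8, 0, 0)
        (void)index;
    }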
__hostdev__ ReadAccessor(const RootT &root)
Constructor from a root node.
#define NANOVDB_MAGIC_FILE
__hostdev__ const FloatType & average() const
Return a const reference to the average of all the active values encoded in this internal node and an...
__hostdev__ Vec3T applyMapF(const Vec3T &ijk) const
Apply the forward affine transformation to a vector using 32bit floating point arithmetics.
static __hostdev__ auto set(NanoUpper< BuildT > &node, uint32_t n, const ValueT &v)
__hostdev__ Vec3 & operator*=(const T &s)
__hostdev__ Version(uint32_t data)
Constructor from a raw uint32_t data representation.
__hostdev__ ValueIterator(const InternalNode *parent)
__hostdev__ FloatType variance() const
Return the variance of all the active values encoded in this root node and any of its child nodes...
__hostdev__ void setMaskOn(std::initializer_list< MaskT > list)
__hostdev__ uint64_t activeVoxelCount() const
Return the total number of active voxels in this tree.
__hostdev__ Mask(bool on)
__hostdev__ Coord operator-(const Coord &rhs) const
VDB Tree, which is a thin wrapper around a RootNode.
__hostdev__ Vec3 operator-(const Coord &ijk) const
__hostdev__ bool operator==(const Iterator &rhs) const
constexpr enabler dummy
An instance to use in EnableIf.
__hostdev__ bool isBitOn(uint8_t bit) const
__hostdev__ bool operator!=(const Vec4 &rhs) const
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType Type
static __hostdev__ size_t memUsage()
Return memory usage in bytes for the class.
__hostdev__ Vec3 & operator/=(const T &s)
__hostdev__ uint32_t getDim(const CoordType &ijk, const RayT &ray) const
__hostdev__ uint64_t gridSize() const
Return the memory footprint in bytes of the entire grid, i.e. including all nodes and blind data.
__hostdev__ void setMaskOff(MaskT mask)
__hostdev__ Iterator(const BBox &b)
__hostdev__ ChildIter(ParentT *parent)
__hostdev__ ChannelAccessor(const NanoGrid< IndexT > &grid, uint32_t channelID=0u)
Ctor from an IndexGrid and an integer ID of an internal channel that is assumed to exist as blind dat...
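A hedged sketch of this ChannelAccessor constructor; ChannelT = float and channel ID 0 are assumptions about how the IndexGrid was built:

    #include <nanovdb/NanoVDB.h>

    float readChannel(const nanovdb::NanoGrid<nanovdb::ValueIndex>& grid,
                      const nanovdb::Coord& ijk)
    {
        nanovdb::ChannelAccessor<float> acc(grid, 0u); // channel 0 stored as blind data
        return acc.getValue(ijk);
    }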
__hostdev__ enable_if< is_same< T, Point >::value, const uint64_t & >::type pointCount() const
Return the total number of points indexed by this PointGrid.
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
__hostdev__ ValueIterator cbeginValueAll() const
static __hostdev__ auto set(NanoRoot< BuildT > &, const ValueT &)
VecT< GridHandleT > readUncompressedGrids(const char *fileName, const typename GridHandleT::BufferType &buffer=typename GridHandleT::BufferType())
Read multiple uncompressed NanoVDB grids from a file and return them as a vector.
LeafFnBase< CoordT, MaskT, LOG2DIM > BaseT
__hostdev__ const ValueType & minimum() const
Return a const reference to the minimum active value encoded in this root node and any of its child n...
__hostdev__ const void * blindData(uint32_t n) const
Returns a const pointer to the blindData at the specified linear offset.
__hostdev__ Coord ceil() const
Round each component of this Vec<T> up to the nearest integer value.
__hostdev__ const ValueType & maximum() const
Return a const reference to the maximum active value encoded in this root node and any of its child n...
__hostdev__ Coord & operator+=(int n)
__hostdev__ uint32_t blindDataCount() const
Return the number of blind-data blocks encoded in this grid.
__hostdev__ ValueType operator*() const
typename Vec3T::ValueType ValueType
__hostdev__ CoordType getOrigin() const
__hostdev__ CoordT origin() const
Return the origin in index space of this leaf node.
__hostdev__ CoordT getCoord() const
__hostdev__ ValueType operator*() const
static __hostdev__ uint32_t CoordToOffset(const CoordType &ijk)
Return the linear offset corresponding to the given coordinate.
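For an 8^3 leaf node (LOG2DIM = 3) this mapping packs the low three bits of each coordinate into a 9-bit linear offset. A hand-rolled equivalent, shown for illustration only and assumed to match NanoLeaf's CoordToOffset:

    #include <cstdint>
    #include <nanovdb/NanoVDB.h>

    uint32_t leafOffset(const nanovdb::Coord& ijk)
    {
        // pack (i & 7, j & 7, k & 7) into a single offset in [0, 512)
        return uint32_t(((ijk[0] & 7) << 6) | ((ijk[1] & 7) << 3) | (ijk[2] & 7));
    }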
__hostdev__ DenseIterator cbeginChildAll() const
__hostdev__ uint8_t flags() const
__hostdev__ Type data() const
static constexpr bool is_offindex
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args) const
__hostdev__ Vec4 operator*(const Vec4 &v) const
__hostdev__ DataType * data()
typename GridOrTreeOrRootT::LeafNodeType type
Implements Tree::getValue(Coord), i.e. return the value associated with a specific coordinate ijk...
__hostdev__ Iterator(const BBox &b, const Coord &p)
#define NANOVDB_MINOR_VERSION_NUMBER
typename GridOrTreeOrRootT::LeafNodeType Type
__hostdev__ bool isApproxZero(const Type &x)
__hostdev__ const FloatType & average() const
Return a const reference to the average of all the active values encoded in this root node and any of...
__hostdev__ uint32_t getMinor() const
__hostdev__ bool empty() const
Return true if this bounding box is empty, e.g. uninitialized.
__hostdev__ bool isOn(uint32_t n) const
Return true if the given bit is set.
Maximum floating-point values.
__hostdev__ ValueIterator beginValue() const
__hostdev__ uint64_t voxelPoints(const Coord &ijk, const AttT *&begin, const AttT *&end) const
get iterators over attributes to points at a specific voxel location
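A hedged sketch of voxelPoints; AttT = Vec3f is an assumption about the attribute type the point grid was built with:

    #include <nanovdb/NanoVDB.h>

    uint64_t countPointsAt(const nanovdb::NanoGrid<nanovdb::Point>& grid,
                           const nanovdb::Coord& ijk)
    {
        nanovdb::PointAccessor<nanovdb::Vec3f, nanovdb::Point> acc(grid);
        const nanovdb::Vec3f *begin = nullptr, *end = nullptr;
        return acc.voxelPoints(ijk, begin, end); // [begin, end) spans the attributes
    }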
__hostdev__ Map(double s, const Vec3d &t=Vec3d(0.0, 0.0, 0.0))
__hostdev__ ChannelT * setChannel(ChannelT *channelPtr)
Change to an external channel.
__hostdev__ int MinIndex(const Vec3T &v)
Like ValueIndex but with a mutable mask.
__hostdev__ bool isActive(const CoordT &ijk) const
Return true if the voxel value at the given coordinate is active.
__hostdev__ NodeT * probeChild(ValueType &value) const
__hostdev__ ReadAccessor(const TreeT &tree)
Constructor from a tree.
__hostdev__ Vec3 operator-() const
__hostdev__ Vec3 & operator-=(const Vec3 &v)
static __hostdev__ uint32_t CoordToOffset(const CoordT &ijk)
Return the linear offset corresponding to the given coordinate.
__hostdev__ Type & data()
__hostdev__ Vec3T indexToWorldF(const Vec3T &xyz) const
index to world space transformation
__hostdev__ const NodeT * getFirstNode() const
return a const pointer to the first node of the specified type
void writeUncompressedGrid(StreamT &os, const GridData *gridData, bool raw=false)
This is a standalone alternative to io::writeGrid(...,Codec::NONE) defined in util/IO.h. Unlike the latter, this function has no dependencies at all, not even NanoVDB.h, so it also works if client code only includes PNanoVDB.h!
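A hedged sketch of this standalone I/O path: write one grid with writeUncompressedGrid, then read everything back with readUncompressedGrids. The file name and the use of std::ofstream are assumptions; any stream type with a write() method should work:

    #include <fstream>
    #include <nanovdb/NanoVDB.h>

    void writeGridFile(const nanovdb::GridData* gridData)
    {
        std::ofstream os("grids.nvdb", std::ios::binary);
        nanovdb::writeUncompressedGrid(os, gridData); // raw = false adds a file header
        // Reading back requires a GridHandle type, e.g. (include path may vary):
        //   auto handles = nanovdb::readUncompressedGrids<
        //       nanovdb::GridHandle<nanovdb::HostBuffer>>("grids.nvdb");
    }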
__hostdev__ Vec3T applyInverseJacobianF(const Vec3T &xyz) const
Apply the linear inverse 3x3 transformation to an input 3d vector using 32bit floating point arithmet...
static constexpr uint32_t SIZE
static constexpr bool is_Fp
PointAccessor(const NanoGrid< Point > &grid)
static __hostdev__ double value()
NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ uint32_t findPrev(uint32_t start) const
__hostdev__ BBox()
Default construction sets BBox to an empty bbox.
__hostdev__ NodeTrait< RootT, 1 >::type * getFirstLower()
__hostdev__ bool hasBBox() const
__hostdev__ bool hasAverage() const
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args)
static __hostdev__ uint32_t dim()
Return the dimension, in voxel units, of this internal node (typically 8*16 or 8*16*32) ...
__hostdev__ FloatType stdDeviation() const
Return a const reference to the standard deviation of all the active values encoded in this leaf node...
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
__hostdev__ BBox< Vec3d > transform(const Map &map) const
transform this coordinate bounding box by the specified map
__hostdev__ bool isFloatingPoint(GridType gridType)
return true if the GridType maps to a floating point type
__hostdev__ void setMaskOn(MaskT mask)
__hostdev__ ValueIter(RootT *parent)
__hostdev__ const MaskType< LOG2DIM > & getChildMask() const
__hostdev__ bool isPointData() const
MaskT< LOG2DIM > mValueMask
__hostdev__ ValueIter & operator++()
__hostdev__ auto getNodeInfo(const CoordType &ijk) const
__hostdev__ bool probeValue(const Coord &ijk, typename remove_const< ChannelT >::type &v) const
return the state of the specified voxel and update its value
__hostdev__ FloatType average() const
Return a const reference to the average of all the active values encoded in this leaf node...
__hostdev__ CoordType getOrigin() const
__hostdev__ ChildNodeType * probeChild(const CoordType &ijk)
__hostdev__ uint64_t lastOffset() const
Implements Tree::isActive(Coord)
__hostdev__ ReadAccessor(const RootT &root)
Constructor from a root node.
__hostdev__ auto getNodeInfo(const CoordType &ijk) const
typename NanoLeaf< BuildT >::ValueType ValueT
__hostdev__ const MaskType< LOG2DIM > & childMask() const
Return a const reference to the bit mask of child nodes in this internal node.
__hostdev__ RootT & root()
Iterator< false > OffIterator
__hostdev__ const LeafNode * probeLeaf(const CoordT &) const
__hostdev__ ChannelT * setChannel(uint32_t channelID)
Change to an internal channel, assuming it exists as blind data in the IndexGrid.
__hostdev__ uint64_t activeVoxelCount() const
Return the total number of active voxels in this grid.
__hostdev__ Vec3T dim() const
__hostdev__ const RootT & root() const
static __hostdev__ float value()
__hostdev__ const LeafT * probeLeaf(const CoordType &ijk) const
__hostdev__ const ChildNodeType * probeChild(const CoordType &ijk) const
__hostdev__ int MaxIndex(const Vec3T &v)
__hostdev__ ReadAccessor(const RootT &root)
Constructor from a root node.
static __hostdev__ CoordT OffsetToLocalCoord(uint32_t n)
Compute the local coordinates from a linear offset.
__hostdev__ void setBitOff(std::initializer_list< uint8_t > list)
__hostdev__ uint64_t gridPoints(const AttT *&begin, const AttT *&end) const
Return the total number of points in the grid and set the iterators to the complete range of points...
__hostdev__ Type Max(Type a, Type b)
__hostdev__ Coord & operator-=(const Coord &rhs)
__hostdev__ Vec3T worldToIndex(const Vec3T &xyz) const
world to index space transformation
__hostdev__ bool isGridIndex() const
__hostdev__ Coord & operator+=(const Coord &rhs)
__hostdev__ const ChildNodeType * probeChild(const CoordType &ijk) const
__hostdev__ bool isActive(const CoordType &ijk) const
__hostdev__ NodeT * getFirstNode()
return a pointer to the first node of the specified type
Dummy type for 16 bit floating point values (placeholder for IEEE 754 Half).
bool isValid(const NanoGrid< ValueT > &grid, bool detailed=true, bool verbose=false)
Return true if the specified grid passes several validation tests.
Dummy type for a voxel whose value equals an offset into an external value array. ...
__hostdev__ uint64_t last(uint32_t i) const
__hostdev__ const GridType & gridType() const
Visits all tile values and child nodes of this node.
__hostdev__ Vec4 & operator-=(const Vec4 &v)
__hostdev__ ValueType getValue(int i, int j, int k) const
__hostdev__ ChannelAccessor(const NanoGrid< IndexT > &grid, ChannelT *channelPtr)
Ctor from an IndexGrid and an external channel.
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
Iterator< true > OnIterator
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args)
const char * toStr(GridType gridType)
Maps a GridType to a c-string.
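A minimal sketch using toStr to print a grid's value type; note this overload is host-only, since the string LUT is compiled out under __CUDACC_RTC__:

    #include <cstdio>
    #include <nanovdb/NanoVDB.h>

    void printGridType(const nanovdb::NanoGrid<float>& grid)
    {
        std::printf("value type: %s\n", nanovdb::toStr(grid.gridType())); // "float"
    }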
__hostdev__ bool isBitOff(uint8_t bit) const
__hostdev__ T dot(const Vec3T &v) const
Visits all values in a leaf node, i.e. both active and inactive values.
__hostdev__ bool isFloatingPointVector(GridType gridType)
return true if the GridType maps to a floating point vec3.
typename ChildT::CoordType CoordType
__hostdev__ ValueType operator*() const
typename UpperNodeType::ChildNodeType LowerNodeType
static __hostdev__ auto set(NanoLeaf< BuildT > &leaf, uint32_t n, const ValueT &v)
__hostdev__ DataType * data()
__hostdev__ bool isInteger(GridType gridType)
Return true if the GridType maps to a POD integer type.
__hostdev__ Vec3(const Vec3< T2 > &v)
const typename remove_const< T >::type type
__hostdev__ Vec3< T2 > operator*(T1 scalar, const Vec3< T2 > &vec)
__hostdev__ Mask & operator^=(const Mask &other)
Bitwise XOR.
__hostdev__ ValueType operator()(int i, int j, int k) const
__hostdev__ Type getFlags() const
__hostdev__ ValueType operator*() const
__hostdev__ T lengthSqr() const
__hostdev__ BBox expandBy(typename CoordT::ValueType padding) const
Return a new instance that is expanded by the specified padding.
__hostdev__ uint64_t * words()
Return a pointer to the list of words of the bit mask.
__hostdev__ Vec3 operator*(const Vec3 &v) const
__hostdev__ ValueOnIter()
__hostdev__ DenseIter & operator++()
__hostdev__ ValueOnIterator()
__hostdev__ Vec3T applyInverseJacobian(const Vec3T &xyz) const
Apply the linear inverse 3x3 transformation to an input 3d vector using 64bit floating point arithmet...
__hostdev__ bool operator<=(const Version &rhs) const
typename RootT::BuildType BuildType
__hostdev__ BBox(const CoordT &min, const CoordT &max)
__hostdev__ NodeTrait< RootT, 2 >::type * getFirstUpper()
__hostdev__ DataType * data()
__hostdev__ Rgba8(uint8_t r, uint8_t g, uint8_t b, uint8_t a=255u)
integer r,g,b,a ctor where alpha channel defaults to opaque
__hostdev__ Vec3T & min()
__hostdev__ bool isSequential() const
return true if nodes at all levels can safely be accessed with simple linear offsets ...
Dummy type for a 4bit quantization of floating point values.
__hostdev__ constexpr T pi()
Pi constant taken from Boost to match old behaviour.
static __hostdev__ auto set(NanoUpper< BuildT > &, uint32_t, const ValueT &)
__hostdev__ ValueIterator operator++(int)
__hostdev__ uint64_t idx(int i, int j, int k) const
__hostdev__ ValueType operator()(int i, int j, int k) const
__hostdev__ bool operator!=(const Iterator &rhs) const
typename DataType::BuildT BuildType
__hostdev__ CoordT getCoord() const
__hostdev__ bool isUnknown() const
__hostdev__ const GridBlindMetaData & blindMetaData(uint32_t n) const
__hostdev__ ValueType getValue(int i, int j, int k) const
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType type
Class to access points at a specific voxel location.
__hostdev__ ChildIter operator++(int)
__hostdev__ ValueOnIterator(const InternalNode *parent)
__hostdev__ bool operator!=(const BaseBBox &rhs) const
typename RootT::ChildNodeType Node2
__hostdev__ Iterator operator++(int)
__hostdev__ BaseBBox(const Vec3T &min, const Vec3T &max)
__hostdev__ const ChildT * probeChild(ValueType &value) const
typename ChildT::CoordType CoordType
__hostdev__ const uint32_t & tileCount() const
Return the number of tiles encoded in this root node.
__hostdev__ ConstChildIterator cbeginChild() const
typename BuildToValueMap< BuildT >::Type ValueT
__hostdev__ bool operator>=(const Version &rhs) const
__hostdev__ bool operator<(const Coord &rhs) const
Return true if this Coord is lexicographically less than the given Coord.
__hostdev__ bool getMin() const
__hostdev__ DenseIterator()
__hostdev__ ReadAccessor(const GridT &grid)
Constructor from a grid.
__hostdev__ const char * gridName() const
Return a c-string with the name of this grid.
LeafData & operator=(const LeafData &)=delete
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType type
__hostdev__ bool isMaskOn(MaskT mask) const
__hostdev__ Iterator begin() const
__hostdev__ void initMask(std::initializer_list< MaskT > list)
DenseIterator & operator=(const DenseIterator &)=default
__hostdev__ uint32_t countOn(uint32_t i) const
Return the number of lower set bits in mask up to but excluding the i'th bit.
__hostdev__ uint64_t gridPoints(const AttT *&begin, const AttT *&end) const
Return the total number of points in the grid and set the iterators to the complete range of points...
Tolerance for floating-point comparison.
typename DataType::StatsT FloatType
__hostdev__ Coord operator+(const Coord &rhs) const
Dummy type for a voxel whose value equals its binary active state.
__hostdev__ Vec3T & operator[](int i)
__hostdev__ Vec3(const Vec3T< T2 > &v)
__hostdev__ ValueIterator beginValue() const
__hostdev__ void setBitOn(uint8_t bit)
static __hostdev__ size_t memUsage()
Return the memory footprint in bytes of this Mask.
typename Mask< Log2Dim >::template Iterator< On > MaskIterT
__hostdev__ int32_t Ceil(float x)
Delta for small floating-point offsets.
__hostdev__ BBox(const Vec3T &min, const Vec3T &max)
Top-most node of the VDB tree structure.
__hostdev__ int32_t Floor(float x)
__hostdev__ ValueOnIterator(const LeafNode *parent)
__hostdev__ const ValueType & background() const
Return a const reference to the background value.
__hostdev__ bool isMaskOff(std::initializer_list< MaskT > list) const
return true if any of the masks in the list are off
__hostdev__ const Vec3d & voxelSize() const
Return a const reference to the size of a voxel in world units.
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args)
typename ChildT::LeafNodeType LeafNodeType
typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType Type
__hostdev__ ValueType operator()(const CoordType &ijk) const
__hostdev__ TreeT & tree()
Return a non-const reference to the tree.
static __hostdev__ auto set(typename NanoRoot< BuildT >::Tile &tile, const ValueT &v)
typename match_const< Tile, RootT >::type TileT
__hostdev__ ChildIterator beginChild()
__hostdev__ CoordType origin() const
Return the origin in index space of this leaf node.
__hostdev__ T dot(const Vec4T &v) const
__hostdev__ Vec3T applyIJT(const Vec3T &xyz) const
Apply the transposed inverse 3x3 transformation to an input 3d vector using 64bit floating point arit...
__hostdev__ ValueOnIterator cbeginValueOn() const
A simple vector class with four components, similar to openvdb::math::Vec4.
__hostdev__ ChildIter & operator++()
__hostdev__ Coord & maxComponent(const Coord &other)
Perform a component-wise maximum with the other Coord.
__hostdev__ const RootT & root() const
__hostdev__ ValueOnIter operator++(int)
__hostdev__ Coord operator-() const
__hostdev__ bool updateBBox()
Updates the local bounding box of active voxels in this node. Return true if bbox was updated...
__hostdev__ uint32_t operator*() const
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
return the state of the specified voxel and update its value
__hostdev__ bool isBreadthFirst() const
__hostdev__ Coord operator<<(IndexType n) const
__hostdev__ const NanoGrid< Point > & grid() const
__hostdev__ Vec3 operator-(const Vec3 &v) const
__hostdev__ Coord operator&(IndexType n) const
Return a new instance with coordinates masked by the given unsigned integer.
static __hostdev__ uint64_t memUsage()
return memory usage in bytes for the class
__hostdev__ ValueType operator()(const CoordType &ijk) const
__hostdev__ ValueType operator()(int i, int j, int k) const
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType type
__hostdev__ CoordT dim() const
__hostdev__ bool isValueOn() const
typename Mask< 3 >::template Iterator< ON > MaskIterT
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args) const
__hostdev__ Coord()
Initialize all coordinates to zero.
__hostdev__ int32_t & x()
__hostdev__ bool isInside(const BBox &b) const
Return true if the given bounding box is inside this bounding box.
__hostdev__ const DataType * data() const
__hostdev__ bool operator==(const Vec4 &rhs) const
static constexpr bool value
__hostdev__ ChannelT & getValue(const Coord &ijk) const
Return the value from a cached channel that maps to the specified coordinate.
__hostdev__ ValueIterator()
__hostdev__ Vec3T indexToWorld(const Vec3T &xyz) const
index to world space transformation
__hostdev__ bool isInside(const Vec3T &xyz)
__hostdev__ Version(uint32_t major, uint32_t minor, uint32_t patch)
Constructor from major.minor.patch version numbers.
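A minimal sketch of this constructor, packing the header's own version macros into the bit-compacted representation; getPatch() is assumed to exist alongside the getMinor() listed here:

    #include <cstdio>
    #include <nanovdb/NanoVDB.h>

    void printVersion()
    {
        nanovdb::Version v(NANOVDB_MAJOR_VERSION_NUMBER,
                           NANOVDB_MINOR_VERSION_NUMBER,
                           NANOVDB_PATCH_VERSION_NUMBER);
        std::printf("NanoVDB %u.%u.%u\n", v.getMajor(), v.getMinor(), v.getPatch());
    }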
__hostdev__ ConstChildIterator cbeginChild() const
__hostdev__ bool isOff() const
Bit-compacted representation of all three version numbers.
static __hostdev__ size_t memUsage()
__hostdev__ Vec3T indexToWorldGradF(const Vec3T &grad) const
Transforms the gradient from index space to world space.
__hostdev__ enable_if< BuildTraits< T >::is_index, const uint64_t & >::type valueCount() const
Return the total number of values indexed by this IndexGrid.
__hostdev__ bool isActive() const
Return true if this node or any of its child nodes contain active values.
Visits all active values in a leaf node.
__hostdev__ Vec3 & normalize()
static __hostdev__ Coord Floor(const Vec3T &xyz)
Return the largest integer coordinates that are not greater than xyz (node centered conversion)...
__hostdev__ ValueType getFirstValue() const
Return the first value in this leaf node.
__hostdev__ ReadAccessor(const GridT &grid)
Constructor from a grid.
__hostdev__ bool empty() const
__hostdev__ Iterator operator++(int)
__hostdev__ bool isStaggered() const
__hostdev__ Vec3T worldToIndexDirF(const Vec3T &dir) const
transformation from world space direction to index space direction
typename ChildT::template MaskType< LOG2 > MaskType
__hostdev__ bool isValid() const
Return true if this grid is valid, i.e. it has a valid magic number and version.
__hostdev__ T lengthSqr() const
__hostdev__ Coord & operator&=(int n)
__hostdev__ AccessorType getAccessor() const
__hostdev__ bool operator<(const Version &rhs) const
__hostdev__ const BBox< CoordType > & bbox() const
Return a const reference to the bounding box in index space of active values in this internal node an...
__hostdev__ Coord floor() const
Round each component of this Vec<T> down to the nearest integer value.
__hostdev__ bool isMask() const
__hostdev__ AccessorType getAccessor() const
Return a new instance of a ReadAccessor used to access values in this grid.
__hostdev__ uint8_t octant() const
Return the octant of this Coord.
__hostdev__ BBox< Vec3< RealT > > asReal() const
typename DataType::ValueT ValueType
__hostdev__ bool operator==(const BaseBBox &rhs) const
__hostdev__ T & getValue(const Coord &ijk, T *channelPtr) const
Return the value from a specified channel that maps to the specified coordinate.
__hostdev__ ValueOffIterator(const LeafNode *parent)
__hostdev__ bool probeValue(const CoordT &ijk, ValueType &v) const
Return true if the voxel value at the given coordinate is active and updates v with the value...
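A minimal sketch of probeValue, which returns the active state and writes the voxel value through the reference in a single lookup:

    #include <nanovdb/NanoVDB.h>

    bool probeDemo(const nanovdb::NanoGrid<float>& grid,
                   const nanovdb::Coord& ijk, float& value)
    {
        auto acc = grid.getAccessor();
        return acc.probeValue(ijk, value); // true if the voxel is active
    }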
__hostdev__ bool operator>(const Version &rhs) const
__hostdev__ const Vec3T & max() const
__hostdev__ const LeafT * probeLeaf(const CoordType &ijk) const
__hostdev__ const DataType * data() const
static __hostdev__ float value()
__hostdev__ Vec3 operator/(const T &s) const
__hostdev__ Mask & operator=(const Mask &other)
__hostdev__ bool operator<=(const Coord &rhs) const
Return true if this Coord is lexicographically less or equal to the given Coord.
__hostdev__ GridType mapToGridType()
Maps from a templated build type to a GridType enum.
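A minimal sketch of mapToGridType, which resolves a templated build type to its GridType enum, e.g. float maps to GridType::Float:

    #include <nanovdb/NanoVDB.h>

    bool buildsFloatGrid()
    {
        return nanovdb::mapToGridType<float>() == nanovdb::GridType::Float; // true
    }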
__hostdev__ T & operator[](int i)
__hostdev__ const ValueType & background() const
Return a const reference to the background value.
__hostdev__ bool isActive() const
__hostdev__ bool operator==(const Version &rhs) const
__hostdev__ bool isActive(const CoordType &ijk) const
Return the active state of the given voxel (regardless of state or location in the tree...
__hostdev__ float Sqrt(float x)
Return the square root of a floating-point value.
__hostdev__ ValueType getValue(int i, int j, int k) const
__hostdev__ const BlindDataT * getBlindData(uint32_t n) const
__hostdev__ void setValue(uint32_t offset, uint16_t value)
__hostdev__ uint64_t volume() const
__hostdev__ Vec4 operator*(const T &s) const
__hostdev__ Vec3 & operator+=(const Vec3 &v)
typename BuildT::ValueType ValueType
typename RootT::ValueType ValueType
__hostdev__ Vec3 & maxComponent(const Vec3 &other)
Perform a component-wise maximum with the other vector.
__hostdev__ auto set(const CoordType &ijk, ArgsT &&...args)
__hostdev__ Map()
Default constructor for the identity map.
__hostdev__ ValueType & operator[](IndexType i)
Return a non-const reference to the given Coord component.
__hostdev__ ValueOnIterator cbeginValueOn() const
Dummy type for a 16bit quantization of floating point values.
__hostdev__ const NodeTrait< TreeT, LEVEL >::type * getNode() const
Implements Tree::getNodeInfo(Coord)
static __hostdev__ auto set(typename NanoRoot< BuildT >::Tile &, const ValueT &)
__hostdev__ bool operator!=(const Mask &other) const
static constexpr bool is_index
static __hostdev__ Coord OffsetToLocalCoord(uint32_t n)
C++11 implementation of std::is_floating_point.
__hostdev__ T & operator[](int i)
__hostdev__ Vec3 & operator=(const Vec3T< T2 > &rhs)
__hostdev__ Vec3(T x, T y, T z)
__hostdev__ Coord(ValueType n)
Initializes all coordinates to the given signed integer.
const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType Type
__hostdev__ NodeT * operator->() const
static __hostdev__ Coord min()
__hostdev__ const LeafNodeType * probeLeaf(const CoordType &ijk) const
__hostdev__ Coord & operator<<=(uint32_t n)
typename Node2::ChildNodeType Node1
C++11 implementation of std::enable_if.
uint8_t mCode[1u << (3 * LOG2DIM - 1)]
typename BuildT::CoordType CoordType
__hostdev__ Iterator & operator++()
__hostdev__ bool isActive(uint32_t n) const
__hostdev__ uint32_t pos() const
8-bit red, green, blue, alpha packed into 32 bit unsigned int
__hostdev__ ChildNodeType * probeChild(const CoordType &ijk)
__hostdev__ ValueOffIterator beginValueOff() const
__hostdev__ Vec4(const Vec4T< T2 > &v)
__hostdev__ bool isCompatible() const
__hostdev__ DenseIterator beginDense() const
__hostdev__ ValueOffIterator cbeginValueOff() const
C++11 implementation of std::is_same.
static __hostdev__ uint32_t padding()
__hostdev__ ValueOnIterator beginValueOn()
__hostdev__ void clear()
Reset this accessor to its initial state, i.e. with an empty cache.
__hostdev__ NodeT & operator*() const
__hostdev__ auto getNodeInfo(const CoordType &ijk) const
__hostdev__ bool probeValue(const CoordType &ijk, ValueType &v) const
__hostdev__ ValueType getLastValue() const
If the last entry in this node's table is a tile, return the tile's value. Otherwise, return the result of calling getLastValue() on the child.
__hostdev__ T length() const
Trait to map from LEVEL to node type.
__hostdev__ DataType * data()
__hostdev__ Vec4 & minComponent(const Vec4 &other)
Perform a component-wise minimum with the other vector.
__hostdev__ Vec4 & operator+=(const Vec4 &v)
__hostdev__ ConstDenseIterator cbeginDense() const
__hostdev__ bool isEmpty() const
Return true if this RootNode is empty, i.e. contains no values or nodes.
typename NanoLeaf< BuildT >::ValueType ValueT
typename NanoLeaf< BuildT >::FloatType FloatType
__hostdev__ Coord & operator=(const CoordT &other)
Assignment operator that works with openvdb::Coord.
__hostdev__ Coord & operator>>=(uint32_t n)
__hostdev__ void setAvg(const bool &)
__hostdev__ Coord round() const
Round each component of this Vec<T> to its closest integer value.
static __hostdev__ auto set(NanoLower< BuildT > &, uint32_t, const ValueT &)
static constexpr bool is_FpX
static __hostdev__ uint32_t bitCount()
Return the number of bits available in this Mask.
Rgba8 & operator=(Rgba8 &&)=default
Default move assignment operator.
__hostdev__ void clear()
Reset this accessor to its initial state, i.e. with an empty cache. Noop since this template specializa...
__hostdev__ uint32_t gridIndex() const
Return index of this grid in the buffer.
__hostdev__ NodeT * operator->() const
Implements Tree::getDim(Coord)
__hostdev__ const NodeTrait< RootT, 2 >::type * getFirstUpper() const
__hostdev__ void setValueOnly(uint32_t offset, const ValueType &v)
Sets the value at the specified location but leaves its state unchanged.
__hostdev__ const MaskType< LOG2DIM > & valueMask() const
Return a const reference to the bit mask of active voxels in this leaf node.
__hostdev__ const uint64_t & valueCount() const
Return total number of values indexed by the IndexGrid.
__hostdev__ float Fract(float x)
__hostdev__ Version()
Default constructor.