20 #ifndef NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
21 #define NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
26 template <
typename BuildT>
30 template<
typename BufferT = HostBuffer>
40 template <
typename BuildT,
typename BufferT = HostBuffer>
42 const BufferT&
buffer = BufferT());
53 template<
typename BufferT>
54 class NodeManagerHandle
59 template<
typename BuildT>
60 const NodeManager<BuildT>* getMgr()
const {
61 return mGridType == mapToGridType<BuildT>() ? (
const NodeManager<BuildT>*)mBuffer.
data() :
nullptr;
64 template<typename BuildT, typename U = BufferT>
65 typename enable_if<BufferTraits<U>::hasDeviceDual, const NodeManager<BuildT>*>::
type
66 getDeviceMgr()
const {
67 return mGridType == mapToGridType<BuildT>() ? (
const NodeManager<BuildT>*)mBuffer.deviceData() :
nullptr;
/// @brief Internal helper that casts away constness of a pointer, allowing the
///        non-const accessors to be implemented in terms of the const ones.
template<typename T>
static T* no_const(const T* ptr)
{
    return const_cast<T*>(ptr);
}
84 mGridType = other.mGridType;
85 mBuffer = std::move(other.mBuffer);
91 mGridType = other.mGridType;
92 mBuffer = std::move(other.mBuffer);
98 void reset() { mBuffer.clear(); }
104 const BufferT&
buffer()
const {
return mBuffer; }
109 uint8_t*
data() {
return mBuffer.data(); }
114 const uint8_t*
data()
const {
return mBuffer.data(); }
117 uint64_t
size()
const {
return mBuffer.size(); }
122 template<
typename BuildT>
128 template<
typename BuildT>
134 template<
typename BuildT,
typename U = BufferT>
136 deviceMgr()
const {
return this->
template getDeviceMgr<BuildT>(); }
141 template<
typename BuildT,
typename U = BufferT>
143 deviceMgr() {
return no_const(this->
template getDeviceMgr<BuildT>()); }
148 template<
typename U = BufferT>
154 void *tmp =
data->mGrid;
155 data->mGrid = deviceGrid;
156 mBuffer.deviceUpload(
stream, sync);
163 template<
typename U = BufferT>
168 void *tmp =
data->mGrid;
169 mBuffer.deviceDownload(
stream, sync);
178 template<
typename BuildT>
179 class NodeManager :
private NodeManagerData
181 using DataT = NodeManagerData;
182 using GridT = NanoGrid<BuildT>;
186 using RootT = NodeT<3>;
187 using Node2 = NodeT<2>;
188 using Node1 = NodeT<1>;
189 using Node0 = NodeT<0>;
192 static constexpr
bool FIXED_SIZE = Node0::FIXED_SIZE && Node1::FIXED_SIZE && Node2::FIXED_SIZE;
212 const uint32_t *p = grid.
tree().mNodeCount;
213 size +=
sizeof(int64_t)*(p[0]+p[1]+p[2]);
245 const NodeT<LEVEL>*
ptr =
nullptr;
246 if (DataT::mLinear) {
247 ptr = PtrAdd<const NodeT<LEVEL>>(DataT::mGrid, DataT::mOff[LEVEL]) + i;
249 ptr = PtrAdd<const NodeT<LEVEL>>(DataT::mGrid, DataT::mPtr[LEVEL][i]);
259 NodeT<LEVEL>*
ptr =
nullptr;
260 if (DataT::mLinear) {
261 ptr = PtrAdd<NodeT<LEVEL>>(DataT::mGrid, DataT::mOff[LEVEL]) + i;
263 ptr = PtrAdd<NodeT<LEVEL>>(DataT::mGrid, DataT::mPtr[LEVEL][i]);
283 template <
typename BuildT,
typename BufferT>
291 #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS
298 data->mLinear = uint8_t(1u);
299 data->mOff[0] = PtrDiff(grid.
tree().template getFirstNode<0>(), &grid);
300 data->mOff[1] = PtrDiff(grid.
tree().template getFirstNode<1>(), &grid);
301 data->mOff[2] = PtrDiff(grid.
tree().template getFirstNode<2>(), &grid);
303 int64_t *ptr0 = data->mPtr[0] =
reinterpret_cast<int64_t*
>(data + 1);
304 int64_t *ptr1 = data->mPtr[1] = data->mPtr[0] + grid.
tree().nodeCount(0);
305 int64_t *ptr2 = data->mPtr[2] = data->mPtr[1] + grid.
tree().nodeCount(1);
307 for (
auto it2 = grid.
tree().root().cbeginChild(); it2; ++it2) {
308 *ptr2++ = PtrDiff(&*it2, &grid);
309 for (
auto it1 = it2->cbeginChild(); it1; ++it1) {
310 *ptr1++ = PtrDiff(&*it1, &grid);
311 for (
auto it0 = it1->cbeginChild(); it0; ++it0) {
312 *ptr0++ = PtrDiff(&*it0, &grid);
323 #if defined(__CUDACC__)
324 #include <nanovdb/util/cuda/CudaNodeManager.cuh>
325 #endif// defined(__CUDACC__)
327 #endif // NANOVDB_NODEMANAGER_H_HAS_BEEN_INCLUDED
#define NANOVDB_MAGIC_NUMBER
const NodeManager< BuildT > * mgr() const
Returns a const pointer to the NodeManager encoded in this NodeManagerHandle.
static __hostdev__ bool isLinear(const GridT &grid)
return true if the nodes have both fixed size and are arranged breadth-first in memory. This allows for direct and memory-efficient linear access to nodes.
__hostdev__ uint64_t memUsage() const
Return the memory footprint in bytes of this instance.
BufferT & buffer()
Return a reference to the buffer.
__hostdev__ const TreeT & tree() const
Return a const reference to the tree.
NodeManagerHandle manages the memory of a NodeManager.
__hostdev__ const TreeT & tree() const
NodeManagerHandle()=default
Empty ctor.
NodeManager< BuildT > * mgr()
Returns a pointer to the NodeManager encoded in this NodeManagerHandle.
__hostdev__ uint64_t leafCount() const
__hostdev__ bool isValid(GridType gridType, GridClass gridClass)
return true if the combination of GridType and GridClass is valid.
GridType
List of types that are currently supported by NanoVDB.
__hostdev__ GridT & grid()
Return a reference to the grid.
void reset()
clear the buffer
enable_if< BufferTraits< U >::hasDeviceDual, void >::type deviceDownload(void *stream=nullptr, bool sync=true)
Download the NodeManager from the device, e.g. from GPU to CPU.
Highest level of the data structure. Contains a tree and a world->index transform (that currently onl...
static __hostdev__ uint64_t memUsage(const GridT &grid)
Return the memory footprint in bytes of the NodeManager derived from the specified grid...
NodeManager allows for sequential access to nodes.
__hostdev__ const RootT & root() const
__hostdev__ Node2 & upper(uint32_t i)
enable_if< BufferTraits< U >::hasDeviceDual, const NodeManager< BuildT > * >::type deviceMgr() const
Return a const pointer to the NodeManager encoded in this NodeManagerHandle on the device...
__hostdev__ const Node1 & lower(uint32_t i) const
Return the i'th lower internal node with respect to breadth-first ordering.
__hostdev__ NodeT< LEVEL > & node(uint32_t i)
Return the i'th node with respect to breadth-first ordering.
#define NANOVDB_ASSERT(x)
__hostdev__ const NodeT< LEVEL > & node(uint32_t i) const
Return the i'th node with respect to breadth-first ordering.
__hostdev__ const Node0 & leaf(uint32_t i) const
Return the i'th leaf node with respect to breadth-first ordering.
__hostdev__ RootT & root()
Return a reference to the root.
__hostdev__ uint64_t lowerCount() const
__hostdev__ Node1 & lower(uint32_t i)
__hostdev__ uint64_t nodeCount(int level) const
Return the number of tree nodes at the specified level.
enable_if< BufferTraits< U >::hasDeviceDual, NodeManager< BuildT > * >::type deviceMgr()
Return a pointer to the NodeManager encoded in this NodeManagerHandle on the device...
NodeManagerHandle & operator=(const NodeManagerHandle &)=delete
Disallow copy assignment operation.
~NodeManagerHandle()
Default destructor.
Implements a light-weight self-contained VDB data-structure in a single file! In other words...
uint64_t size() const
Returns the size in bytes of the raw memory buffer managed by this NodeManagerHandle's allocator...
#define NANOVDB_MAGIC_NODE
__hostdev__ const GridT & grid() const
HostBuffer - a buffer that contains a shared or private bump pool to either externally or internally ...
__hostdev__ bool isLinear() const
return true if the nodes have both fixed size and are arranged breadth-first in memory. This allows for direct and memory-efficient linear access to nodes.
NodeManagerHandle< BufferT > createNodeManager(const NanoGrid< BuildT > &grid, const BufferT &buffer=BufferT())
Construct a NodeManager and return its handle.
NodeManagerHandle & operator=(NodeManagerHandle &&other) noexcept
Move assignment operation.
const BufferT & buffer() const
Return a const reference to the buffer.
uint8_t * data()
Returns a non-const pointer to the data.
__hostdev__ const GridType & gridType() const
enable_if< BufferTraits< U >::hasDeviceDual, void >::type deviceUpload(void *deviceGrid, void *stream=nullptr, bool sync=true)
Upload the NodeManager to the device, e.g. from CPU to GPU.
__hostdev__ uint64_t upperCount() const
NodeManagerHandle(NodeManagerHandle &&other) noexcept
Move constructor.
__hostdev__ TreeT & tree()
Return a reference to the tree.
__hostdev__ bool isBreadthFirst() const
NodeManagerHandle(GridType gridType, BufferT &&buffer)
Move constructor from a buffer.
const uint8_t * data() const
Returns a const pointer to the data.
__hostdev__ const Node2 & upper(uint32_t i) const
Return the i'th upper internal node with respect to breadth-first ordering.
__hostdev__ Node0 & leaf(uint32_t i)
C++11 implementation of std::enable_if.