CreateNanoGrid.h
1 // Copyright Contributors to the OpenVDB Project
2 // SPDX-License-Identifier: MPL-2.0
3 
4 /*!
5  \file CreateNanoGrid.h
6 
7  \author Ken Museth
8 
9  \date June 26, 2020
10 
11  \note In the examples below we assume that @c srcGrid is an existing grid of type
12  SrcGridT = @c openvdb::FloatGrid, @c nanovdb::FloatGrid or @c nanovdb::build::FloatGrid.
13 
14  \brief Convert any grid to a nanovdb grid of the same type, e.g. float->float
15  \code
16  auto handle = nanovdb::createNanoGrid(srcGrid);
17  auto *dstGrid = handle.grid<float>();
18  \endcode
19 
20  \brief Convert a grid to a nanovdb grid of a different type, e.g. float->half
21  \code
22  auto handle = nanovdb::createNanoGrid<SrcGridT,nanovdb::Fp16>(srcGrid);
23  auto *dstGrid = handle.grid<nanovdb::Fp16>();
24  \endcode
25 
26  \brief Convert a grid to a nanovdb grid of the same type but using a CUDA buffer
27  \code
28  auto handle = nanovdb::createNanoGrid<SrcGridT, float, nanovdb::CudaDeviceBuffer>(srcGrid);
29  auto *dstGrid = handle.grid<float>();
30  \endcode
31 
32  \brief Create a nanovdb grid that indexes values in an existing source grid of any type.
33  If DstBuildT = nanovdb::ValueIndex both active and inactive values are indexed
34  and if DstBuildT = nanovdb::ValueOnIndex only active values are indexed.
35  \code
36  using DstBuildT = nanovdb::ValueIndex;// index both active and inactive values
37  auto handle = nanovdb::createNanoGrid<SrcGridT,DstBuildT>(srcGrid,0,false,false);// no blind data, no stats and no tile values
38  auto *dstGrid = handle.grid<DstBuildT>();
39  \endcode
40 
41  \brief Create a NanoVDB grid from scratch
42  \code
43 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
44  using SrcGridT = openvdb::FloatGrid;
45 #else
46  using SrcGridT = nanovdb::build::FloatGrid;
47 #endif
48  SrcGridT srcGrid(0.0f);// create an empty source grid
49  auto srcAcc = srcGrid.getAccessor();// create an accessor
50  srcAcc.setValue(nanovdb::Coord(1,2,3), 1.0f);// set a voxel value
51 
52  auto handle = nanovdb::createNanoGrid(srcGrid);// convert source grid to a grid handle
53  auto dstGrid = handle.grid<float>();// get a pointer to the destination grid
54  \endcode
55 
56  \brief Convert a base-pointer to an openvdb grid, denoted srcGrid, to a nanovdb
57  grid of the same type, e.g. float -> float or openvdb::Vec3f -> nanovdb::Vec3f
58  \code
59  auto handle = nanovdb::openToNanoVDB(*srcGrid);// convert source grid to a grid handle
60  auto dstGrid = handle.grid<float>();// get a pointer to the destination grid
61  \endcode
62 
63  \brief Converts any existing grid to a NanoVDB grid, for example:
64  nanovdb::build::Grid<SrcBuildT> -> nanovdb::Grid<DstBuildT>
65  nanovdb::Grid<SrcBuildT> -> nanovdb::Grid<DstBuildT>
66  nanovdb::Grid<SrcBuildT> -> nanovdb::Grid<ValueIndex or ValueOnIndex>
67  openvdb::Grid<SrcBuildT> -> nanovdb::Grid<DstBuildT>
68  openvdb::Grid<PointIndex> -> nanovdb::Grid<PointIndex>
69  openvdb::Grid<PointData> -> nanovdb::Grid<PointData>
70  openvdb::Grid<SrcBuildT> -> nanovdb::Grid<ValueIndex or ValueOnIndex>
71 
72  \note This file replaces GridBuilder.h, IndexGridBuilder.h and OpenToNanoVDB.h
73 */
74 
75 #ifndef NANOVDB_CREATE_NANOGRID_H_HAS_BEEN_INCLUDED
76 #define NANOVDB_CREATE_NANOGRID_H_HAS_BEEN_INCLUDED
77 
78 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
79 #include <openvdb/openvdb.h>
82 #endif
83 
84 #include "GridBuilder.h"
85 #include "NodeManager.h"
86 #include "GridHandle.h"
87 #include "GridStats.h"
88 #include "GridChecksum.h"
89 #include "Range.h"
90 #include "Invoke.h"
91 #include "ForEach.h"
92 #include "Reduce.h"
93 #include "PrefixSum.h"
94 #include "DitherLUT.h"// for nanovdb::DitherLUT
95 
96 #include <limits>
97 #include <vector>
98 #include <set>
99 #include <cstring> // for memcpy
100 #include <type_traits>
101 
102 namespace nanovdb {
103 
104 // Forward declarations (defined below)
105 template <typename> class CreateNanoGrid;
106 class AbsDiff;
107 template <typename> struct MapToNano;
108 
109 //================================================================================================
110 
111 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
112 /// @brief Forward declaration of free-standing function that converts an OpenVDB GridBase into a NanoVDB GridHandle
113 /// @tparam BufferT Type of the buffer used to allocate the destination grid
114 /// @param base Shared pointer to a base openvdb grid to be converted
115 /// @param sMode Mode for computing statistics of the destination grid
116 /// @param cMode Mode for computing checksums of the destination grid
117 /// @param verbose Mode of verbosity
118 /// @return Handle to the destination NanoGrid
119 template<typename BufferT = HostBuffer>
120 GridHandle<BufferT>
121 openToNanoVDB(const openvdb::GridBase::Ptr& base,
122  StatsMode sMode = StatsMode::Default,
123  ChecksumMode cMode = ChecksumMode::Default,
124  int verbose = 0);
125 #endif
126 
127 //================================================================================================
128 
129 /// @brief Freestanding function that creates a NanoGrid<T> from any source grid
130 /// @tparam SrcGridT Type of the input (source) grid, e.g. openvdb::Grid or nanovdb::Grid
131 /// @tparam DstBuildT Type of values in the output (destination) nanovdb Grid, e.g. float or nanovdb::Fp16
132 /// @tparam BufferT Type of the buffer used to allocate the destination grid
133 /// @param srcGrid Input (source) grid to be converted
134 /// @param sMode Mode for computing statistics of the destination grid
135 /// @param cMode Mode for computing checksums of the destination grid
136 /// @param verbose Mode of verbosity
137 /// @param buffer Instance of a buffer used for allocation
138 /// @return Handle to the destination NanoGrid
139 template<typename SrcGridT,
140  typename DstBuildT = typename MapToNano<typename SrcGridT::BuildType>::type,
141  typename BufferT = HostBuffer>
143 createNanoGrid(const SrcGridT &srcGrid,
144  StatsMode sMode = StatsMode::Default,
145  ChecksumMode cMode = ChecksumMode::Default,
146  int verbose = 0,
147  const BufferT &buffer = BufferT());
148 
149 //================================================================================================
150 
151 /// @brief Freestanding function that creates a NanoGrid<ValueIndex> or NanoGrid<ValueOnIndex> from any source grid
152 /// @tparam SrcGridT Type of the input (source) grid, e.g. openvdb::Grid or nanovdb::Grid
153 /// @tparam DstBuildT If ValueIndex all (active and inactive) values are indexed and if
154 /// it is ValueOnIndex only active values are indexed.
155 /// @tparam BufferT Type of the buffer used to allocate the destination grid
156 /// @param channels If non-zero the values (active or all) in @c srcGrid are encoded as blind
157 /// data in the output index grid. @c channels indicates the number of copies
158 /// of these blind data
159 /// @param includeStats If true all tree nodes will include indices for stats, i.e. min/max/avg/std-dev
160 /// @param includeTiles If false only values in leaf nodes are indexed
161 /// @param verbose Mode of verbosity
162 /// @param buffer Instance of a buffer used for allocation
163 /// @return Handle to the destination NanoGrid<T> where T = ValueIndex or ValueOnIndex
164 template<typename SrcGridT,
165  typename DstBuildT = typename MapToNano<typename SrcGridT::BuildType>::type,
166  typename BufferT = HostBuffer>
168 createNanoGrid(const SrcGridT &srcGrid,
169  uint32_t channels = 0u,
170  bool includeStats = true,
171  bool includeTiles = true,
172  int verbose = 0,
173  const BufferT &buffer = BufferT());
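// Example (illustrative sketch): index only the active values of the source grid and
// append one channel that stores copies of those values as blind data. As in the file
// comment above, srcGrid is assumed to be an existing float grid.
//
//   auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::ValueOnIndex>(srcGrid, 1u);
//   auto *dstGrid = handle.grid<nanovdb::ValueOnIndex>();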
174 
175 //================================================================================================
176 
177 /// @brief Freestanding function to create a NanoGrid<FpN> from any source grid
178 /// @tparam SrcGridT Type of the input (source) grid, e.g. openvdb::Grid or nanovdb::Grid
179 /// @tparam DstBuildT = FpN, i.e. variable bit-width of the output grid
180 /// @tparam OracleT Type of the oracle used to determine the local bit-width, i.e. N in FpN
181 /// @tparam BufferT Type of the buffer used to allocate the destination grid
182 /// @param srcGrid Input (source) grid to be converted
183 /// @param ditherOn switch to enable or disable dithering of quantization error
184 /// @param sMode Mode for computing statistics of the destination grid
185 /// @param cMode Mode for computing checksums of the destination grid
186 /// @param verbose Mode of verbosity
187 /// @param oracle Instance of an oracle used to determine the local bit-width, i.e. N in FpN
188 /// @param buffer Instance of a buffer used for allocation
189 /// @return Handle to the destination NanoGrid
190 template<typename SrcGridT,
191  typename DstBuildT = typename MapToNano<typename SrcGridT::BuildType>::type,
192  typename OracleT = AbsDiff,
193  typename BufferT = HostBuffer>
195 createNanoGrid(const SrcGridT &srcGrid,
196  StatsMode sMode = StatsMode::Default,
197  ChecksumMode cMode = ChecksumMode::Default,
198  bool ditherOn = false,
199  int verbose = 0,
200  const OracleT &oracle = OracleT(),
201  const BufferT &buffer = BufferT());
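// Example (illustrative sketch): variable bit-rate quantization with an explicit absolute
// error tolerance. Assumes srcGrid is an existing float grid; the tolerance 0.01f is arbitrary.
//
//   nanovdb::AbsDiff oracle(0.01f);// accept approximations within +/- 0.01 of the exact value
//   auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::FpN>(srcGrid,
//                     nanovdb::StatsMode::Default, nanovdb::ChecksumMode::Default,
//                     /*ditherOn=*/true, /*verbose=*/0, oracle);
//   auto *dstGrid = handle.grid<nanovdb::FpN>();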
202 
203 //================================================================================================
204 
205 /// @brief Freestanding function to create a NanoGrid<FpX> from any source grid, X=4,8,16
207 /// @tparam SrcGridT Type of the input (source) grid, e.g. openvdb::Grid or nanovdb::Grid
207 /// @tparam DstBuildT = Fp4, Fp8 or Fp16, i.e. quantization bit-width of the output grid
208 /// @tparam BufferT Type of the buffer used to allocate the destination grid
209 /// @param srcGrid Input (source) grid to be converted
210 /// @param ditherOn switch to enable or disable dithering of quantization error
211 /// @param sMode Mode for computing statistics of the destination grid
212 /// @param cMode Mode for computing checksums of the destination grid
213 /// @param verbose Mode of verbosity
214 /// @param buffer Instance of a buffer used for allocation
215 /// @return Handle to the destination NanoGrid
216 template<typename SrcGridT,
217  typename DstBuildT = typename MapToNano<typename SrcGridT::BuildType>::type,
218  typename BufferT = HostBuffer>
220 createNanoGrid(const SrcGridT &srcGrid,
221  StatsMode sMode = StatsMode::Default,
222  ChecksumMode cMode = ChecksumMode::Default,
223  bool ditherOn = false,
224  int verbose = 0,
225  const BufferT &buffer = BufferT());
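// Example (illustrative sketch): fixed 8-bit quantization with dithering of the
// quantization error enabled. Assumes srcGrid is an existing float grid.
//
//   auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::Fp8>(srcGrid,
//                     nanovdb::StatsMode::Default, nanovdb::ChecksumMode::Default,
//                     /*ditherOn=*/true);
//   auto *dstGrid = handle.grid<nanovdb::Fp8>();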
226 
227 //================================================================================================
228 
229 /// @brief Compression oracle based on absolute difference
230 class AbsDiff
231 {
232  float mTolerance;// absolute error tolerance
233 public:
234  /// @note The default value of -1 means it's un-initialized!
235  AbsDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
236  AbsDiff(const AbsDiff&) = default;
237  ~AbsDiff() = default;
238  operator bool() const {return mTolerance>=0.0f;}
239  void init(nanovdb::GridClass gClass, float background) {
240  if (gClass == GridClass::LevelSet) {
241  static const float halfWidth = 3.0f;
242  mTolerance = 0.1f * background / halfWidth;// range of ls: [-3dx; 3dx]
243  } else if (gClass == GridClass::FogVolume) {
244  mTolerance = 0.01f;// range of FOG volumes: [0;1]
245  } else {
246  mTolerance = 0.0f;
247  }
248  }
249  void setTolerance(float tolerance) { mTolerance = tolerance; }
250  float getTolerance() const { return mTolerance; }
251  /// @brief Return true if the approximate value is within the accepted
252  /// absolute error bounds of the exact value.
253  ///
254  /// @details Required member method
255  bool operator()(float exact, float approx) const
256  {
257  return Abs(exact - approx) <= mTolerance;
258  }
259 };// AbsDiff
260 
261 inline std::ostream& operator<<(std::ostream& os, const AbsDiff& diff)
262 {
263  os << "Absolute tolerance: " << diff.getTolerance();
264  return os;
265 }
266 
267 //================================================================================================
268 
269 /// @brief Compression oracle based on relative difference
270 class RelDiff
271 {
272  float mTolerance;// relative error tolerance
273 public:
274  /// @note The default value of -1 means it's un-initialized!
275  RelDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
276  RelDiff(const RelDiff&) = default;
277  ~RelDiff() = default;
278  operator bool() const {return mTolerance>=0.0f;}
279  void setTolerance(float tolerance) { mTolerance = tolerance; }
280  float getTolerance() const { return mTolerance; }
281  /// @brief Return true if the approximate value is within the accepted
282  /// relative error bounds of the exact value.
283  ///
284  /// @details Required member method
285  bool operator()(float exact, float approx) const
286  {
287  return Abs(exact - approx)/Max(Abs(exact), Abs(approx)) <= mTolerance;
288  }
289 };// RelDiff
290 
291 inline std::ostream& operator<<(std::ostream& os, const RelDiff& diff)
292 {
293  os << "Relative tolerance: " << diff.getTolerance();
294  return os;
295 }
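// Example (illustrative sketch): using the relative-error oracle. Note that, unlike AbsDiff,
// RelDiff is not initialized automatically from the grid class (see preProcess<FpN> below),
// so a tolerance must be supplied explicitly; the value 0.01f (1%) is arbitrary.
//
//   nanovdb::RelDiff oracle(0.01f);
//   auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::FpN>(srcGrid,
//                     nanovdb::StatsMode::Default, nanovdb::ChecksumMode::Default,
//                     /*ditherOn=*/false, /*verbose=*/0, oracle);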
296 
297 //================================================================================================
298 
299 /// @brief The NodeAccessor provides a uniform API for accessing nodes of NanoVDB, OpenVDB and build grids
300 ///
301 /// @note General implementation that works with nanovdb::build::Grid
302 template <typename GridT>
303 class NodeAccessor
304 {
305 public:
306  static constexpr bool IS_OPENVDB = false;
307  static constexpr bool IS_NANOVDB = false;
308  using BuildType = typename GridT::BuildType;
309  using ValueType = typename GridT::ValueType;
310  using GridType = GridT;
311  using TreeType = typename GridT::TreeType;
312  using RootType = typename TreeType::RootNodeType;
313  template<int LEVEL>
315  NodeAccessor(const GridT &grid) : mMgr(const_cast<GridT&>(grid)) {}
316  const GridType& grid() const {return mMgr.grid();}
317  const TreeType& tree() const {return mMgr.tree();}
318  const RootType& root() const {return mMgr.root();}
319  uint64_t nodeCount(int level) const { return mMgr.nodeCount(level); }
320  template <int LEVEL>
321  const NodeType<LEVEL>& node(uint32_t i) const {return mMgr.template node<LEVEL>(i); }
322  const std::string& getName() const {return this->grid().getName();};
323  bool hasLongGridName() const {return this->grid().getName().length() >= GridData::MaxNameSize;}
324  const nanovdb::Map& map() const {return this->grid().map();}
325  GridClass gridClass() const {return this->grid().gridClass();}
326 private:
327  build::NodeManager<GridT> mMgr;
328 };// NodeAccessor<GridT>
329 
330 //================================================================================================
331 
332 /// @brief Template specialization for nanovdb::Grid which is special since its NodeManager
333 /// uses a handle in order to support node access on the GPU!
334 template <typename BuildT>
335 class NodeAccessor< NanoGrid<BuildT> >
336 {
337 public:
338  static constexpr bool IS_OPENVDB = false;
339  static constexpr bool IS_NANOVDB = true;
340  using BuildType = BuildT;
343  using ValueType = typename GridType::ValueType;
344  using TreeType = typename GridType::TreeType;
345  using RootType = typename TreeType::RootType;
346  template<int LEVEL>
349  : mHandle(createNodeManager<BuildT, BufferType>(grid))
350  , mMgr(*(mHandle.template mgr<BuildT>())) {}
351  const GridType& grid() const {return mMgr.grid();}
352  const TreeType& tree() const {return mMgr.tree();}
353  const RootType& root() const {return mMgr.root();}
354  uint64_t nodeCount(int level) const { return mMgr.nodeCount(level); }
355  template <int LEVEL>
356  const NodeType<LEVEL>& node(uint32_t i) const {return mMgr.template node<LEVEL>(i); }
357  std::string getName() const {return std::string(this->grid().gridName());};
358  bool hasLongGridName() const {return this->grid().hasLongGridName();}
359  const nanovdb::Map& map() const {return this->grid().map();}
360  GridClass gridClass() const {return this->grid().gridClass();}
361 private:
363  const NodeManager<BuildT> &mMgr;
364 };// NodeAccessor<nanovdb::Grid>
365 
366 //================================================================================================
367 
368 /// @brief Trait that maps any type to the corresponding nanovdb type
369 /// @tparam T Type to be mapped
370 template<typename T>
371 struct MapToNano { using type = T; };
372 
373 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
374 
375 template<>
376 struct MapToNano<openvdb::ValueMask> {using type = nanovdb::ValueMask;};
377 template<typename T>
378 struct MapToNano<openvdb::math::Vec3<T>>{using type = nanovdb::Vec3<T>;};
379 template<typename T>
380 struct MapToNano<openvdb::math::Vec4<T>>{using type = nanovdb::Vec4<T>;};
381 template<>
382 struct MapToNano<openvdb::PointIndex32> {using type = uint32_t;};
383 template<>
384 struct MapToNano<openvdb::PointDataIndex32> {using type = uint32_t;};
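// For example, the mapping above implies (compile-time check, illustrative only):
// static_assert(std::is_same<MapToNano<openvdb::Vec3f>::type, nanovdb::Vec3f>::value, "");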
385 
386 /// Templated Grid with default 32->16->8 configuration
387 template <typename BuildT>
388 using OpenLeaf = openvdb::tree::LeafNode<BuildT,3>;
389 template <typename BuildT>
390 using OpenLower = openvdb::tree::InternalNode<OpenLeaf<BuildT>,4>;
391 template <typename BuildT>
392 using OpenUpper = openvdb::tree::InternalNode<OpenLower<BuildT>,5>;
393 template <typename BuildT>
394 using OpenRoot = openvdb::tree::RootNode<OpenUpper<BuildT>>;
395 template <typename BuildT>
396 using OpenTree = openvdb::tree::Tree<OpenRoot<BuildT>>;
397 template <typename BuildT>
398 using OpenGrid = openvdb::Grid<OpenTree<BuildT>>;
399 
400 //================================================================================================
401 
402 /// @brief Template specialization for openvdb::Grid
403 template <typename BuildT>
404 class NodeAccessor<OpenGrid<BuildT>>
405 {
406 public:
407  static constexpr bool IS_OPENVDB = true;
408  static constexpr bool IS_NANOVDB = false;
409  using BuildType = BuildT;
410  using GridType = OpenGrid<BuildT>;
411  using ValueType = typename GridType::ValueType;
412  using TreeType = OpenTree<BuildT>;
413  using RootType = OpenRoot<BuildT>;
414  template<int LEVEL>
416  NodeAccessor(const GridType &grid) : mMgr(const_cast<GridType&>(grid)) {
417  const auto mat4 = this->grid().transform().baseMap()->getAffineMap()->getMat4();
418  mMap.set(mat4, mat4.inverse());
419  }
420  const GridType& grid() const {return mMgr.grid();}
421  const TreeType& tree() const {return mMgr.tree();}
422  const RootType& root() const {return mMgr.root();}
423  uint64_t nodeCount(int level) const { return mMgr.nodeCount(level); }
424  template <int LEVEL>
425  const NodeType<LEVEL>& node(uint32_t i) const {return mMgr.template node<LEVEL>(i); }
426  std::string getName() const { return this->grid().getName(); };
427  bool hasLongGridName() const {return this->grid().getName().length() >= GridData::MaxNameSize;}
428  const nanovdb::Map& map() const {return mMap;}
429  GridClass gridClass() const {
430  switch (this->grid().getGridClass()) {
431  case openvdb::GRID_LEVEL_SET:
432  if (!is_floating_point<BuildT>::value) OPENVDB_THROW(openvdb::ValueError, "processGrid: Level sets are expected to be floating point types");
433  return GridClass::LevelSet;
434  case openvdb::GRID_FOG_VOLUME:
435  return GridClass::FogVolume;
436  case openvdb::GRID_STAGGERED:
437  return GridClass::Staggered;
438  default:
439  return GridClass::Unknown;
440  }
441  }
442 private:
443  build::NodeManager<GridType> mMgr;
444  nanovdb::Map mMap;
445 };// NodeAccessor<openvdb::Grid<T>>
446 
447 //================================================================================================
448 
449 /// @brief Template specialization for openvdb::tools::PointIndexGrid
450 template <>
451 class NodeAccessor<openvdb::tools::PointIndexGrid>
452 {
453 public:
454  static constexpr bool IS_OPENVDB = true;
455  static constexpr bool IS_NANOVDB = false;
459  using RootType = typename TreeType::RootNodeType;
460  using ValueType = typename GridType::ValueType;
461  template<int LEVEL>
463  NodeAccessor(const GridType &grid) : mMgr(const_cast<GridType&>(grid)) {
464  const auto mat4 = this->grid().transform().baseMap()->getAffineMap()->getMat4();
465  mMap.set(mat4, mat4.inverse());
466  }
467  const GridType& grid() const {return mMgr.grid();}
468  const TreeType& tree() const {return mMgr.tree();}
469  const RootType& root() const {return mMgr.root();}
470  uint64_t nodeCount(int level) const { return mMgr.nodeCount(level); }
471  template <int LEVEL>
472  const NodeType<LEVEL>& node(uint32_t i) const {return mMgr.template node<LEVEL>(i); }
473  std::string getName() const { return this->grid().getName(); };
474  bool hasLongGridName() const {return this->grid().getName().length() >= GridData::MaxNameSize;}
475  const nanovdb::Map& map() const {return mMap;}
476  GridClass gridClass() const {return GridClass::PointIndex;}
477 private:
478  build::NodeManager<GridType> mMgr;
479  nanovdb::Map mMap;
480 };// NodeAccessor<openvdb::tools::PointIndexGrid>
481 
482 //================================================================================================
483 
484 /// @brief Template specialization for openvdb::points::PointDataGrid
485 template <>
486 class NodeAccessor<openvdb::points::PointDataGrid>
487 {
488 public:
489  static constexpr bool IS_OPENVDB = true;
490  static constexpr bool IS_NANOVDB = false;
494  using RootType = typename TreeType::RootNodeType;
495  using ValueType = typename GridType::ValueType;
496  template<int LEVEL>
498  NodeAccessor(const GridType &grid) : mMgr(const_cast<GridType&>(grid)) {
499  const auto mat4 = this->grid().transform().baseMap()->getAffineMap()->getMat4();
500  mMap.set(mat4, mat4.inverse());
501  }
502  const GridType& grid() const {return mMgr.grid();}
503  const TreeType& tree() const {return mMgr.tree();}
504  const RootType& root() const {return mMgr.root();}
505  uint64_t nodeCount(int level) const { return mMgr.nodeCount(level); }
506  template <int LEVEL>
507  const NodeType<LEVEL>& node(uint32_t i) const {return mMgr.template node<LEVEL>(i); }
508  std::string getName() const { return this->grid().getName(); };
509  bool hasLongGridName() const {return this->grid().getName().length() >= GridData::MaxNameSize;}
510  const nanovdb::Map& map() const {return mMap;}
511  GridClass gridClass() const {return GridClass::PointData;}
512 private:
513  build::NodeManager<GridType> mMgr;
514  nanovdb::Map mMap;
515 };// NodeAccessor<openvdb::points::PointDataGrid>
516 
517 #endif// NANOVDB_USE_OPENVDB
518 
519 //================================================================================================
520 
521 /// @brief Creates any nanovdb Grid from any source grid (certain combinations are obviously not allowed)
522 template <typename SrcGridT>
523 class CreateNanoGrid
524 {
525 public:
526  // SrcGridT can be either openvdb::Grid, nanovdb::Grid or nanovdb::build::Grid
527  using SrcNodeAccT = NodeAccessor<SrcGridT>;
528  using SrcBuildT = typename SrcNodeAccT::BuildType;
529  using SrcValueT = typename SrcNodeAccT::ValueType;
530  using SrcTreeT = typename SrcNodeAccT::TreeType;
531  using SrcRootT = typename SrcNodeAccT::RootType;
532  template <int LEVEL>
534 
535  /// @brief Constructor from a source grid
536  /// @param srcGrid Source grid of type SrcGridT
537  CreateNanoGrid(const SrcGridT &srcGrid);
538 
539  /// @brief Constructor from a source node accessor (defined above)
540  /// @param srcNodeAcc Source node accessor of type SrcNodeAccT
541  CreateNanoGrid(const SrcNodeAccT &srcNodeAcc);
542 
543  /// @brief Set the level of verbosity
544  /// @param mode level of verbosity, mode=0 means quiet
545  void setVerbose(int mode = 1) { mVerbose = mode; }
546 
547  /// @brief Enable or disable dithering, i.e. randomization of the quantization error.
548  /// @param on enable or disable dithering
549  /// @warning Dithering only has an effect when DstBuildT = {Fp4, Fp8, Fp16, FpN}
550  void enableDithering(bool on = true) { mDitherOn = on; }
551 
552  /// @brief Set the mode used for computing statistics of the destination grid
553  /// @param mode specify the mode of statistics
554  void setStats(StatsMode mode = StatsMode::Default) { mStats = mode; }
555 
556  /// @brief Set the mode used for computing checksums of the destination grid
557  /// @param mode specify the mode of checksum
558  void setChecksum(ChecksumMode mode = ChecksumMode::Default) { mChecksum = mode; }
559 
560  /// @brief Converts the source grid into a nanovdb grid with the specified destination build type
561  /// @tparam DstBuildT build type of the destination, output, grid
562  /// @tparam BufferT Type of the buffer used for allocating the destination grid
563  /// @param buffer instance of the buffer used for allocation
564  /// @return Return an instance of a GridHandle (invoking move semantics)
565  /// @note This version is when DstBuildT != {FpN, ValueIndex, ValueOnIndex}
569  getHandle(const BufferT &buffer = BufferT());
570 
571  /// @brief Converts the source grid into a nanovdb grid with variable bit quantization
572  /// @tparam DstBuildT FpN, i.e. the destination grid uses variable bit quantization
573  /// @tparam OracleT Type of oracle used to determine the N in FpN
574  /// @tparam BufferT Type of the buffer used for allocating the destination grid
575  /// @param oracle Instance of the oracle used to determine the N in FpN
576  /// @param buffer instance of the buffer used for allocation
577  /// @return Return an instance of a GridHandle (invoking move semantics)
578  /// @note This version assumes DstBuildT == FpN
581  getHandle(const OracleT &oracle = OracleT(),
582  const BufferT &buffer = BufferT());
583 
584  /// @brief Converts the source grid into a nanovdb grid with indices to external arrays of values
585  /// @tparam DstBuildT ValueIndex or ValueOnIndex, i.e. index all or just active values
586  /// @tparam BufferT Type of the buffer used for allocating the destination grid
587  /// @param channels Number of copies of values encoded as blind data in the destination grid
588  /// @param includeStats Specify if stats should be indexed
589  /// @param includeTiles Specify if tile values, i.e. non-leaf-node-values, should be indexed
590  /// @param buffer instance of the buffer used for allocation
591  /// @return Return an instance of a GridHandle (invoking move semantics)
594  getHandle(uint32_t channels = 0u,
595  bool includeStats = true,
596  bool includeTiles = true,
597  const BufferT &buffer = BufferT());
598 
599  /// @brief Add blind data to the destination grid
600  /// @param name String name of the blind data
601  /// @param dataSemantic Semantics of the blind data
602  /// @param dataClass Class of the blind data
603  /// @param dataType Type of the blind data
604  /// @param count Element count of the blind data
605  /// @param size Size of each element of the blind data
606  /// @return Return the index used to access the blind data
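 ///
 /// @par Example
 /// A minimal sketch; the attribute name, semantic and element count are hypothetical:
 /// @code
 /// CreateNanoGrid<SrcGridT> converter(srcGrid);
 /// converter.addBlindData("velocity",
 ///                        GridBlindDataSemantic::PointVelocity,
 ///                        GridBlindDataClass::AttributeArray,
 ///                        GridType::Vec3f,
 ///                        pointCount, sizeof(nanovdb::Vec3f));
 /// auto handle = converter.getHandle<float>();
 /// @endcode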
607  uint64_t addBlindData(const std::string& name,
608  GridBlindDataSemantic dataSemantic,
609  GridBlindDataClass dataClass,
611  size_t count, size_t size)
612  {
613  const size_t order = mBlindMetaData.size();
614  mBlindMetaData.emplace(name, dataSemantic, dataClass, dataType, order, count, size);
615  return order;
616  }
617 
618  /// @brief This method only has an effect when getHandle was called with DstBuildT = ValueIndex or ValueOnIndex
619  /// @return Return the number of indexed values. If called before getHandle was called with
620  /// DstBuildT = ValueIndex or ValueOnIndex the return value is zero. Otherwise it is larger than zero.
621  uint64_t valueCount() const {return mValIdx[0].empty() ? 0u : mValIdx[0].back();}
622 
623  /// @brief Copy values from the source grid into a provided buffer
624  /// @tparam DstBuildT Must be ValueIndex or ValueOnIndex, i.e. an index grid
625  /// @param buffer pointer to the buffer into which the indexed values are written
626  template <typename DstBuildT>
627  typename enable_if<BuildTraits<DstBuildT>::is_index>::type
628  copyValues(SrcValueT *buffer);
629 
630 private:
631 
632  // =========================================================
633 
634  template <typename T, int LEVEL>
635  typename enable_if<!(is_same<T,FpN>::value&&LEVEL==0), typename NodeTrait<NanoRoot<T>, LEVEL>::type*>::type
636  dstNode(uint64_t i) const {
637  static_assert(LEVEL==0 || LEVEL==1 || LEVEL==2, "Expected LEVEL== {0,1,2}");
638  using NodeT = typename NodeTrait<NanoRoot<T>, LEVEL>::type;
639  return PtrAdd<NodeT>(mBufferPtr, mOffset[5-LEVEL]) + i;
640  }
641  template <typename T, int LEVEL>
642  typename enable_if<is_same<T,FpN>::value && LEVEL==0, NanoLeaf<FpN>*>::type
643  dstNode(uint64_t i) const {return PtrAdd<NanoLeaf<FpN>>(mBufferPtr, mCodec[i].offset);}
644 
645  template <typename T> NanoRoot<T>* dstRoot() const {return PtrAdd<NanoRoot<T>>(mBufferPtr, mOffset.root);}
646  template <typename T> NanoTree<T>* dstTree() const {return PtrAdd<NanoTree<T>>(mBufferPtr, mOffset.tree);}
647  template <typename T> NanoGrid<T>* dstGrid() const {return PtrAdd<NanoGrid<T>>(mBufferPtr, mOffset.grid);}
648  GridBlindMetaData* dstMeta(uint32_t i) const { return PtrAdd<GridBlindMetaData>(mBufferPtr, mOffset.meta) + i;};
649 
650  // =========================================================
651 
652  template <typename DstBuildT>
653  typename disable_if<is_same<FpN,DstBuildT>::value || BuildTraits<DstBuildT>::is_index>::type
654  preProcess();
655 
656  template <typename DstBuildT>
657  typename enable_if<BuildTraits<DstBuildT>::is_index>::type
658  preProcess(uint32_t channels);
659 
660  template <typename DstBuildT, typename OracleT>
662  preProcess(OracleT oracle);
663 
664  // =========================================================
665 
666  // Below are private methods used to serialize nodes into NanoVDB
667  template<typename DstBuildT, typename BufferT>
668  GridHandle<BufferT> initHandle(const BufferT& buffer);
669 
670  // =========================================================
671 
672  template <typename DstBuildT>
673  inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
674  postProcess(uint32_t channels);
675 
676  template <typename DstBuildT>
677  inline typename disable_if<BuildTraits<DstBuildT>::is_index>::type
678  postProcess();
679 
680  // ========================================================
681 
682  template<typename DstBuildT>
683  typename disable_if<BuildTraits<DstBuildT>::is_special>::type
684  processLeafs();
685 
686  template<typename DstBuildT>
687  typename enable_if<BuildTraits<DstBuildT>::is_index>::type
688  processLeafs();
689 
690  template<typename DstBuildT>
691  typename enable_if<BuildTraits<DstBuildT>::is_FpX>::type
692  processLeafs();
693 
694  template<typename DstBuildT>
696  processLeafs();
697 
698  template<typename DstBuildT>
700  processLeafs();
701 
702  template<typename DstBuildT>
704  processLeafs();
705 
706  // =========================================================
707 
708  template<typename DstBuildT, int LEVEL>
709  typename enable_if<BuildTraits<DstBuildT>::is_index>::type
710  processInternalNodes();
711 
712  template<typename DstBuildT, int LEVEL>
713  typename enable_if<!BuildTraits<DstBuildT>::is_index>::type
714  processInternalNodes();
715 
716  // =========================================================
717 
718  template <typename DstBuildT>
719  typename enable_if<BuildTraits<DstBuildT>::is_index>::type
720  processRoot();
721 
722  template <typename DstBuildT>
723  typename enable_if<!BuildTraits<DstBuildT>::is_index>::type
724  processRoot();
725 
726  // =========================================================
727 
728  template<typename DstBuildT>
729  void processTree();
730 
731  template<typename DstBuildT>
732  void processGrid();
733 
734  template <typename DstBuildT, int LEVEL>
735  typename enable_if<BuildTraits<DstBuildT>::is_index, uint64_t>::type
736  countTileValues(uint64_t valueCount);
737 
738  template <typename DstBuildT>
739  typename enable_if<BuildTraits<DstBuildT>::is_index, uint64_t>::type
740  countValues();
741 
742 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
743  template<typename T = SrcGridT>
746  countPoints() const;
747 
748  template<typename T = SrcGridT>
751  countPoints() const;
752 
753  template<typename DstBuildT, typename AttT, typename CodecT = openvdb::points::UnknownCodec, typename T = SrcGridT>
755  copyPointAttribute(size_t attIdx, AttT *attPtr);
756 #else
757  uint64_t countPoints() const {return 0u;}
758 #endif
759 
760  uint8_t* mBufferPtr;// pointer to the beginning of the destination nanovdb grid buffer
761  struct BufferOffsets {
762  uint64_t grid, tree, root, upper, lower, leaf, meta, blind, size;
763  uint64_t operator[](int i) const { return *(reinterpret_cast<const uint64_t*>(this)+i); }
764  } mOffset;
765  int mVerbose;
766  uint64_t mLeafNodeSize;// non-trivial when DstBuildT = FpN
767 
768  std::unique_ptr<SrcNodeAccT> mSrcNodeAccPtr;// placeholder for potential local instance
769  const SrcNodeAccT &mSrcNodeAcc;
770  struct BlindMetaData; // forward declaration
771  std::set<BlindMetaData> mBlindMetaData; // sorted according to BlindMetaData.order
772  struct Codec { float min, max; uint64_t offset; uint8_t log2; };// used for adaptive bit-rate quantization
773  std::unique_ptr<Codec[]> mCodec;// defines a codec per leaf node when DstBuildT = FpN
774  StatsMode mStats;
775  ChecksumMode mChecksum;
776  bool mDitherOn, mIncludeStats, mIncludeTiles;
777  std::vector<uint64_t> mValIdx[3];// store id of first value in node
778 }; // CreateNanoGrid
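// Example (illustrative sketch): two-phase use of the CreateNanoGrid class to build an index
// grid and then extract the indexed source values into an external array. Assumes srcGrid is
// an existing float grid, and uses the valueCount() and copyValues() members declared above.
//
//   nanovdb::CreateNanoGrid<SrcGridT> converter(srcGrid);
//   auto handle = converter.getHandle<nanovdb::ValueOnIndex>(/*channels=*/0u);
//   std::vector<float> values(converter.valueCount());
//   converter.copyValues<nanovdb::ValueOnIndex>(values.data());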
779 
780 //================================================================================================
781 
782 template <typename SrcGridT>
783 CreateNanoGrid<SrcGridT>::CreateNanoGrid(const SrcGridT &srcGrid)
784  : mVerbose(0)
785  , mSrcNodeAccPtr(new SrcNodeAccT(srcGrid))
786  , mSrcNodeAcc(*mSrcNodeAccPtr)
787  , mStats(StatsMode::Default)
788  , mChecksum(ChecksumMode::Default)
789  , mDitherOn(false)
790  , mIncludeStats(true)
791  , mIncludeTiles(true)
792 {
793 }
794 
795 //================================================================================================
796 
797 template <typename SrcGridT>
798 CreateNanoGrid<SrcGridT>::CreateNanoGrid(const SrcNodeAccT &srcNodeAcc)
799  : mVerbose(0)
800  , mSrcNodeAccPtr(nullptr)
801  , mSrcNodeAcc(srcNodeAcc)
802  , mStats(StatsMode::Default)
803  , mChecksum(ChecksumMode::Default)
804  , mDitherOn(false)
805  , mIncludeStats(true)
806  , mIncludeTiles(true)
807 {
808 }
809 
810 //================================================================================================
811 
812 template <typename SrcGridT>
813 struct CreateNanoGrid<SrcGridT>::BlindMetaData
814 {
815  BlindMetaData(const std::string& name,// name + used to derive GridBlindDataSemantic
816  const std::string& type,// used to derive GridType of blind data
817  GridBlindDataClass dataClass,
818  size_t i, size_t valueCount, size_t valueSize)
819  : metaData(reinterpret_cast<GridBlindMetaData*>(new char[sizeof(GridBlindMetaData)]))
820  , order(i)// sorted id of meta data
821  , size(AlignUp<NANOVDB_DATA_ALIGNMENT>(valueCount * valueSize))
822  {
823  std::memset(metaData, 0, sizeof(GridBlindMetaData));// zero out all meta data
824  if (name.length()>=GridData::MaxNameSize) throw std::runtime_error("blind data name exceeds limit");
825  std::memcpy(metaData->mName, name.c_str(), name.length() + 1);
826  metaData->mValueCount = valueCount;
827  metaData->mSemantic = BlindMetaData::mapToSemantics(name);
828  metaData->mDataClass = dataClass;
829  metaData->mDataType = BlindMetaData::mapToType(type);
830  metaData->mValueSize = valueSize;
831  NANOVDB_ASSERT(metaData->isValid());
832  }
833  BlindMetaData(const std::string& name,// only name
834  GridBlindDataSemantic dataSemantic,
835  GridBlindDataClass dataClass,
837  size_t i, size_t valueCount, size_t valueSize)
838  : metaData(reinterpret_cast<GridBlindMetaData*>(new char[sizeof(GridBlindMetaData)]))
839  , order(i)// sorted id of meta data
840  , size(AlignUp<NANOVDB_DATA_ALIGNMENT>(valueCount * valueSize))
841  {
842  std::memset(metaData, 0, sizeof(GridBlindMetaData));// zero out all meta data
843  if (name.length()>=GridData::MaxNameSize) throw std::runtime_error("blind data name exceeds character limit");
844  std::memcpy(metaData->mName, name.c_str(), name.length() + 1);
845  metaData->mValueCount = valueCount;
846  metaData->mSemantic = dataSemantic;
847  metaData->mDataClass = dataClass;
848  metaData->mDataType = dataType;
849  metaData->mValueSize = valueSize;
850  NANOVDB_ASSERT(metaData->isValid());
851  }
852  ~BlindMetaData(){ delete [] reinterpret_cast<char*>(metaData); }
853  bool operator<(const BlindMetaData& other) const { return order < other.order; } // required by std::set
854  static GridType mapToType(const std::string& name)
855  {
856  GridType type = GridType::Unknown;
857  if ("uint32_t" == name) {
858  type = GridType::UInt32;
859  } else if ("float" == name) {
860  type = GridType::Float;
861  } else if ("vec3s"== name) {
862  type = GridType::Vec3f;
863  } else if ("int32" == name) {
864  type = GridType::Int32;
865  } else if ("int64" == name) {
866  type = GridType::Int64;
867  }
868  return type;
869  }
870  static GridBlindDataSemantic mapToSemantics(const std::string& name)
871  {
872  GridBlindDataSemantic semantic = GridBlindDataSemantic::Unknown;
873  if ("P" == name) {
874  semantic = GridBlindDataSemantic::PointPosition;
875  } else if ("V" == name) {
876  semantic = GridBlindDataSemantic::PointVelocity;
877  } else if ("Cd" == name) {
878  semantic = GridBlindDataSemantic::PointColor;
879  } else if ("N" == name) {
880  semantic = GridBlindDataSemantic::PointNormal;
881  } else if ("id" == name) {
882  semantic = GridBlindDataSemantic::PointId;
883  }
884  return semantic;
885  }
886  GridBlindMetaData *metaData;
887  const size_t order, size;
888 }; // CreateNanoGrid::BlindMetaData
889 
890 //================================================================================================
891 
892 template <typename SrcGridT>
893 template<typename DstBuildT, typename BufferT>
897 {
898  this->template preProcess<DstBuildT>();
899  auto handle = this->template initHandle<DstBuildT>(pool);
900  this->template postProcess<DstBuildT>();
901  return handle;
902 } // CreateNanoGrid::getHandle<T>
903 
904 //================================================================================================
905 
906 template <typename SrcGridT>
907 template<typename DstBuildT, typename OracleT, typename BufferT>
909 CreateNanoGrid<SrcGridT>::getHandle(const OracleT& oracle, const BufferT& pool)
910 {
911  this->template preProcess<DstBuildT, OracleT>(oracle);
912  auto handle = this->template initHandle<DstBuildT>(pool);
913  this->template postProcess<DstBuildT>();
914  return handle;
915 } // CreateNanoGrid::getHandle<FpN>
916 
917 //================================================================================================
918 
919 template <typename SrcGridT>
920 template<typename DstBuildT, typename BufferT>
923  bool includeStats,
924  bool includeTiles,
925  const BufferT &pool)
926 {
927  mIncludeStats = includeStats;
928  mIncludeTiles = includeTiles;
929  this->template preProcess<DstBuildT>(channels);
930  auto handle = this->template initHandle<DstBuildT>(pool);
931  this->template postProcess<DstBuildT>(channels);
932  return handle;
933 }// CreateNanoGrid::getHandle<ValueIndex or ValueOnIndex>
934 
935 //================================================================================================
936 
937 template <typename SrcGridT>
938 template <typename DstBuildT, typename BufferT>
939 GridHandle<BufferT> CreateNanoGrid<SrcGridT>::initHandle(const BufferT& pool)
940 {
941  mOffset.grid = 0;// grid is always stored at the start of the buffer!
942  mOffset.tree = NanoGrid<DstBuildT>::memUsage(); // grid ends and tree begins
943  mOffset.root = mOffset.tree + NanoTree<DstBuildT>::memUsage(); // tree ends and root node begins
944  mOffset.upper = mOffset.root + NanoRoot<DstBuildT>::memUsage(mSrcNodeAcc.root().getTableSize()); // root node ends and upper internal nodes begin
945  mOffset.lower = mOffset.upper + NanoUpper<DstBuildT>::memUsage()*mSrcNodeAcc.nodeCount(2); // upper internal nodes end and lower internal nodes begin
946  mOffset.leaf = mOffset.lower + NanoLower<DstBuildT>::memUsage()*mSrcNodeAcc.nodeCount(1); // lower internal nodes end and leaf nodes begin
947  mOffset.meta = mOffset.leaf + mLeafNodeSize;// leaf nodes end and blind meta data begins
948  mOffset.blind = mOffset.meta + sizeof(GridBlindMetaData)*mBlindMetaData.size(); // meta data ends and blind data begins
949  mOffset.size = mOffset.blind;// end of buffer
950  for (const auto& b : mBlindMetaData) mOffset.size += b.size; // accumulate all the blind data
951 
952  auto buffer = BufferT::create(mOffset.size, &pool);
953  mBufferPtr = buffer.data();
954 
955  // Concurrent processing of all tree levels!
956  invoke( [&](){this->template processLeafs<DstBuildT>();},
957  [&](){this->template processInternalNodes<DstBuildT, 1>();},
958  [&](){this->template processInternalNodes<DstBuildT, 2>();},
959  [&](){this->template processRoot<DstBuildT>();},
960  [&](){this->template processTree<DstBuildT>();},
961  [&](){this->template processGrid<DstBuildT>();} );
962 
963  return GridHandle<BufferT>(std::move(buffer));
964 } // CreateNanoGrid::initHandle
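// Note: as laid out above, the single allocation returned by initHandle is ordered as
// grid | tree | root | upper internal nodes | lower internal nodes | leaf nodes | blind meta data | blind data,
// with the corresponding byte offsets cached in mOffset for use by the process* methods.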
965 
966 //================================================================================================
967 
968 template <typename SrcGridT>
969 template <typename DstBuildT>
970 inline typename disable_if<is_same<FpN, DstBuildT>::value || BuildTraits<DstBuildT>::is_index>::type
971 CreateNanoGrid<SrcGridT>::preProcess()
972 {
973  if (const uint64_t pointCount = this->countPoints()) {
974 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
976  if (!mBlindMetaData.empty()) throw std::runtime_error("expected no blind meta data");
977  this->addBlindData("index",
981  pointCount,
982  sizeof(uint32_t));
984  if (!mBlindMetaData.empty()) throw std::runtime_error("expected no blind meta data");
985  auto &srcLeaf = mSrcNodeAcc.template node<0>(0);
986  const auto& attributeSet = srcLeaf.attributeSet();
987  const auto& descriptor = attributeSet.descriptor();
988  const auto& nameMap = descriptor.map();
989  for (auto it = nameMap.begin(); it != nameMap.end(); ++it) {
990  const size_t index = it->second;
991  auto& attArray = srcLeaf.constAttributeArray(index);
992  mBlindMetaData.emplace(it->first, // name used to derive semantics
993  descriptor.valueType(index), // type
995  index, // order
996  pointCount, // element count
997  attArray.valueTypeSize()); // element size
998  }
999  }
1000 #endif// end NANOVDB_USE_OPENVDB
1001  }
1002  if (mSrcNodeAcc.hasLongGridName()) {
1003  this->addBlindData("grid name",
1007  mSrcNodeAcc.getName().length() + 1, 1);
1008  }
1009  mLeafNodeSize = mSrcNodeAcc.nodeCount(0)*NanoLeaf<DstBuildT>::DataType::memUsage();
1010 }// CreateNanoGrid::preProcess<T>
1011 
1012 //================================================================================================
1013 
1014 template <typename SrcGridT>
1015 template <typename DstBuildT, typename OracleT>
1016 inline typename enable_if<is_same<FpN, DstBuildT>::value>::type
1017 CreateNanoGrid<SrcGridT>::preProcess(OracleT oracle)
1018 {
1019  static_assert(is_same<float, SrcValueT>::value, "preProcess<FpN>: expected SrcValueT == float");
1020 
1021  const size_t leafCount = mSrcNodeAcc.nodeCount(0);
1022  if (leafCount==0) {
1023  mLeafNodeSize = 0u;
1024  return;
1025  }
1026  mCodec.reset(new Codec[leafCount]);
1027 
1028  if constexpr(is_same<AbsDiff, OracleT>::value) {
1029  if (!oracle) oracle.init(mSrcNodeAcc.gridClass(), mSrcNodeAcc.root().background());
1030  }
1031 
1032  DitherLUT lut(mDitherOn);
1033  forEach(0, leafCount, 4, [&](const Range1D &r) {
1034  for (auto i=r.begin(); i!=r.end(); ++i) {
1035  const auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1036  float &min = mCodec[i].min = std::numeric_limits<float>::max();
1037  float &max = mCodec[i].max = -min;
1038  for (int j=0; j<512; ++j) {
1039  float v = srcLeaf.getValue(j);
1040  if (v<min) min = v;
1041  if (v>max) max = v;
1042  }
1043  const float range = max - min;
1044  uint8_t &logBitWidth = mCodec[i].log2 = 0;// 0,1,2,3,4 => 1,2,4,8,16 bits
1045  while (range > 0.0f && logBitWidth < 4u) {
1046  const uint32_t mask = (uint32_t(1) << (uint32_t(1) << logBitWidth)) - 1u;
1047  const float encode = mask/range;
1048  const float decode = range/mask;
1049  int j = 0;
1050  do {
1051  const float exact = srcLeaf.getValue(j);//data[j];// exact value
1052  const uint32_t code = uint32_t(encode*(exact - min) + lut(j));
1053  const float approx = code * decode + min;// approximate value
1054  j += oracle(exact, approx) ? 1 : 513;
1055  } while(j < 512);
1056  if (j == 512) break;
1057  ++logBitWidth;
1058  }
1059  }
1060  });
1061 
1062  auto getOffset = [&](size_t i){
1063  --i;
1064  return mCodec[i].offset + NanoLeaf<DstBuildT>::DataType::memUsage(1u << mCodec[i].log2);
1065  };
1066  mCodec[0].offset = NanoGrid<FpN>::memUsage() +
1067  NanoTree<FpN>::memUsage() +
1068  NanoRoot<FpN>::memUsage(mSrcNodeAcc.root().getTableSize()) +
1069  NanoUpper<FpN>::memUsage()*mSrcNodeAcc.nodeCount(2) +
1070  NanoLower<FpN>::memUsage()*mSrcNodeAcc.nodeCount(1);
1071  for (size_t i=1; i<leafCount; ++i) mCodec[i].offset = getOffset(i);
1072  mLeafNodeSize = getOffset(leafCount);
1073 
1074  if (mVerbose) {
1075  uint32_t counters[5+1] = {0};
1076  ++counters[mCodec[0].log2];
1077  for (size_t i=1; i<leafCount; ++i) ++counters[mCodec[i].log2];
1078  std::cout << "\n" << oracle << std::endl;
1079  std::cout << "Dithering: " << (mDitherOn ? "enabled" : "disabled") << std::endl;
1080  float avg = 0.0f;
1081  for (uint32_t i=0; i<=5; ++i) {
1082  if (uint32_t n = counters[i]) {
1083  avg += n * float(1 << i);
1084  printf("%2i bits: %6u leaf nodes, i.e. %4.1f%%\n",1<<i, n, 100.0f*n/float(leafCount));
1085  }
1086  }
1087  printf("%4.1f bits per value on average\n", avg/float(leafCount));
1088  }
1089 
1090  if (mSrcNodeAcc.hasLongGridName()) {
1091  this->addBlindData("grid name",
1095  mSrcNodeAcc.getName().length() + 1, 1);
1096  }
1097 }// CreateNanoGrid::preProcess<FpN>
1098 
1099 //================================================================================================
1100 
1101 template <typename SrcGridT>
1102 template <typename DstBuildT, int LEVEL>
1103 inline typename enable_if<BuildTraits<DstBuildT>::is_index, uint64_t>::type
1104 CreateNanoGrid<SrcGridT>::countTileValues(uint64_t valueCount)
1105 {
1106  const uint64_t stats = mIncludeStats ? 4u : 0u;// minimum, maximum, average, and deviation
1107  mValIdx[LEVEL].clear();
1108  mValIdx[LEVEL].resize(mSrcNodeAcc.nodeCount(LEVEL) + 1, stats);// minimum 1 entry
1109  forEach(1, mValIdx[LEVEL].size(), 8, [&](const Range1D& r){
1110  for (auto i = r.begin(); i!=r.end(); ++i) {
1111  auto &srcNode = mSrcNodeAcc.template node<LEVEL>(i-1);
1112  if constexpr(BuildTraits<DstBuildT>::is_onindex) {// resolved at compile time
1113  mValIdx[LEVEL][i] += srcNode.getValueMask().countOn();
1114  } else {
1115  static const uint64_t maxTileCount = uint64_t(1u) << 3*srcNode.LOG2DIM;
1116  mValIdx[LEVEL][i] += maxTileCount - srcNode.getChildMask().countOn();
1117  }
1118  }
1119  });
1120  mValIdx[LEVEL][0] = valueCount;
1121  for (size_t i=1; i<mValIdx[LEVEL].size(); ++i) mValIdx[LEVEL][i] += mValIdx[LEVEL][i-1];// pre-fixed sum
1122  return mValIdx[LEVEL].back();
1123 }// CreateNanoGrid::countTileValues<ValueIndex or ValueOnIndex>
1124 
1125 //================================================================================================
1126 
1127 template <typename SrcGridT>
1128 template <typename DstBuildT>
1129 inline typename enable_if<BuildTraits<DstBuildT>::is_index, uint64_t>::type
1130 CreateNanoGrid<SrcGridT>::countValues()
1131 {
1132  const uint64_t stats = mIncludeStats ? 4u : 0u;// minimum, maximum, average, and deviation
1133  uint64_t valueCount = 1u;// offset 0 corresponds to the background value
1134  if (mIncludeTiles) {
1135  if constexpr(BuildTraits<DstBuildT>::is_onindex) {
1136  for (auto it = mSrcNodeAcc.root().cbeginValueOn(); it; ++it) ++valueCount;
1137  } else {
1138  for (auto it = mSrcNodeAcc.root().cbeginValueAll(); it; ++it) ++valueCount;
1139  }
1140  valueCount += stats;// optionally append stats for the root node
1141  valueCount = countTileValues<DstBuildT, 2>(valueCount);
1142  valueCount = countTileValues<DstBuildT, 1>(valueCount);
1143  }
1144  mValIdx[0].clear();
1145  mValIdx[0].resize(mSrcNodeAcc.nodeCount(0) + 1, 512u + stats);// minimum 1 entry
1146  if constexpr(BuildTraits<DstBuildT>::is_onindex) {
1147  forEach(1, mValIdx[0].size(), 8, [&](const Range1D& r) {
1148  for (auto i = r.begin(); i != r.end(); ++i) {
1149  mValIdx[0][i] = stats;
1150  mValIdx[0][i] += mSrcNodeAcc.template node<0>(i-1).getValueMask().countOn();
1151  }
1152  });
1153  }
1154  mValIdx[0][0] = valueCount;
1155  prefixSum(mValIdx[0], true);// inclusive prefix sum
1156  return mValIdx[0].back();
1157 }// CreateNanoGrid::countValues<ValueIndex or ValueOnIndex>()
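// Note: the ordering established above places the background value at index 0, followed
// (optionally) by the root tile values and root stats, then the upper and lower internal
// tile values, and finally the per-leaf values, whose starting offsets are the inclusive
// prefix sums stored in mValIdx[0].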
1158 
1159 //================================================================================================
1160 
1161 template <typename SrcGridT>
1162 template <typename DstBuildT>
1163 inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1164 CreateNanoGrid<SrcGridT>::preProcess(uint32_t channels)
1165 {
1166  const uint64_t valueCount = this->template countValues<DstBuildT>();
1167  mLeafNodeSize = mSrcNodeAcc.nodeCount(0)*NanoLeaf<DstBuildT>::DataType::memUsage();
1168 
1169  uint32_t order = mBlindMetaData.size();
1170  for (uint32_t i=0; i<channels; ++i) {
1171  mBlindMetaData.emplace("channel_"+std::to_string(i),
1172  toStr(mapToGridType<SrcValueT>()),
1174  order++,
1175  valueCount,
1176  sizeof(SrcValueT));
1177  }
1178  if (mSrcNodeAcc.hasLongGridName()) {
1179  this->addBlindData("grid name",
1183  mSrcNodeAcc.getName().length() + 1, 1);
1184  }
1185 }// preProcess<ValueIndex or ValueOnIndex>
1186 
1187 //================================================================================================
1188 
1189 template <typename SrcGridT>
1190 template <typename DstBuildT>
1191 inline typename disable_if<BuildTraits<DstBuildT>::is_special>::type
1192 CreateNanoGrid<SrcGridT>::processLeafs()
1193 {
1194  using DstDataT = typename NanoLeaf<DstBuildT>::DataType;
1195  using DstValueT = typename DstDataT::ValueType;
1196  static_assert(DstDataT::FIXED_SIZE, "Expected destination LeafNode<T> to have fixed size");
1197  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1198  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1199  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1200  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1201  if (DstDataT::padding()>0u) {
1202  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1203  std::memset(reinterpret_cast<void*>(dstData), 0, DstDataT::memUsage());
1204  } else {
1205  dstData->mBBoxDif[0] = dstData->mBBoxDif[1] = dstData->mBBoxDif[2] = 0u;
1206  dstData->mFlags = 0u;// enable rendering, no bbox, no stats
1207  dstData->mMinimum = dstData->mMaximum = typename DstDataT::ValueType();
1208  dstData->mAverage = dstData->mStdDevi = 0;
1209  }
1210  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1211  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1212  DstValueT *dst = dstData->mValues;
1213  if constexpr(is_same<DstValueT, SrcValueT>::value && SrcNodeAccT::IS_OPENVDB) {
1214  const SrcValueT *src = srcLeaf.buffer().data();
1215  for (auto *end = dst + 512u; dst != end; dst += 4, src += 4) {
1216  dst[0] = src[0]; // copy *all* voxel values in sets of four, i.e. loop-unrolling
1217  dst[1] = src[1];
1218  dst[2] = src[2];
1219  dst[3] = src[3];
1220  }
1221  } else {
1222  for (uint32_t j=0; j<512u; ++j) *dst++ = static_cast<DstValueT>(srcLeaf.getValue(j));
1223  }
1224  }
1225  });
1226 } // CreateNanoGrid::processLeafs<T>
1227 
1228 //================================================================================================
1229 
1230 template <typename SrcGridT>
1231 template <typename DstBuildT>
1232 inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1233 CreateNanoGrid<SrcGridT>::processLeafs()
1234 {
1235  using DstDataT = typename NanoLeaf<DstBuildT>::DataType;
1236  static_assert(DstDataT::FIXED_SIZE, "Expected destination LeafNode<ValueIndex> to have fixed size");
1237  static_assert(DstDataT::padding()==0u, "Expected leaf nodes to have no padding");
1238 
1239  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1240  const uint8_t flags = mIncludeStats ? 16u : 0u;// 4th bit indicates stats
1241  DstDataT *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();// fixed size
1242  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1243  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1244  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1245  dstData->mBBoxDif[0] = dstData->mBBoxDif[1] = dstData->mBBoxDif[2] = 0u;
1246  dstData->mFlags = flags;
1247  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1248  dstData->mOffset = mValIdx[0][i];
1249  if constexpr(BuildTraits<DstBuildT>::is_onindex) {
1250  const uint64_t *w = dstData->mValueMask.words();
1251 #ifdef USE_OLD_VALUE_ON_INDEX
1252  int32_t sum = CountOn(*w++);
1253  uint8_t *p = reinterpret_cast<uint8_t*>(&dstData->mPrefixSum), *q = p + 7;
1254  for (int j=0; j<7; ++j) {
1255  *p++ = sum & 255u;
1256  *q |= (sum >> 8) << j;
1257  sum += CountOn(*w++);
1258  }
1259 #else
1260  uint64_t &prefixSum = dstData->mPrefixSum, sum = CountOn(*w++);
1261  prefixSum = sum;
1262  for (int n = 9; n < 55; n += 9) {// n=i*9 where i=1,2,..6
1263  sum += CountOn(*w++);
1264  prefixSum |= sum << n;// each pre-fixed sum is encoded in 9 bits
1265  }
1266 #endif
1267  } else {
1268  dstData->mPrefixSum = 0u;
1269  }
1270  if constexpr(BuildTraits<DstBuildT>::is_indexmask) dstData->mMask = dstData->mValueMask;
1271  }
1272  });
1273 } // CreateNanoGrid::processLeafs<ValueIndex or ValueOnIndex>
1274 
1275 //================================================================================================
1276 
1277 template <typename SrcGridT>
1278 template <typename DstBuildT>
1279 inline typename enable_if<is_same<ValueMask, DstBuildT>::value>::type
1280 CreateNanoGrid<SrcGridT>::processLeafs()
1281 {
1282  using DstDataT = typename NanoLeaf<ValueMask>::DataType;
1283  static_assert(DstDataT::FIXED_SIZE, "Expected destination LeafNode<ValueMask> to have fixed size");
1284  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1285  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1286  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1287  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1288  if (DstDataT::padding()>0u) {
1289  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1290  std::memset(reinterpret_cast<void*>(dstData), 0, DstDataT::memUsage());
1291  } else {
1292  dstData->mBBoxDif[0] = dstData->mBBoxDif[1] = dstData->mBBoxDif[2] = 0u;
1293  dstData->mFlags = 0u;// enable rendering, no bbox, no stats
1294  dstData->mPadding[0] = dstData->mPadding[1] = 0u;
1295  }
1296  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1297  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1298  }
1299  });
1300 } // CreateNanoGrid::processLeafs<ValueMask>
1301 
1302 //================================================================================================
1303 
1304 template <typename SrcGridT>
1305 template <typename DstBuildT>
1306 inline typename enable_if<is_same<bool, DstBuildT>::value>::type
1307 CreateNanoGrid<SrcGridT>::processLeafs()
1308 {
1309  using DstDataT = typename NanoLeaf<bool>::DataType;
1310  static_assert(DstDataT::FIXED_SIZE, "Expected destination LeafNode<bool> to have fixed size");
1311  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1312  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1313  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1314  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1315  if (DstDataT::padding()>0u) {
1316  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1317  std::memset(reinterpret_cast<void*>(dstData), 0, DstDataT::memUsage());
1318  } else {
1319  dstData->mBBoxDif[0] = dstData->mBBoxDif[1] = dstData->mBBoxDif[2] = 0u;
1320  dstData->mFlags = 0u;// enable rendering, no bbox, no stats
1321  }
1322  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1323  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1324  if constexpr(!is_same<bool, SrcBuildT>::value) {
1325  for (int j=0; j<512; ++j) dstData->mValues.set(j, static_cast<bool>(srcLeaf.getValue(j)));
1326  } else if constexpr(SrcNodeAccT::IS_OPENVDB) {
1327  dstData->mValues = *reinterpret_cast<const Mask<3>*>(srcLeaf.buffer().data());
1328  } else if constexpr(SrcNodeAccT::IS_NANOVDB) {
1329  dstData->mValues = srcLeaf.data()->mValues;
1330  } else {// build::Leaf
1331  dstData->mValues = srcLeaf.mValues; // copy value mask
1332  }
1333  }
1334  });
1335 } // CreateNanoGrid::processLeafs<bool>
1336 
1337 //================================================================================================
1338 
1339 template <typename SrcGridT>
1340 template <typename DstBuildT>
1341 inline typename enable_if<BuildTraits<DstBuildT>::is_FpX>::type
1342 CreateNanoGrid<SrcGridT>::processLeafs()
1343 {
1344  using DstDataT = typename NanoLeaf<DstBuildT>::DataType;
1345  static_assert(DstDataT::FIXED_SIZE, "Expected destination LeafNode<Fp4|Fp8|Fp16> to have fixed size");
1346  using ArrayT = typename DstDataT::ArrayType;
1347  static_assert(is_same<float, SrcValueT>::value, "Expected ValueT == float");
1348  using FloatT = typename std::conditional<DstDataT::bitWidth()>=16, double, float>::type;// 16 compression and higher requires double
1349  static constexpr FloatT UNITS = FloatT((1 << DstDataT::bitWidth()) - 1);// # of unique non-zero values
1350  DitherLUT lut(mDitherOn);
1351 
1352  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1353  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1354  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1355  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1356  if (DstDataT::padding()>0u) {
1357  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1358  std::memset(reinterpret_cast<void*>(dstData), 0, DstDataT::memUsage());
1359  } else {
1360  dstData->mFlags = dstData->mBBoxDif[2] = dstData->mBBoxDif[1] = dstData->mBBoxDif[0] = 0u;
1361  dstData->mDev = dstData->mAvg = dstData->mMax = dstData->mMin = 0u;
1362  }
1363  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1364  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1365  // compute extrema values
1366  float min = std::numeric_limits<float>::max(), max = -min;
1367  for (uint32_t j=0; j<512u; ++j) {
1368  const float v = srcLeaf.getValue(j);
1369  if (v < min) min = v;
1370  if (v > max) max = v;
1371  }
1372  dstData->init(min, max, DstDataT::bitWidth());
1373  // perform quantization relative to the values in the current leaf node
1374  const FloatT encode = UNITS/(max-min);
1375  uint32_t offset = 0;
1376  auto quantize = [&]()->ArrayT{
1377  const ArrayT tmp = static_cast<ArrayT>(encode * (srcLeaf.getValue(offset) - min) + lut(offset));
1378  ++offset;
1379  return tmp;
1380  };
1381  auto *code = reinterpret_cast<ArrayT*>(dstData->mCode);
1382  if (is_same<Fp4, DstBuildT>::value) {// resolved at compile-time
1383  for (uint32_t j=0; j<128u; ++j) {
1384  auto tmp = quantize();
1385  *code++ = quantize() << 4 | tmp;
1386  tmp = quantize();
1387  *code++ = quantize() << 4 | tmp;
1388  }
1389  } else {
1390  for (uint32_t j=0; j<128u; ++j) {
1391  *code++ = quantize();
1392  *code++ = quantize();
1393  *code++ = quantize();
1394  *code++ = quantize();
1395  }
1396  }
1397  }
1398  });
1399 } // CreateNanoGrid::processLeafs<Fp4, Fp8, Fp16>
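/*!
  \brief A minimal sketch of the per-leaf fixed-bit quantization above, assuming an 8-bit
         code (Fp8) and ignoring the dither offset; the helper names are hypothetical.
  \code
  #include <cstdint>
  inline uint8_t encodeFp8(float v, float min, float max)
  {
      const float encode = 255.0f/(max - min);// UNITS/(max - min) with UNITS = 2^8 - 1
      return static_cast<uint8_t>(encode*(v - min));// the member function above also adds lut(offset)
  }
  inline float decodeFp8(uint8_t code, float min, float max)
  {
      return min + float(code)*(max - min)/255.0f;// inverse of the encode map
  }
  \endcode
*/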
1400 
1401 //================================================================================================
1402 
1403 template <typename SrcGridT>
1404 template <typename DstBuildT>
1405 inline typename enable_if<is_same<FpN, DstBuildT>::value>::type
1406 CreateNanoGrid<SrcGridT>::processLeafs()
1407 {
1408  static_assert(is_same<float, SrcValueT>::value, "Expected SrcValueT == float");
1409  DitherLUT lut(mDitherOn);
1410  forEach(0, mSrcNodeAcc.nodeCount(0), 8, [&](const Range1D& r) {
1411  for (auto i = r.begin(); i != r.end(); ++i) {
1412  auto &srcLeaf = mSrcNodeAcc.template node<0>(i);
1413  auto *dstData = this->template dstNode<DstBuildT,0>(i)->data();
1414  dstData->mBBoxMin = srcLeaf.origin(); // copy origin of node
1415  dstData->mBBoxDif[0] = dstData->mBBoxDif[1] = dstData->mBBoxDif[2] = 0u;
1416  const uint8_t logBitWidth = mCodec[i].log2;
1417  dstData->mFlags = logBitWidth << 5;// pack logBitWidth into 3 MSB of mFlags
1418  dstData->mValueMask = srcLeaf.getValueMask(); // copy value mask
1419  const float min = mCodec[i].min, max = mCodec[i].max;
1420  dstData->init(min, max, uint8_t(1) << logBitWidth);
1421  // perform quantization relative to the values in the current leaf node
1422  uint32_t offset = 0;
1423  float encode = 0.0f;
1424  auto quantize = [&]()->uint8_t{
1425  const uint8_t tmp = static_cast<uint8_t>(encode * (srcLeaf.getValue(offset) - min) + lut(offset));
1426  ++offset;
1427  return tmp;
1428  };
1429  auto *dst = reinterpret_cast<uint8_t*>(dstData+1);
1430  switch (logBitWidth) {
1431  case 0u: {// 1 bit
1432  encode = 1.0f/(max - min);
1433  for (int j=0; j<64; ++j) {
1434  uint8_t a = 0;
1435  for (int k=0; k<8; ++k) a |= quantize() << k;
1436  *dst++ = a;
1437  }
1438  }
1439  break;
1440  case 1u: {// 2 bits
1441  encode = 3.0f/(max - min);
1442  for (int j=0; j<128; ++j) {
1443  auto a = quantize();
1444  a |= quantize() << 2;
1445  a |= quantize() << 4;
1446  *dst++ = quantize() << 6 | a;
1447  }
1448  }
1449  break;
1450  case 2u: {// 4 bits
1451  encode = 15.0f/(max - min);
1452  for (int j=0; j<128; ++j) {
1453  auto a = quantize();
1454  *dst++ = quantize() << 4 | a;
1455  a = quantize();
1456  *dst++ = quantize() << 4 | a;
1457  }
1458  }
1459  break;
1460  case 3u: {// 8 bits
1461  encode = 255.0f/(max - min);
1462  for (int j=0; j<128; ++j) {
1463  *dst++ = quantize();
1464  *dst++ = quantize();
1465  *dst++ = quantize();
1466  *dst++ = quantize();
1467  }
1468  }
1469  break;
1470  default: {// 16 bits - special implementation using higher bit-precision
1471  auto *dst = reinterpret_cast<uint16_t*>(dstData+1);
1472  const double encode = 65535.0/(max - min);// note that double is required!
1473  for (int j=0; j<128; ++j) {
1474  *dst++ = uint16_t(encode * (srcLeaf.getValue(offset) - min) + lut(offset)); ++offset;
1475  *dst++ = uint16_t(encode * (srcLeaf.getValue(offset) - min) + lut(offset)); ++offset;
1476  *dst++ = uint16_t(encode * (srcLeaf.getValue(offset) - min) + lut(offset)); ++offset;
1477  *dst++ = uint16_t(encode * (srcLeaf.getValue(offset) - min) + lut(offset)); ++offset;
1478  }
1479  }
1480  }// end switch
1481  }
1482  });// kernel
1483 } // CreateNanoGrid::processLeafs<FpN>
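/*!
  \brief A small sketch of how the logBitWidth packed into mFlags above translates into the
         per-leaf payload size; the helper name is hypothetical.
  \code
  #include <cstddef>
  #include <cstdint>
  inline size_t fpnLeafPayloadBytes(uint8_t logBitWidth)
  {
      // 512 values per 8^3 leaf, each stored with (1 << logBitWidth) bits, i.e.
      // 1, 2, 4, 8 or 16 bits -> 64, 128, 256, 512 or 1024 bytes
      return (size_t(512) << logBitWidth) >> 3;
  }
  \endcode
*/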
1484 
1485 //================================================================================================
1486 
1487 template <typename SrcGridT>
1488 template <typename DstBuildT, int LEVEL>
1489 inline typename enable_if<!BuildTraits<DstBuildT>::is_index>::type
1490 CreateNanoGrid<SrcGridT>::processInternalNodes()
1491 {
1492  using DstNodeT = typename NanoNode<DstBuildT, LEVEL>::type;
1493  using DstValueT = typename DstNodeT::ValueType;
1494  using DstChildT = typename NanoNode<DstBuildT, LEVEL-1>::type;
1495  static_assert(LEVEL == 1 || LEVEL == 2, "Expected internal node");
1496 
1497  const uint64_t nodeCount = mSrcNodeAcc.nodeCount(LEVEL);
1498  if (nodeCount > 0) {// compute and temporarily encode IDs of child nodes
1499  uint64_t childCount = 0;
1500  auto *dstData = this->template dstNode<DstBuildT,LEVEL>(0)->data();
1501  for (uint64_t i=0; i<nodeCount; ++i) {
1502  dstData[i].mFlags = childCount;
1503  childCount += mSrcNodeAcc.template node<LEVEL>(i).getChildMask().countOn();
1504  }
1505  }
1506 
1507  forEach(0, nodeCount, 4, [&](const Range1D& r) {
1508  auto *dstData = this->template dstNode<DstBuildT,LEVEL>(r.begin())->data();
1509  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1510  auto &srcNode = mSrcNodeAcc.template node<LEVEL>(i);
1511  uint64_t childID = dstData->mFlags;
1512  if (DstNodeT::DataType::padding()>0u) {
1513  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1514  std::memset(reinterpret_cast<void*>(dstData), 0, DstNodeT::memUsage());
1515  } else {
1516  dstData->mFlags = 0;// enable rendering, no bbox, no stats
1517  dstData->mMinimum = dstData->mMaximum = typename DstNodeT::ValueType();
1518  dstData->mAverage = dstData->mStdDevi = 0;
1519  }
1520  dstData->mBBox[0] = srcNode.origin(); // copy origin of node
1521  dstData->mValueMask = srcNode.getValueMask(); // copy value mask
1522  dstData->mChildMask = srcNode.getChildMask(); // copy child mask
1523  for (auto it = srcNode.cbeginChildAll(); it; ++it) {
1524  SrcValueT value{}; // default initialization
1525  if (it.probeChild(value)) {
1526  DstChildT *dstChild = this->template dstNode<DstBuildT,LEVEL-1>(childID++);// might be Leaf<FpN>
1527  dstData->setChild(it.pos(), dstChild);
1528  } else {
1529  dstData->setValue(it.pos(), static_cast<DstValueT>(value));
1530  }
1531  }
1532  }
1533  });
1534 } // CreateNanoGrid::processInternalNodes<T>
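/*!
  \brief A sketch of the two-pass child-ID scheme above: the serial loop stores an exclusive
         prefix sum of per-node child counts in mFlags, giving each internal node the index
         of its first child before the parallel pass links the children. Hypothetical helper:
  \code
  #include <cstdint>
  #include <vector>
  inline std::vector<uint64_t> firstChildIDs(const std::vector<uint64_t>& childCounts)
  {
      std::vector<uint64_t> ids(childCounts.size());
      uint64_t sum = 0;
      for (size_t i = 0; i < childCounts.size(); ++i) { ids[i] = sum; sum += childCounts[i]; }
      return ids;// e.g. counts {5, 2, 7} -> IDs {0, 5, 7}
  }
  \endcode
*/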
1535 
1536 //================================================================================================
1537 
1538 template <typename SrcGridT>
1539 template <typename DstBuildT, int LEVEL>
1540 inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1541 CreateNanoGrid<SrcGridT>::processInternalNodes()
1542 {
1543  using DstNodeT = typename NanoNode<DstBuildT, LEVEL>::type;
1544  using DstChildT = typename NanoNode<DstBuildT, LEVEL-1>::type;
1545  static_assert(LEVEL == 1 || LEVEL == 2, "Expected internal node");
1546  static_assert(DstNodeT::DataType::padding()==0u, "Expected internal nodes to have no padding");
1547 
1548  const uint64_t nodeCount = mSrcNodeAcc.nodeCount(LEVEL);
1549  if (nodeCount > 0) {// compute and temporarily encode IDs of child nodes
1550  uint64_t childCount = 0;
1551  auto *dstData = this->template dstNode<DstBuildT,LEVEL>(0)->data();
1552  for (uint64_t i=0; i<nodeCount; ++i) {
1553  dstData[i].mFlags = childCount;
1554  childCount += mSrcNodeAcc.template node<LEVEL>(i).getChildMask().countOn();
1555  }
1556  }
1557 
1558  forEach(0, nodeCount, 4, [&](const Range1D& r) {
1559  auto *dstData = this->template dstNode<DstBuildT,LEVEL>(r.begin())->data();
1560  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1561  auto &srcNode = mSrcNodeAcc.template node<LEVEL>(i);
1562  uint64_t childID = dstData->mFlags;
1563  dstData->mFlags = 0u;
1564  dstData->mBBox[0] = srcNode.origin(); // copy origin of node
1565  dstData->mValueMask = srcNode.getValueMask(); // copy value mask
1566  dstData->mChildMask = srcNode.getChildMask(); // copy child mask
1567  uint64_t n = mIncludeTiles ? mValIdx[LEVEL][i] : 0u;
1568  for (auto it = srcNode.cbeginChildAll(); it; ++it) {
1569  SrcValueT value;
1570  if (it.probeChild(value)) {
1571  DstChildT *dstChild = this->template dstNode<DstBuildT,LEVEL-1>(childID++);// might be Leaf<FpN>
1572  dstData->setChild(it.pos(), dstChild);
1573  } else {
1574  uint64_t m = 0u;
1575  if (mIncludeTiles && !((BuildTraits<DstBuildT>::is_onindex) && dstData->mValueMask.isOff(it.pos()))) m = n++;
1576  dstData->setValue(it.pos(), m);
1577  }
1578  }
1579  if (mIncludeTiles && mIncludeStats) {// stats are always placed after the tile values
1580  dstData->mMinimum = n++;
1581  dstData->mMaximum = n++;
1582  dstData->mAverage = n++;
1583  dstData->mStdDevi = n++;
1584  } else {// if not tiles or stats set stats to the background offset
1585  dstData->mMinimum = 0u;
1586  dstData->mMaximum = 0u;
1587  dstData->mAverage = 0u;
1588  dstData->mStdDevi = 0u;
1589  }
1590  }
1591  });
1592 } // CreateNanoGrid::processInternalNodes<ValueIndex or ValueOnIndex>
1593 
1594 //================================================================================================
1595 
1596 template <typename SrcGridT>
1597 template <typename DstBuildT>
1598 inline typename enable_if<!BuildTraits<DstBuildT>::is_index>::type
1599 CreateNanoGrid<SrcGridT>::processRoot()
1600 {
1601  using DstRootT = NanoRoot<DstBuildT>;
1602  using DstValueT = typename DstRootT::ValueType;
1603  auto &srcRoot = mSrcNodeAcc.root();
1604  auto *dstData = this->template dstRoot<DstBuildT>()->data();
1605  const uint32_t tableSize = srcRoot.getTableSize();
1606  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1607  if (DstRootT::DataType::padding()>0) std::memset(reinterpret_cast<void*>(dstData), 0, DstRootT::memUsage(tableSize));
1608  dstData->mTableSize = tableSize;
1609  dstData->mMinimum = dstData->mMaximum = dstData->mBackground = srcRoot.background();
1610  dstData->mBBox = CoordBBox(); // set to an empty bounding box
1611  if (tableSize==0) return;
1612  auto *dstChild = this->template dstNode<DstBuildT, 2>(0);// fixed size and linear in memory
1613  auto *dstTile = dstData->tile(0);// fixed size and linear in memory
1614  for (auto it = srcRoot.cbeginChildAll(); it; ++it, ++dstTile) {
1615  SrcValueT value;
1616  if (it.probeChild(value)) {
1617  dstTile->setChild(it.getCoord(), dstChild++, dstData);
1618  } else {
1619  dstTile->setValue(it.getCoord(), it.isValueOn(), static_cast<DstValueT>(value));
1620  }
1621  }
1622 } // CreateNanoGrid::processRoot<T>
1623 
1624 //================================================================================================
1625 
1626 template <typename SrcGridT>
1627 template <typename DstBuildT>
1628 inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1629 CreateNanoGrid<SrcGridT>::processRoot()
1630 {
1631  using DstRootT = NanoRoot<DstBuildT>;
1632  auto &srcRoot = mSrcNodeAcc.root();
1633  auto *dstData = this->template dstRoot<DstBuildT>()->data();
1634  const uint32_t tableSize = srcRoot.getTableSize();
1635  // Cast to void* to avoid compiler warning about missing trivial copy-assignment
1636  if (DstRootT::DataType::padding()>0) std::memset(reinterpret_cast<void*>(dstData), 0, DstRootT::memUsage(tableSize));
1637  dstData->mTableSize = tableSize;
1638  dstData->mBackground = 0u;
1639  uint64_t valueCount = 0u;// the first entry is always the background value
1640  dstData->mBBox = CoordBBox(); // set to an empty/invalid bounding box
1641 
1642  if (tableSize>0) {
1643  auto *dstChild = this->template dstNode<DstBuildT, 2>(0);// fixed size and linear in memory
1644  auto *dstTile = dstData->tile(0);// fixed size and linear in memory
1645  for (auto it = srcRoot.cbeginChildAll(); it; ++it, ++dstTile) {
1646  SrcValueT tmp;
1647  if (it.probeChild(tmp)) {
1648  dstTile->setChild(it.getCoord(), dstChild++, dstData);
1649  } else {
1650  dstTile->setValue(it.getCoord(), it.isValueOn(), 0u);
1651  if (mIncludeTiles && !((BuildTraits<DstBuildT>::is_onindex) && !dstTile->state)) dstTile->value = ++valueCount;
1652  }
1653  }
1654  }
1655  if (mIncludeTiles && mIncludeStats) {// stats are always placed after the tile values
1656  dstData->mMinimum = ++valueCount;
1657  dstData->mMaximum = ++valueCount;
1658  dstData->mAverage = ++valueCount;
1659  dstData->mStdDevi = ++valueCount;
1660  } else if (dstData->padding()==0) {
1661  dstData->mMinimum = 0u;
1662  dstData->mMaximum = 0u;
1663  dstData->mAverage = 0u;
1664  dstData->mStdDevi = 0u;
1665  }
1666 } // CreateNanoGrid::processRoot<ValueIndex or ValueOnIndex>
1667 
1668 //================================================================================================
1669 
1670 template <typename SrcGridT>
1671 template <typename DstBuildT>
1672 void CreateNanoGrid<SrcGridT>::processTree()
1673 {
1674  const uint64_t nodeCount[3] = {mSrcNodeAcc.nodeCount(0), mSrcNodeAcc.nodeCount(1), mSrcNodeAcc.nodeCount(2)};
1675  auto *dstTree = this->template dstTree<DstBuildT>();
1676  auto *dstData = dstTree->data();
1677  dstData->setRoot( this->template dstRoot<DstBuildT>() );
1678 
1679  dstData->setFirstNode(nodeCount[2] ? this->template dstNode<DstBuildT, 2>(0) : nullptr);
1680  dstData->setFirstNode(nodeCount[1] ? this->template dstNode<DstBuildT, 1>(0) : nullptr);
1681  dstData->setFirstNode(nodeCount[0] ? this->template dstNode<DstBuildT, 0>(0) : nullptr);
1682 
1683  dstData->mNodeCount[0] = static_cast<uint32_t>(nodeCount[0]);
1684  dstData->mNodeCount[1] = static_cast<uint32_t>(nodeCount[1]);
1685  dstData->mNodeCount[2] = static_cast<uint32_t>(nodeCount[2]);
1686 
1687  // Count number of active leaf level tiles
1688  dstData->mTileCount[0] = reduce(Range1D(0,nodeCount[1]), uint32_t(0), [&](Range1D &r, uint32_t sum){
1689  for (auto i=r.begin(); i!=r.end(); ++i) sum += mSrcNodeAcc.template node<1>(i).getValueMask().countOn();
1690  return sum;}, std::plus<uint32_t>());
1691 
1692  // Count number of active lower internal node tiles
1693  dstData->mTileCount[1] = reduce(Range1D(0,nodeCount[2]), uint32_t(0), [&](Range1D &r, uint32_t sum){
1694  for (auto i=r.begin(); i!=r.end(); ++i) sum += mSrcNodeAcc.template node<2>(i).getValueMask().countOn();
1695  return sum;}, std::plus<uint32_t>());
1696 
1697  // Count number of active upper internal node tiles
1698  dstData->mTileCount[2] = 0;
1699  for (auto it = mSrcNodeAcc.root().cbeginValueOn(); it; ++it) dstData->mTileCount[2] += 1;
1700 
1701  // Count number of active voxels
1702  dstData->mVoxelCount = reduce(Range1D(0, nodeCount[0]), uint64_t(0), [&](Range1D &r, uint64_t sum){
1703  for (auto i=r.begin(); i!=r.end(); ++i) sum += mSrcNodeAcc.template node<0>(i).getValueMask().countOn();
1704  return sum;}, std::plus<uint64_t>());
1705 
1706  dstData->mVoxelCount += uint64_t(dstData->mTileCount[0]) << 9;// = 3 * 3
1707  dstData->mVoxelCount += uint64_t(dstData->mTileCount[1]) << 21;// = 3 * (3+4)
1708  dstData->mVoxelCount += uint64_t(dstData->mTileCount[2]) << 36;// = 3 * (3+4+5)
1709 
1710 } // CreateNanoGrid::processTree
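/*!
  \brief A sketch of the voxel accounting above: each active tile stands for a dense block of
         voxels, so the tile counts are scaled by the block size one level below the tile.
         The helper name is hypothetical.
  \code
  #include <cstdint>
  inline uint64_t totalActiveVoxels(uint64_t activeLeafVoxels, const uint32_t tileCount[3])
  {
      return activeLeafVoxels
           + (uint64_t(tileCount[0]) << 9)  // leaf-sized tiles:       8^3 = 2^9  voxels each
           + (uint64_t(tileCount[1]) << 21) // lower-internal tiles: 128^3 = 2^21 voxels each
           + (uint64_t(tileCount[2]) << 36);// upper-internal tiles: 4096^3 = 2^36 voxels each
  }
  \endcode
*/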
1711 
1712 //================================================================================================
1713 
1714 template <typename SrcGridT>
1715 template <typename DstBuildT>
1716 void CreateNanoGrid<SrcGridT>::processGrid()
1717 {
1718  auto* dstData = this->template dstGrid<DstBuildT>()->data();
1719  dstData->init({GridFlags::IsBreadthFirst}, mOffset.size, mSrcNodeAcc.map(),
1720  mapToGridType<DstBuildT>(), mapToGridClass<DstBuildT>(mSrcNodeAcc.gridClass()));
1721  dstData->mBlindMetadataCount = static_cast<uint32_t>(mBlindMetaData.size());
1722  dstData->mData1 = this->valueCount();
1723 
1724  std::memset(dstData->mGridName, '\0', GridData::MaxNameSize);//overwrite mGridName
1725  strncpy(dstData->mGridName, mSrcNodeAcc.getName().c_str(), GridData::MaxNameSize-1);
1726  if (mSrcNodeAcc.hasLongGridName()) dstData->setLongGridNameOn();// grid name is long so store it as blind data
1727 
1728  // Partially process blind meta data - they will be complete in postProcess
1729  if (mBlindMetaData.size()>0) {
1730  auto *metaData = this->dstMeta(0);
1731  dstData->mBlindMetadataOffset = PtrDiff(metaData, dstData);
1732  dstData->mBlindMetadataCount = static_cast<uint32_t>(mBlindMetaData.size());
1733  char *blindData = PtrAdd<char>(mBufferPtr, mOffset.blind);
1734  for (const auto &b : mBlindMetaData) {
1735  std::memcpy(metaData, b.metaData, sizeof(GridBlindMetaData));
1736  metaData->setBlindData(blindData);// sets metaData.mOffset
1737  if (metaData->mDataClass == GridBlindDataClass::GridName) strcpy(blindData, mSrcNodeAcc.getName().c_str());
1738  ++metaData;
1739  blindData += b.size;
1740  }
1741  mBlindMetaData.clear();
1742  }
1743 } // CreateNanoGrid::processGrid
1744 
1745 //================================================================================================
1746 
1747 template <typename SrcGridT>
1748 template <typename DstBuildT>
1749 inline typename disable_if<BuildTraits<DstBuildT>::is_index>::type
1750 CreateNanoGrid<SrcGridT>::postProcess()
1751 {
1752  if constexpr(is_same<FpN, DstBuildT>::value) mCodec.reset();
1753  auto *dstGrid = this->template dstGrid<DstBuildT>();
1754  gridStats(*dstGrid, mStats);
1755 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
1756  auto *metaData = this->dstMeta(0);
1757  if constexpr(is_same<openvdb::tools::PointIndexGrid, SrcGridT>::value ||
1758  is_same<openvdb::points::PointDataGrid, SrcGridT>::value) {
1759  static_assert(is_same<DstBuildT, uint32_t>::value, "expected DstBuildT==uint32_t");
1760  auto *dstData0 = this->template dstNode<DstBuildT,0>(0)->data();
1761  dstData0->mMinimum = 0; // start of prefix sum
1762  dstData0->mMaximum = dstData0->mValues[511u];
1763  for (uint32_t i=1, n=mSrcNodeAcc.nodeCount(0); i<n; ++i) {
1764  auto *dstData1 = dstData0 + 1;
1765  dstData1->mMinimum = dstData0->mMinimum + dstData0->mMaximum;
1766  dstData1->mMaximum = dstData1->mValues[511u];
1767  dstData0 = dstData1;
1768  }
1769  for (size_t i = 0, n = dstGrid->blindDataCount(); i < n; ++i, ++metaData) {
1770  if constexpr(is_same<openvdb::tools::PointIndexGrid, SrcGridT>::value) {
1771  if (metaData->mDataClass != GridBlindDataClass::IndexArray) continue;
1772  if (metaData->mDataType == GridType::UInt32) {
1773  uint32_t *blindData = const_cast<uint32_t*>(metaData->template getBlindData<uint32_t>());
1774  forEach(0, mSrcNodeAcc.nodeCount(0), 16, [&](const auto& r) {
1775  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1776  for (auto j = r.begin(); j != r.end(); ++j, ++dstData) {
1777  uint32_t* p = blindData + dstData->mMinimum;
1778  for (uint32_t idx : mSrcNodeAcc.template node<0>(j).indices()) *p++ = idx;
1779  }
1780  });
1781  }
1782  } else {// if constexpr(is_same<openvdb::points::PointDataGrid, SrcGridT>::value)
1783  if (metaData->mDataClass != GridBlindDataClass::AttributeArray) continue;
1784  if (auto *blindData = dstGrid->template getBlindData<float>(i)) {
1785  this->template copyPointAttribute<DstBuildT>(i, blindData);
1786  } else if (auto *blindData = dstGrid->template getBlindData<nanovdb::Vec3f>(i)) {
1787  this->template copyPointAttribute<DstBuildT>(i, reinterpret_cast<openvdb::Vec3f*>(blindData));
1788  } else if (auto *blindData = dstGrid->template getBlindData<int32_t>(i)) {
1789  this->template copyPointAttribute<DstBuildT>(i, blindData);
1790  } else if (auto *blindData = dstGrid->template getBlindData<int64_t>(i)) {
1791  this->template copyPointAttribute<DstBuildT>(i, blindData);
1792  } else {
1793  std::cerr << "unsupported point attribute \"" << toStr(metaData->mDataType) << "\"\n";
1794  }
1795  }// if
1796  }// loop
1797  } else { // if
1798  (void)metaData;
1799  }
1800 #endif
1801  updateChecksum(*dstGrid, mChecksum);
1802 }// CreateNanoGrid::postProcess<T>
1803 
1804 //================================================================================================
1805 
1806 template <typename SrcGridT>
1807 template <typename DstBuildT>
1808 inline typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1809 CreateNanoGrid<SrcGridT>::postProcess(uint32_t channels)
1810 {
1811  const std::string typeName = toStr(mapToGridType<SrcValueT>());
1812  const uint64_t valueCount = this->valueCount();
1813  auto *dstGrid = this->template dstGrid<DstBuildT>();
1814  for (uint32_t i=0; i<channels; ++i) {
1815  const std::string name = "channel_"+std::to_string(i);
1816  int j = dstGrid->findBlindData(name.c_str());
1817  if (j<0) throw std::runtime_error("missing " + name);
1818  auto *metaData = this->dstMeta(j);// partially set in processGrid
1819  metaData->mDataClass = GridBlindDataClass::ChannelArray;
1820  metaData->mDataType = mapToGridType<SrcValueT>();
1821  SrcValueT *blindData = const_cast<SrcValueT*>(metaData->template getBlindData<SrcValueT>());
1822  if (i>0) {// concurrent copy from previous channel
1823  nanovdb::forEach(0,valueCount,1024,[&](const nanovdb::Range1D &r){
1824  SrcValueT *dst=blindData+r.begin(), *end=dst+r.size(), *src=dst-valueCount;
1825  while(dst!=end) *dst++ = *src++;
1826  });
1827  } else {
1828  this->template copyValues<DstBuildT>(blindData);
1829  }
1830  }// loop over channels
1831  gridStats(*(this->template dstGrid<DstBuildT>()), std::min(StatsMode::BBox, mStats));
1832  updateChecksum(*dstGrid, mChecksum);
1833 }// CreateNanoGrid::postProcess<ValueIndex or ValueOnIndex>
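/*!
  \brief A hedged usage sketch of the channel mechanism above, assuming @c srcGrid is an
         existing float source grid of type SrcGridT: each channel is written as blind data
         named "channel_<i>" holding one value per index.
  \code
  auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::ValueOnIndex>(srcGrid, 1u, true, true);
  auto *idxGrid = handle.grid<nanovdb::ValueOnIndex>();
  const int j = idxGrid->findBlindData("channel_0");// negative if no such blind data exists
  const float *channel = j >= 0 ? idxGrid->getBlindData<float>(j) : nullptr;
  \endcode
*/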
1834 
1835 //================================================================================================
1836 
1837 template <typename SrcGridT>
1838 template <typename DstBuildT>
1839 typename enable_if<BuildTraits<DstBuildT>::is_index>::type
1840 CreateNanoGrid<SrcGridT>::copyValues(SrcValueT *buffer)
1841 {// copy values from the source grid into the provided buffer
1842  assert(mBufferPtr && buffer);
1843  using StatsT = typename FloatTraits<SrcValueT>::FloatType;
1844 
1845  if (this->valueCount()==0) this->template countValues<DstBuildT>();
1846 
1847  auto copyNodeValues = [&](const auto &node, SrcValueT *v) {
1848  if constexpr(BuildTraits<DstBuildT>::is_onindex) {
1849  for (auto it = node.cbeginValueOn(); it; ++it) *v++ = *it;
1850  } else {
1851  for (auto it = node.cbeginValueAll(); it; ++it) *v++ = *it;
1852  }
1853  if (mIncludeStats) {
1854  if constexpr(SrcNodeAccT::IS_NANOVDB) {// resolved at compile time
1855  *v++ = node.minimum();
1856  *v++ = node.maximum();
1857  if constexpr(is_same<SrcValueT, StatsT>::value) {
1858  *v++ = node.average();
1859  *v++ = node.stdDeviation();
1860  } else {// eg when SrcValueT=Vec3f and StatsT=float
1861  *v++ = SrcValueT(node.average());
1862  *v++ = SrcValueT(node.stdDeviation());
1863  }
1864  } else {// openvdb and nanovdb::build::Grid have no stats
1865  *v++ = buffer[0];// background
1866  *v++ = buffer[0];// background
1867  *v++ = buffer[0];// background
1868  *v++ = buffer[0];// background
1869  }
1870  }
1871  };// copyNodeValues
1872 
1873  const SrcRootT &root = mSrcNodeAcc.root();
1874  buffer[0] = root.background();// Value array always starts with the background value
1875  if (mIncludeTiles) {
1876  copyNodeValues(root, buffer + 1u);
1877  forEach(0, mSrcNodeAcc.nodeCount(2), 1, [&](const Range1D& r) {
1878  for (auto i = r.begin(); i!=r.end(); ++i) {
1879  copyNodeValues(mSrcNodeAcc.template node<2>(i), buffer + mValIdx[2][i]);
1880  }
1881  });
1882  forEach(0, mSrcNodeAcc.nodeCount(1), 1, [&](const Range1D& r) {
1883  for (auto i = r.begin(); i!=r.end(); ++i) {
1884  copyNodeValues(mSrcNodeAcc.template node<1>(i), buffer + mValIdx[1][i]);
1885  }
1886  });
1887  }
1888  forEach(0, mSrcNodeAcc.nodeCount(0), 4, [&](const Range1D& r) {
1889  for (auto i = r.begin(); i!=r.end(); ++i) {
1890  copyNodeValues(mSrcNodeAcc.template node<0>(i), buffer + mValIdx[0][i]);
1891  }
1892  });
1893 }// CreateNanoGrid::copyValues<ValueIndex or ValueOnIndex>
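/*!
  \brief A sketch of the slot budget used by copyValues above: every node contributes one
         buffer entry per copied value (active values only for ValueOnIndex, all values for
         ValueIndex) plus four trailing stats entries (min, max, average, std-dev) when stats
         are included. The helper name is hypothetical.
  \code
  #include <cstdint>
  inline uint64_t leafSlotCount(uint32_t activeVoxels, bool onIndexOnly, bool withStats)
  {
      const uint64_t values = onIndexOnly ? activeVoxels : 512u;// an 8^3 leaf holds 512 values
      return values + (withStats ? 4u : 0u);
  }
  \endcode
*/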
1894 
1895 
1896 //================================================================================================
1897 
1898 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
1899 
1900 template <typename SrcGridT>
1901 template<typename T>
1902 typename disable_if<is_same<T, openvdb::tools::PointIndexGrid>::value ||
1903  is_same<T, openvdb::points::PointDataGrid>::value, uint64_t>::type
1904 CreateNanoGrid<SrcGridT>::countPoints() const
1905 {
1906  static_assert(is_same<T, SrcGridT>::value, "expected default template parameter");
1907  return 0u;
1908 }// CreateNanoGrid::countPoints<T>
1909 
1910 template <typename SrcGridT>
1911 template<typename T>
1912 typename enable_if<is_same<T, openvdb::tools::PointIndexGrid>::value ||
1913  is_same<T, openvdb::points::PointDataGrid>::value, uint64_t>::type
1914 CreateNanoGrid<SrcGridT>::countPoints() const
1915 {
1916  static_assert(is_same<T, SrcGridT>::value, "expected default template parameter");
1917  return reduce(0, mSrcNodeAcc.nodeCount(0), 8, uint64_t(0), [&](auto &r, uint64_t sum) {
1918  for (auto i=r.begin(); i!=r.end(); ++i) sum += mSrcNodeAcc.template node<0>(i).getLastValue();
1919  return sum;}, std::plus<uint64_t>());
1920 }// CreateNanoGrid::countPoints<PointIndexGrid or PointDataGrid>
1921 
1922 template <typename SrcGridT>
1923 template<typename DstBuildT, typename AttT, typename CodecT, typename T>
1924 inline typename enable_if<is_same<openvdb::points::PointDataGrid, T>::value>::type
1925 CreateNanoGrid<SrcGridT>::copyPointAttribute(size_t attIdx, AttT *attPtr)
1926 {
1927  static_assert(std::is_same<SrcGridT, T>::value, "Expected default parameter");
1928  using HandleT = openvdb::points::AttributeHandle<AttT, CodecT>;
1929  forEach(0, mSrcNodeAcc.nodeCount(0), 16, [&](const auto& r) {
1930  auto *dstData = this->template dstNode<DstBuildT,0>(r.begin())->data();
1931  for (auto i = r.begin(); i != r.end(); ++i, ++dstData) {
1932  auto& srcLeaf = mSrcNodeAcc.template node<0>(i);
1933  HandleT handle(srcLeaf.constAttributeArray(attIdx));
1934  AttT *p = attPtr + dstData->mMinimum;
1935  for (auto iter = srcLeaf.beginIndexOn(); iter; ++iter) *p++ = handle.get(*iter);
1936  }
1937  });
1938 }// CreateNanoGrid::copyPointAttribute
1939 
1940 #endif
1941 
1942 //================================================================================================
1943 
1944 template<typename SrcGridT, typename DstBuildT, typename BufferT>
1945 typename disable_if<BuildTraits<DstBuildT>::is_index || BuildTraits<DstBuildT>::is_Fp, GridHandle<BufferT>>::type
1946 createNanoGrid(const SrcGridT &srcGrid,
1947  StatsMode sMode,
1948  ChecksumMode cMode,
1949  int verbose,
1950  const BufferT &buffer)
1951 {
1952  CreateNanoGrid<SrcGridT> converter(srcGrid);
1953  converter.setStats(sMode);
1954  converter.setChecksum(cMode);
1955  converter.setVerbose(verbose);
1956  return converter.template getHandle<DstBuildT, BufferT>(buffer);
1957 }// createNanoGrid<T>
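/*!
  \brief A hedged usage sketch, assuming @c srcGrid is an existing source grid of type
         SrcGridT: the stats and checksum modes can be passed explicitly instead of relying
         on their defaults.
  \code
  auto handle = nanovdb::createNanoGrid<SrcGridT, float>(srcGrid, nanovdb::StatsMode::BBox, nanovdb::ChecksumMode::Default);
  auto *dstGrid = handle.grid<float>();
  \endcode
*/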
1958 
1959 //================================================================================================
1960 
1961 template<typename SrcGridT, typename DstBuildT, typename BufferT>
1962 typename enable_if<BuildTraits<DstBuildT>::is_index, GridHandle<BufferT>>::type
1963 createNanoGrid(const SrcGridT &srcGrid,
1964  uint32_t channels,
1965  bool includeStats,
1966  bool includeTiles,
1967  int verbose,
1968  const BufferT &buffer)
1969 {
1970  CreateNanoGrid<SrcGridT> converter(srcGrid);
1971  converter.setVerbose(verbose);
1972  return converter.template getHandle<DstBuildT, BufferT>(channels, includeStats, includeTiles, buffer);
1973 }
1974 
1975 //================================================================================================
1976 
1977 template<typename SrcGridT, typename DstBuildT, typename OracleT, typename BufferT>
1978 typename enable_if<is_same<FpN, DstBuildT>::value, GridHandle<BufferT>>::type
1979 createNanoGrid(const SrcGridT &srcGrid,
1980  StatsMode sMode,
1981  ChecksumMode cMode,
1982  bool ditherOn,
1983  int verbose,
1984  const OracleT &oracle,
1985  const BufferT &buffer)
1986 {
1987  CreateNanoGrid<SrcGridT> converter(srcGrid);
1988  converter.setStats(sMode);
1989  converter.setChecksum(cMode);
1990  converter.enableDithering(ditherOn);
1991  converter.setVerbose(verbose);
1992  return converter.template getHandle<DstBuildT, OracleT, BufferT>(oracle, buffer);
1993 }// createNanoGrid<FpN>
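/*!
  \brief A hedged usage sketch of adaptive-width quantization, assuming @c srcGrid is an
         existing float source grid of type SrcGridT: an AbsDiff oracle with an absolute
         tolerance of 0.01 drives the per-leaf choice of bit width.
  \code
  auto handle = nanovdb::createNanoGrid<SrcGridT, nanovdb::FpN>(srcGrid,
                    nanovdb::StatsMode::Default, nanovdb::ChecksumMode::Default,
                    true, 0, nanovdb::AbsDiff(0.01f));
  auto *dstGrid = handle.grid<nanovdb::FpN>();
  \endcode
*/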
1994 
1995 //================================================================================================
1996 
1997 template<typename SrcGridT, typename DstBuildT, typename BufferT>
1998 typename enable_if<BuildTraits<DstBuildT>::is_FpX, GridHandle<BufferT>>::type
1999 createNanoGrid(const SrcGridT &srcGrid,
2000  StatsMode sMode,
2001  ChecksumMode cMode,
2002  bool ditherOn,
2003  int verbose,
2004  const BufferT &buffer)
2005 {
2006  CreateNanoGrid<SrcGridT> converter(srcGrid);
2007  converter.setStats(sMode);
2008  converter.setChecksum(cMode);
2009  converter.enableDithering(ditherOn);
2010  converter.setVerbose(verbose);
2011  return converter.template getHandle<DstBuildT, BufferT>(buffer);
2012 }// createNanoGrid<Fp4,8,16>
2013 
2014 //================================================================================================
2015 
2016 #if defined(NANOVDB_USE_OPENVDB) && !defined(__CUDACC__)
2017 template<typename BufferT>
2018 GridHandle<BufferT>
2019 openToNanoVDB(const openvdb::GridBase::Ptr& base,
2020  StatsMode sMode,
2021  ChecksumMode cMode,
2022  int verbose)
2023 {
2024  // We need to define these types because they are not defined in OpenVDB
2025  using openvdb_Vec4fTree = typename openvdb::tree::Tree4<openvdb::Vec4f, 5, 4, 3>::Type;
2026  using openvdb_Vec4dTree = typename openvdb::tree::Tree4<openvdb::Vec4d, 5, 4, 3>::Type;
2027  using openvdb_Vec4fGrid = openvdb::Grid<openvdb_Vec4fTree>;
2028  using openvdb_Vec4dGrid = openvdb::Grid<openvdb_Vec4dTree>;
2029  using openvdb_UInt32Grid = openvdb::Grid<openvdb::UInt32Tree>;
2030 
2031  if (auto grid = openvdb::GridBase::grid<openvdb::FloatGrid>(base)) {
2032  return createNanoGrid<openvdb::FloatGrid, float, BufferT>(*grid, sMode, cMode, verbose);
2033  } else if (auto grid = openvdb::GridBase::grid<openvdb::DoubleGrid>(base)) {
2034  return createNanoGrid<openvdb::DoubleGrid, double, BufferT>(*grid, sMode, cMode, verbose);
2035  } else if (auto grid = openvdb::GridBase::grid<openvdb::Int32Grid>(base)) {
2036  return createNanoGrid<openvdb::Int32Grid, int32_t,BufferT>(*grid, sMode, cMode, verbose);
2037  } else if (auto grid = openvdb::GridBase::grid<openvdb::Int64Grid>(base)) {
2038  return createNanoGrid<openvdb::Int64Grid, int64_t, BufferT>(*grid, sMode, cMode, verbose);
2039  } else if (auto grid = openvdb::GridBase::grid<openvdb_UInt32Grid>(base)) {
2040  return createNanoGrid<openvdb_UInt32Grid, uint32_t, BufferT>(*grid, sMode, cMode, verbose);
2041  } else if (auto grid = openvdb::GridBase::grid<openvdb::Vec3fGrid>(base)) {
2042  return createNanoGrid<openvdb::Vec3fGrid, nanovdb::Vec3f, BufferT>(*grid, sMode, cMode, verbose);
2043  } else if (auto grid = openvdb::GridBase::grid<openvdb::Vec3dGrid>(base)) {
2044  return createNanoGrid<openvdb::Vec3dGrid, nanovdb::Vec3d, BufferT>(*grid, sMode, cMode, verbose);
2045  } else if (auto grid = openvdb::GridBase::grid<openvdb::tools::PointIndexGrid>(base)) {
2046  return createNanoGrid<openvdb::tools::PointIndexGrid, uint32_t, BufferT>(*grid, sMode, cMode, verbose);
2047  } else if (auto grid = openvdb::GridBase::grid<openvdb::points::PointDataGrid>(base)) {
2048  return createNanoGrid<openvdb::points::PointDataGrid, uint32_t, BufferT>(*grid, sMode, cMode, verbose);
2049  } else if (auto grid = openvdb::GridBase::grid<openvdb::MaskGrid>(base)) {
2050  return createNanoGrid<openvdb::MaskGrid, nanovdb::ValueMask, BufferT>(*grid, sMode, cMode, verbose);
2051  } else if (auto grid = openvdb::GridBase::grid<openvdb::BoolGrid>(base)) {
2052  return createNanoGrid<openvdb::BoolGrid, bool, BufferT>(*grid, sMode, cMode, verbose);
2053  } else if (auto grid = openvdb::GridBase::grid<openvdb_Vec4fGrid>(base)) {
2054  return createNanoGrid<openvdb_Vec4fGrid, nanovdb::Vec4f, BufferT>(*grid, sMode, cMode, verbose);
2055  } else if (auto grid = openvdb::GridBase::grid<openvdb_Vec4dGrid>(base)) {
2056  return createNanoGrid<openvdb_Vec4dGrid, nanovdb::Vec4d, BufferT>(*grid, sMode, cMode, verbose);
2057  } else {
2058  OPENVDB_THROW(openvdb::RuntimeError, "Unrecognized OpenVDB grid type");
2059  }
2060 }// openToNanoVDB
2061 #endif
2062 
2063 } // namespace nanovdb
2064 
2065 #endif // NANOVDB_CREATE_NANOGRID_H_HAS_BEEN_INCLUDED