OpenVDB 11.0.0
GridOperators.h
1 // Copyright Contributors to the OpenVDB Project
2 // SPDX-License-Identifier: MPL-2.0
3 
4 /// @file tools/GridOperators.h
5 ///
6 /// @brief Apply an operator to an input grid to produce an output grid
7 /// with the same active voxel topology but a potentially different value type.
8 
9 #ifndef OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
10 #define OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED
11 
12 #include <openvdb/openvdb.h>
13 #include <openvdb/Grid.h>
14 #include <openvdb/math/Operators.h>
15 #include <openvdb/util/NullInterrupter.h>
16 #include <openvdb/thread/Threading.h>
17 #include <openvdb/tree/LeafManager.h>
18 #include <openvdb/tree/ValueAccessor.h>
19 
20 #include "ValueTransformer.h" // for tools::foreach()
21 
22 #include <tbb/parallel_for.h>
23 
24 namespace openvdb {
25 OPENVDB_USE_VERSION_NAMESPACE
26 namespace OPENVDB_VERSION_NAME {
27 namespace tools {
28 
29 /// @brief VectorToScalarConverter<VectorGridType>::Type is the type of a grid
30 /// having the same tree configuration as VectorGridType but a scalar value type, T,
31 /// where T is the type of the original vector components.
32 /// @details For example, VectorToScalarConverter<Vec3DGrid>::Type is equivalent to DoubleGrid.
33 template<typename VectorGridType> struct VectorToScalarConverter {
34  typedef typename VectorGridType::ValueType::value_type VecComponentValueT;
35  typedef typename VectorGridType::template ValueConverter<VecComponentValueT>::Type Type;
36 };
37 
38 /// @brief ScalarToVectorConverter<ScalarGridType>::Type is the type of a grid
39 /// having the same tree configuration as ScalarGridType but value type Vec3<T>
40 /// where T is ScalarGridType::ValueType.
41 /// @details For example, ScalarToVectorConverter<DoubleGrid>::Type is equivalent to Vec3DGrid.
42 template<typename ScalarGridType> struct ScalarToVectorConverter {
43  typedef math::Vec3<typename ScalarGridType::ValueType> VectorValueT;
44  typedef typename ScalarGridType::template ValueConverter<VectorValueT>::Type Type;
45 };
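The two converters above are pure compile-time type mappings. A minimal sketch (editorial, not part of the header) that checks the mappings quoted in the comments:

    #include <type_traits>
    #include <openvdb/openvdb.h>
    #include <openvdb/tools/GridOperators.h>

    // ScalarToVectorConverter pairs a scalar grid with the Vec3 grid of matching
    // precision; VectorToScalarConverter goes the other way.
    static_assert(std::is_same<
        openvdb::tools::ScalarToVectorConverter<openvdb::DoubleGrid>::Type,
        openvdb::Vec3DGrid>::value, "DoubleGrid maps to Vec3DGrid");
    static_assert(std::is_same<
        openvdb::tools::VectorToScalarConverter<openvdb::Vec3DGrid>::Type,
        openvdb::DoubleGrid>::value, "Vec3DGrid maps back to DoubleGrid");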
46 
47 
48 /// @brief Compute the Closest-Point Transform (CPT) from a distance field.
49 /// @return a new vector-valued grid with the same numerical precision as the input grid
50 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
51 /// @details When a mask grid is specified, the solution is calculated only in
52 /// the intersection of the mask active topology and the input active topology
53 /// independent of the transforms associated with either grid.
54 template<typename GridType, typename InterruptT>
55 typename ScalarToVectorConverter<GridType>::Type::Ptr
56 cpt(const GridType& grid, bool threaded, InterruptT* interrupt);
57 
58 template<typename GridType, typename MaskT, typename InterruptT>
59 typename ScalarToVectorConverter<GridType>::Type::Ptr
60 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
61 
62 template<typename GridType>
63 typename ScalarToVectorConverter<GridType>::Type::Ptr
64 cpt(const GridType& grid, bool threaded = true)
65 {
66  return cpt<GridType, util::NullInterrupter>(grid, threaded, nullptr);
67 }
68 
69 template<typename GridType, typename MaskT>
70 typename ScalarToVectorConverter<GridType>::Type::Ptr
71 cpt(const GridType& grid, const MaskT& mask, bool threaded = true)
72 {
73  return cpt<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
74 }
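Usage sketch (editorial, not part of the header): computing the closest-point transform of a level-set sphere with the convenience overload; the sphere parameters are arbitrary.

    #include <openvdb/openvdb.h>
    #include <openvdb/tools/GridOperators.h>
    #include <openvdb/tools/LevelSetSphere.h>

    int main()
    {
        openvdb::initialize();
        // Narrow-band signed distance field of a unit sphere, voxel size 0.1.
        openvdb::FloatGrid::Ptr sdf =
            openvdb::tools::createLevelSetSphere<openvdb::FloatGrid>(1.0f, openvdb::Vec3f(0), 0.1f);
        // Each active voxel of the result stores the world-space position of the
        // closest point on the zero level set (a Vec3SGrid for a FloatGrid input).
        openvdb::Vec3SGrid::Ptr closest = openvdb::tools::cpt(*sdf);
        return 0;
    }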
75 
76 
77 /// @brief Compute the curl of the given vector-valued grid.
78 /// @return a new vector-valued grid
79 /// @details When a mask grid is specified, the solution is calculated only in
80 /// the intersection of the mask active topology and the input active topology
81 /// independent of the transforms associated with either grid.
82 template<typename GridType, typename InterruptT>
83 typename GridType::Ptr
84 curl(const GridType& grid, bool threaded, InterruptT* interrupt);
85 
86 template<typename GridType, typename MaskT, typename InterruptT>
87 typename GridType::Ptr
88 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
89 
90 template<typename GridType>
91 typename GridType::Ptr
92 curl(const GridType& grid, bool threaded = true)
93 {
94  return curl<GridType, util::NullInterrupter>(grid, threaded, nullptr);
95 }
96 
97 template<typename GridType, typename MaskT>
98 typename GridType::Ptr
99 curl(const GridType& grid, const MaskT& mask, bool threaded = true)
100 {
101  return curl<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
102 }
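Usage sketch (same includes and initialization as the cpt example above; the velocity values are placeholders):

    // Hypothetical velocity field sampled on a handful of voxels.
    openvdb::Vec3SGrid::Ptr velocity = openvdb::Vec3SGrid::create(openvdb::Vec3s(0));
    velocity->setTransform(openvdb::math::Transform::createLinearTransform(/*voxelSize=*/0.5));
    auto vAcc = velocity->getAccessor();
    for (int i = -8; i <= 8; ++i) {
        vAcc.setValue(openvdb::Coord(i, 0, 0), openvdb::Vec3s(0.0f, float(i), 0.0f));
    }
    // The curl of a vector grid is another grid of the same vector type.
    openvdb::Vec3SGrid::Ptr vorticity = openvdb::tools::curl(*velocity);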
103 
104 
105 /// @brief Compute the divergence of the given vector-valued grid.
106 /// @return a new scalar-valued grid with the same numerical precision as the input grid
107 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
108 /// @details When a mask grid is specified, the solution is calculated only in
109 /// the intersection of the mask active topology and the input active topology
110 /// independent of the transforms associated with either grid.
111 template<typename GridType, typename InterruptT>
112 typename VectorToScalarConverter<GridType>::Type::Ptr
113 divergence(const GridType& grid, bool threaded, InterruptT* interrupt);
114 
115 template<typename GridType, typename MaskT, typename InterruptT>
116 typename VectorToScalarConverter<GridType>::Type::Ptr
117 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
118 
119 template<typename GridType>
120 typename VectorToScalarConverter<GridType>::Type::Ptr
121 divergence(const GridType& grid, bool threaded = true)
122 {
123  return divergence<GridType, util::NullInterrupter>(grid, threaded, nullptr);
124 }
125 
126 template<typename GridType, typename MaskT>
127 typename VectorToScalarConverter<GridType>::Type::Ptr
128 divergence(const GridType& grid, const MaskT& mask, bool threaded = true)
129 {
130  return divergence<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
131 }
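Usage sketch (assumptions as above); the Divergence class further below switches to one-sided differencing when the input grid is tagged as staggered:

    // Reusing the velocity grid from the curl sketch; tag it as a staggered (MAC) grid
    // so that divergence() differences the face-centered samples to first order.
    velocity->setGridClass(openvdb::GRID_STAGGERED);
    // The result has the same tree configuration but a scalar value type (FloatGrid here).
    openvdb::FloatGrid::Ptr div = openvdb::tools::divergence(*velocity);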
132 
133 
134 /// @brief Compute the gradient of the given scalar grid.
135 /// @return a new vector-valued grid with the same numerical precision as the input grid
136 /// (for example, if the input grid is a DoubleGrid, the output grid will be a Vec3DGrid)
137 /// @details When a mask grid is specified, the solution is calculated only in
138 /// the intersection of the mask active topology and the input active topology
139 /// independent of the transforms associated with either grid.
140 template<typename GridType, typename InterruptT>
141 typename ScalarToVectorConverter<GridType>::Type::Ptr
142 gradient(const GridType& grid, bool threaded, InterruptT* interrupt);
143 
144 template<typename GridType, typename MaskT, typename InterruptT>
145 typename ScalarToVectorConverter<GridType>::Type::Ptr
146 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
147 
148 template<typename GridType>
149 typename ScalarToVectorConverter<GridType>::Type::Ptr
150 gradient(const GridType& grid, bool threaded = true)
151 {
152  return gradient<GridType, util::NullInterrupter>(grid, threaded, nullptr);
153 }
154 
155 template<typename GridType, typename MaskT>
156 typename ScalarToVectorConverter<GridType>::Type::Ptr
157 gradient(const GridType& grid, const MaskT& mask, bool threaded = true)
158 {
159  return gradient<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
160 }
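Usage sketch of the masked overload (assumptions as above); the mask is hypothetical, and only voxels active in both the mask and the input are solved:

    // Region of interest: a hand-built box of active mask voxels.
    openvdb::BoolGrid::Ptr mask = openvdb::BoolGrid::create(false);
    mask->fill(openvdb::CoordBBox(openvdb::Coord(-5), openvdb::Coord(5)), true, /*active=*/true);
    // Gradient of a scalar grid is a vector grid of matching precision, computed
    // only where both sdf (from the cpt sketch) and mask are active.
    openvdb::Vec3SGrid::Ptr grad = openvdb::tools::gradient(*sdf, *mask);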
161 
162 
163 /// @brief Compute the Laplacian of the given scalar grid.
164 /// @return a new scalar grid
165 /// @details When a mask grid is specified, the solution is calculated only in
166 /// the intersection of the mask active topology and the input active topology
167 /// independent of the transforms associated with either grid.
168 template<typename GridType, typename InterruptT>
169 typename GridType::Ptr
170 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt);
171 
172 template<typename GridType, typename MaskT, typename InterruptT>
173 typename GridType::Ptr
174 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
175 
176 template<typename GridType>
177 typename GridType::Ptr
178 laplacian(const GridType& grid, bool threaded = true)
179 {
180  return laplacian<GridType, util::NullInterrupter>(grid, threaded, nullptr);
181 }
182 
183 template<typename GridType, typename MaskT>
184 typename GridType::Ptr
185 laplacian(const GridType& grid, const MaskT& mask, bool threaded = true)
186 {
187  return laplacian<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
188 }
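Usage sketch (assumptions as above):

    // Laplacian maps a scalar grid to a scalar grid of the same type; here it is
    // applied to the level-set sphere from the cpt sketch.
    openvdb::FloatGrid::Ptr lap = openvdb::tools::laplacian(*sdf);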
189 
190 
191 /// @brief Compute the mean curvature of the given grid.
192 /// @return a new grid
193 /// @details When a mask grid is specified, the solution is calculated only in
194 /// the intersection of the mask active topology and the input active topology
195 /// independent of the transforms associated with either grid.
196 template<typename GridType, typename InterruptT>
197 typename GridType::Ptr
198 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt);
199 
200 template<typename GridType, typename MaskT, typename InterruptT>
201 typename GridType::Ptr
202 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
203 
204 template<typename GridType>
205 typename GridType::Ptr
206 meanCurvature(const GridType& grid, bool threaded = true)
207 {
208  return meanCurvature<GridType, util::NullInterrupter>(grid, threaded, nullptr);
209 }
210 
211 template<typename GridType, typename MaskT>
212 typename GridType::Ptr
213 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded = true)
214 {
215  return meanCurvature<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
216 }
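Usage sketch (assumptions as above); threading is disabled here only to illustrate the flag:

    // Mean curvature of the signed distance field, computed single-threaded.
    openvdb::FloatGrid::Ptr curvature = openvdb::tools::meanCurvature(*sdf, /*threaded=*/false);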
217 
218 
219 /// @brief Compute the magnitudes of the vectors of the given vector-valued grid.
220 /// @return a new scalar-valued grid with the same numerical precision as the input grid
221 /// (for example, if the input grid is a Vec3DGrid, the output grid will be a DoubleGrid)
222 /// @details When a mask grid is specified, the solution is calculated only in
223 /// the intersection of the mask active topology and the input active topology
224 /// independent of the transforms associated with either grid.
225 template<typename GridType, typename InterruptT>
226 typename VectorToScalarConverter<GridType>::Type::Ptr
227 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt);
228 
229 template<typename GridType, typename MaskT, typename InterruptT>
230 typename VectorToScalarConverter<GridType>::Type::Ptr
231 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
232 
233 template<typename GridType>
234 typename VectorToScalarConverter<GridType>::Type::Ptr
235 magnitude(const GridType& grid, bool threaded = true)
236 {
237  return magnitude<GridType, util::NullInterrupter>(grid, threaded, nullptr);
238 }
239 
240 template<typename GridType, typename MaskT>
241 typename VectorToScalarConverter<GridType>::Type::Ptr
242 magnitude(const GridType& grid, const MaskT& mask, bool threaded = true)
243 {
244  return magnitude<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
245 }
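Usage sketch (assumptions as above):

    // Per-voxel speed: the length of each vector in the velocity grid from the curl sketch.
    openvdb::FloatGrid::Ptr speed = openvdb::tools::magnitude(*velocity);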
246 
247 
248 /// @brief Normalize the vectors of the given vector-valued grid.
249 /// @return a new vector-valued grid
250 /// @details When a mask grid is specified, the solution is calculated only in
251 /// the intersection of the mask active topology and the input active topology
252 /// independent of the transforms associated with either grid.
253 template<typename GridType, typename InterruptT>
254 typename GridType::Ptr
255 normalize(const GridType& grid, bool threaded, InterruptT* interrupt);
256 
257 template<typename GridType, typename MaskT, typename InterruptT>
258 typename GridType::Ptr
259 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt);
260 
261 template<typename GridType>
262 typename GridType::Ptr
263 normalize(const GridType& grid, bool threaded = true)
264 {
265  return normalize<GridType, util::NullInterrupter>(grid, threaded, nullptr);
266 }
267 
268 template<typename GridType, typename MaskT>
269 typename GridType::Ptr
270 normalize(const GridType& grid, const MaskT& mask, bool threaded = true)
271 {
272  return normalize<GridType, MaskT, util::NullInterrupter>(grid, mask, threaded, nullptr);
273 }
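Usage sketch (assumptions as above):

    // Unit-length copy of the gradient field from the masked gradient sketch;
    // vectors that cannot be normalized are set to zero.
    openvdb::Vec3SGrid::Ptr unitGrad = openvdb::tools::normalize(*grad);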
274 
275 
276 ////////////////////////////////////////
277 
278 
279 namespace gridop {
280 
281 /// @brief ToMaskGrid<T>::Type is the type of a grid having the same
282 /// tree hierarchy as grid type T but a value equal to its active state.
283 /// @details For example, ToMaskGrid<FloatGrid>::Type is equivalent to MaskGrid.
284 template<typename GridType>
285 struct ToMaskGrid {
286  typedef Grid<typename GridType::TreeType::template ValueConverter<ValueMask>::Type> Type;
287 };
288 
289 
290 /// @brief Apply an operator to an input grid to produce an output grid
291 /// with the same active voxel topology but a potentially different value type.
292 /// @details To facilitate inlining, this class is also templated on a Map type.
293 ///
294 /// @note This is a helper class and should never be used directly.
295 template<
296  typename InGridT,
297  typename MaskGridType,
298  typename OutGridT,
299  typename MapT,
300  typename OperatorT,
301  typename InterruptT = util::NullInterrupter>
302 class GridOperator
303 {
304 public:
305  typedef typename OutGridT::TreeType OutTreeT;
306  typedef typename OutTreeT::LeafNodeType OutLeafT;
307  typedef tree::LeafManager<OutTreeT> LeafManagerT;
308 
309  GridOperator(const InGridT& grid, const MaskGridType* mask, const MapT& map,
310  InterruptT* interrupt = nullptr, bool densify = true)
311  : mAcc(grid.getConstAccessor())
312  , mMap(map)
313  , mInterrupt(interrupt)
314  , mMask(mask)
315  , mDensify(densify) ///< @todo consider adding a "NeedsDensification" operator trait
316  {
317  }
318  GridOperator(const GridOperator&) = default;
319  GridOperator& operator=(const GridOperator&) = default;
320  virtual ~GridOperator() = default;
321 
322  typename OutGridT::Ptr process(bool threaded = true)
323  {
324  if (mInterrupt) mInterrupt->start("Processing grid");
325 
326  // Derive background value of the output grid
327  typename InGridT::TreeType tmp(mAcc.tree().background());
328  typename OutGridT::ValueType backg = OperatorT::result(mMap, tmp, math::Coord(0));
329 
330  // The output tree is a topology copy, optionally densified, of the input tree.
331  // (Densification is necessary for some operators because applying the operator to
332  // a constant tile produces distinct output values, particularly along tile borders.)
333  /// @todo Can tiles be handled correctly without densification, or by densifying
334  /// only to the width of the operator stencil?
335  typename OutTreeT::Ptr tree(new OutTreeT(mAcc.tree(), backg, TopologyCopy()));
336  if (mDensify) tree->voxelizeActiveTiles();
337 
338  // create grid with output tree and unit transform
339  typename OutGridT::Ptr result(new OutGridT(tree));
340 
341  // Modify the solution area if a mask was supplied.
342  if (mMask) {
343  result->topologyIntersection(*mMask);
344  }
345 
346  // transform of output grid = transform of input grid
347  result->setTransform(math::Transform::Ptr(new math::Transform( mMap.copy() )));
348 
349  LeafManagerT leafManager(*tree);
350 
351  if (threaded) {
352  tbb::parallel_for(leafManager.leafRange(), *this);
353  } else {
354  (*this)(leafManager.leafRange());
355  }
356 
357  // If the tree wasn't densified, it might have active tiles that need to be processed.
358  if (!mDensify) {
359  using TileIter = typename OutTreeT::ValueOnIter;
360 
361  TileIter tileIter = tree->beginValueOn();
362  tileIter.setMaxDepth(tileIter.getLeafDepth() - 1); // skip leaf values (i.e., voxels)
363 
364  AccessorT inAcc = mAcc; // each thread needs its own accessor, captured by value
365  auto tileOp = [this, inAcc](const TileIter& it) {
366  // Apply the operator to the input grid's tile value at the iterator's
367  // current coordinates, and set the output tile's value to the result.
368  it.setValue(OperatorT::result(this->mMap, inAcc, it.getCoord()));
369  };
370 
371  // Apply the operator to tile values, optionally in parallel.
372  // (But don't share the functor; each thread needs its own accessor.)
373  tools::foreach(tileIter, tileOp, threaded, /*shareFunctor=*/false);
374  }
375 
376  if (mDensify) tree->prune();
377 
378  if (mInterrupt) mInterrupt->end();
379  return result;
380  }
381 
382  /// @brief Iterate sequentially over LeafNodes and voxels in the output
383  /// grid and apply the operator using a value accessor for the input grid.
384  ///
385  /// @note Never call this public method directly - it is called by
386  /// TBB threads only!
387  void operator()(const typename LeafManagerT::LeafRange& range) const
388  {
389  if (util::wasInterrupted(mInterrupt)) {
390  thread::cancelGroupExecution();
391  }
392 
393  for (typename LeafManagerT::LeafRange::Iterator leaf=range.begin(); leaf; ++leaf) {
394  for (typename OutLeafT::ValueOnIter value=leaf->beginValueOn(); value; ++value) {
395  value.setValue(OperatorT::result(mMap, mAcc, value.getCoord()));
396  }
397  }
398  }
399 
400 protected:
401  typedef typename InGridT::ConstAccessor AccessorT;
402  mutable AccessorT mAcc;
403  const MapT& mMap;
404  InterruptT* mInterrupt;
405  const MaskGridType* mMask;
406  const bool mDensify;
407 }; // end of GridOperator class
408 
409 } // namespace gridop
410 
411 
412 ////////////////////////////////////////
413 
414 
415 /// @brief Compute the closest-point transform of a scalar grid.
416 template<
417  typename InGridT,
418  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
419  typename InterruptT = util::NullInterrupter>
420 class Cpt
421 {
422 public:
423  typedef InGridT InGridType;
424  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
425 
426  Cpt(const InGridType& grid, InterruptT* interrupt = nullptr):
427  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
428  {
429  }
430 
431  Cpt(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
432  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
433  {
434  }
435 
436  typename OutGridType::Ptr process(bool threaded = true, bool useWorldTransform = true)
437  {
438  Functor functor(mInputGrid, mMask, threaded, useWorldTransform, mInterrupt);
439  processTypedMap(mInputGrid.transform(), functor);
440  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_CONTRAVARIANT_ABSOLUTE);
441  return functor.mOutputGrid;
442  }
443 
444 private:
445  struct IsOpT
446  {
447  template<typename MapT, typename AccT>
448  static typename OutGridType::ValueType
449  result(const MapT& map, const AccT& acc, const Coord& xyz)
450  {
451  return math::CPT<MapT, math::CD_2ND>::result(map, acc, xyz);
452  }
453  };
454  struct WsOpT
455  {
456  template<typename MapT, typename AccT>
457  static typename OutGridType::ValueType
458  result(const MapT& map, const AccT& acc, const Coord& xyz)
459  {
460  return math::CPT_RANGE<MapT, math::CD_2ND>::result(map, acc, xyz);
461  }
462  };
463  struct Functor
464  {
465  Functor(const InGridType& grid, const MaskGridType* mask,
466  bool threaded, bool worldspace, InterruptT* interrupt)
467  : mThreaded(threaded)
468  , mWorldSpace(worldspace)
469  , mInputGrid(grid)
470  , mInterrupt(interrupt)
471  , mMask(mask)
472  {}
473 
474  template<typename MapT>
475  void operator()(const MapT& map)
476  {
477  if (mWorldSpace) {
478  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, WsOpT, InterruptT>
479  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
480  mOutputGrid = op.process(mThreaded); // cache the result
481  } else {
482  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, IsOpT, InterruptT>
483  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
484  mOutputGrid = op.process(mThreaded); // cache the result
485  }
486  }
487  const bool mThreaded;
488  const bool mWorldSpace;
489  const InGridType& mInputGrid;
490  typename OutGridType::Ptr mOutputGrid;
491  InterruptT* mInterrupt;
492  const MaskGridType* mMask;
493  };
494  const InGridType& mInputGrid;
495  InterruptT* mInterrupt;
496  const MaskGridType* mMask;
497 }; // end of Cpt class
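The free functions above are thin wrappers around tool classes like this one. A sketch (assumptions as in the earlier examples) of driving Cpt directly, for instance to request index-space rather than world-space output:

    // util::NullInterrupter is a stand-in here for an application-defined interrupter.
    openvdb::util::NullInterrupter interrupter;
    openvdb::tools::Cpt<openvdb::FloatGrid> cptOp(*sdf, &interrupter);
    // useWorldTransform=false returns closest points in index space instead of world space.
    openvdb::Vec3SGrid::Ptr cptIndex = cptOp.process(/*threaded=*/true, /*useWorldTransform=*/false);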
498 
499 
500 ////////////////////////////////////////
501 
502 
503 /// @brief Compute the curl of a vector grid.
504 template<
505  typename GridT,
506  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
507  typename InterruptT = util::NullInterrupter>
508 class Curl
509 {
510 public:
511  typedef GridT InGridType;
512  typedef GridT OutGridType;
513 
514  Curl(const GridT& grid, InterruptT* interrupt = nullptr):
515  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
516  {
517  }
518 
519  Curl(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
520  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
521  {
522  }
523 
524  typename GridT::Ptr process(bool threaded = true)
525  {
526  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
527  processTypedMap(mInputGrid.transform(), functor);
528  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
529  return functor.mOutputGrid;
530  }
531 
532 private:
533  struct Functor
534  {
535  Functor(const GridT& grid, const MaskGridType* mask,
536  bool threaded, InterruptT* interrupt):
537  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
538 
539  template<typename MapT>
540  void operator()(const MapT& map)
541  {
542  typedef math::Curl<MapT, math::CD_2ND> OpT;
543  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
544  op(mInputGrid, mMask, map, mInterrupt);
545  mOutputGrid = op.process(mThreaded); // cache the result
546  }
547 
548  const bool mThreaded;
549  const GridT& mInputGrid;
550  typename GridT::Ptr mOutputGrid;
551  InterruptT* mInterrupt;
552  const MaskGridType* mMask;
553  }; // Private Functor
554 
555  const GridT& mInputGrid;
556  InterruptT* mInterrupt;
557  const MaskGridType* mMask;
558 }; // end of Curl class
559 
560 
561 ////////////////////////////////////////
562 
563 
564 /// @brief Compute the divergence of a vector grid.
565 template<
566  typename InGridT,
567  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
568  typename InterruptT = util::NullInterrupter>
569 class Divergence
570 {
571 public:
572  typedef InGridT InGridType;
573  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
574 
575  Divergence(const InGridT& grid, InterruptT* interrupt = nullptr):
576  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
577  {
578  }
579 
580  Divergence(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
581  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
582  {
583  }
584 
585  typename OutGridType::Ptr process(bool threaded = true)
586  {
587  if (mInputGrid.getGridClass() == GRID_STAGGERED) {
588  Functor<math::FD_1ST> functor(mInputGrid, mMask, threaded, mInterrupt);
589  processTypedMap(mInputGrid.transform(), functor);
590  return functor.mOutputGrid;
591  } else {
592  Functor<math::CD_2ND> functor(mInputGrid, mMask, threaded, mInterrupt);
593  processTypedMap(mInputGrid.transform(), functor);
594  return functor.mOutputGrid;
595  }
596  }
597 
598 protected:
599  template<math::DScheme DiffScheme>
600  struct Functor
601  {
602  Functor(const InGridT& grid, const MaskGridType* mask,
603  bool threaded, InterruptT* interrupt):
604  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
605 
606  template<typename MapT>
607  void operator()(const MapT& map)
608  {
609  typedef math::Divergence<MapT, DiffScheme> OpT;
610  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
611  op(mInputGrid, mMask, map, mInterrupt);
612  mOutputGrid = op.process(mThreaded); // cache the result
613  }
614 
615  const bool mThreaded;
616  const InGridType& mInputGrid;
617  typename OutGridType::Ptr mOutputGrid;
618  InterruptT* mInterrupt;
619  const MaskGridType* mMask;
620  }; // Private Functor
621 
622  const InGridType& mInputGrid;
623  InterruptT* mInterrupt;
624  const MaskGridType* mMask;
625 }; // end of Divergence class
626 
627 
628 ////////////////////////////////////////
629 
630 
631 /// @brief Compute the gradient of a scalar grid.
632 template<
633  typename InGridT,
634  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
635  typename InterruptT = util::NullInterrupter>
636 class Gradient
637 {
638 public:
639  typedef InGridT InGridType;
640  typedef typename ScalarToVectorConverter<InGridT>::Type OutGridType;
641 
642  Gradient(const InGridT& grid, InterruptT* interrupt = nullptr):
643  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
644  {
645  }
646 
647  Gradient(const InGridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
648  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
649  {
650  }
651 
652  typename OutGridType::Ptr process(bool threaded = true)
653  {
654  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
655  processTypedMap(mInputGrid.transform(), functor);
656  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
657  return functor.mOutputGrid;
658  }
659 
660 protected:
661  struct Functor
662  {
663  Functor(const InGridT& grid, const MaskGridType* mask,
664  bool threaded, InterruptT* interrupt):
665  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
666 
667  template<typename MapT>
668  void operator()(const MapT& map)
669  {
670  typedef math::Gradient<MapT, math::CD_2ND> OpT;
671  gridop::GridOperator<InGridT, MaskGridType, OutGridType, MapT, OpT, InterruptT>
672  op(mInputGrid, mMask, map, mInterrupt);
673  mOutputGrid = op.process(mThreaded); // cache the result
674  }
675 
676  const bool mThreaded;
677  const InGridT& mInputGrid;
678  typename OutGridType::Ptr mOutputGrid;
679  InterruptT* mInterrupt;
680  const MaskGridType* mMask;
681  }; // Private Functor
682 
683  const InGridT& mInputGrid;
684  InterruptT* mInterrupt;
685  const MaskGridType* mMask;
686 }; // end of Gradient class
687 
688 
689 ////////////////////////////////////////
690 
691 
692 template<
693  typename GridT,
694  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
695  typename InterruptT = util::NullInterrupter>
696 class Laplacian
697 {
698 public:
699  typedef GridT InGridType;
700  typedef GridT OutGridType;
701 
702  Laplacian(const GridT& grid, InterruptT* interrupt = nullptr):
703  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
704  {
705  }
706 
707  Laplacian(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
708  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
709  {
710  }
711 
712  typename GridT::Ptr process(bool threaded = true)
713  {
714  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
715  processTypedMap(mInputGrid.transform(), functor);
716  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
717  return functor.mOutputGrid;
718  }
719 
720 protected:
721  struct Functor
722  {
723  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
724  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
725 
726  template<typename MapT>
727  void operator()(const MapT& map)
728  {
729  typedef math::Laplacian<MapT, math::CD_SECOND> OpT;
730  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
731  op(mInputGrid, mMask, map, mInterrupt);
732  mOutputGrid = op.process(mThreaded); // cache the result
733  }
734 
735  const bool mThreaded;
736  const GridT& mInputGrid;
737  typename GridT::Ptr mOutputGrid;
738  InterruptT* mInterrupt;
739  const MaskGridType* mMask;
740  }; // Private Functor
741 
742  const GridT& mInputGrid;
743  InterruptT* mInterrupt;
744  const MaskGridType* mMask;
745 }; // end of Laplacian class
746 
747 
748 ////////////////////////////////////////
749 
750 
751 template<
752  typename GridT,
753  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
754  typename InterruptT = util::NullInterrupter>
755 class MeanCurvature
756 {
757 public:
758  typedef GridT InGridType;
759  typedef GridT OutGridType;
760 
761  MeanCurvature(const GridT& grid, InterruptT* interrupt = nullptr):
762  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
763  {
764  }
765 
766  MeanCurvature(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
767  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
768  {
769  }
770 
771  typename GridT::Ptr process(bool threaded = true)
772  {
773  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
774  processTypedMap(mInputGrid.transform(), functor);
775  if (functor.mOutputGrid) functor.mOutputGrid->setVectorType(VEC_COVARIANT);
776  return functor.mOutputGrid;
777  }
778 
779 protected:
780  struct Functor
781  {
782  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
783  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
784 
785  template<typename MapT>
786  void operator()(const MapT& map)
787  {
788  typedef math::MeanCurvature<MapT, math::CD_SECOND, math::CD_2ND> OpT;
789  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
790  op(mInputGrid, mMask, map, mInterrupt);
791  mOutputGrid = op.process(mThreaded); // cache the result
792  }
793 
794  const bool mThreaded;
795  const GridT& mInputGrid;
796  typename GridT::Ptr mOutputGrid;
797  InterruptT* mInterrupt;
798  const MaskGridType* mMask;
799  }; // Private Functor
800 
801  const GridT& mInputGrid;
802  InterruptT* mInterrupt;
803  const MaskGridType* mMask;
804 }; // end of MeanCurvature class
805 
806 
807 ////////////////////////////////////////
808 
809 
810 template<
811  typename InGridT,
812  typename MaskGridType = typename gridop::ToMaskGrid<InGridT>::Type,
813  typename InterruptT = util::NullInterrupter>
814 class Magnitude
815 {
816 public:
817  typedef InGridT InGridType;
818  typedef typename VectorToScalarConverter<InGridT>::Type OutGridType;
819 
820  Magnitude(const InGridType& grid, InterruptT* interrupt = nullptr):
821  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
822  {
823  }
824 
825  Magnitude(const InGridType& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
826  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
827  {
828  }
829 
830  typename OutGridType::Ptr process(bool threaded = true)
831  {
832  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
833  processTypedMap(mInputGrid.transform(), functor);
834  return functor.mOutputGrid;
835  }
836 
837 protected:
838  struct OpT
839  {
840  template<typename MapT, typename AccT>
841  static typename OutGridType::ValueType
842  result(const MapT&, const AccT& acc, const Coord& xyz) { return acc.getValue(xyz).length();}
843  };
844  struct Functor
845  {
846  Functor(const InGridT& grid, const MaskGridType* mask,
847  bool threaded, InterruptT* interrupt):
848  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
849 
850  template<typename MapT>
851  void operator()(const MapT& map)
852  {
853  gridop::GridOperator<InGridType, MaskGridType, OutGridType, MapT, OpT, InterruptT>
854  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
855  mOutputGrid = op.process(mThreaded); // cache the result
856  }
857 
858  const bool mThreaded;
859  const InGridType& mInputGrid;
860  typename OutGridType::Ptr mOutputGrid;
861  InterruptT* mInterrupt;
862  const MaskGridType* mMask;
863  }; // Private Functor
864 
865  const InGridType& mInputGrid;
866  InterruptT* mInterrupt;
867  const MaskGridType* mMask;
868 }; // end of Magnitude class
869 
870 
871 ////////////////////////////////////////
872 
873 
874 template<
875  typename GridT,
876  typename MaskGridType = typename gridop::ToMaskGrid<GridT>::Type,
877  typename InterruptT = util::NullInterrupter>
878 class Normalize
879 {
880 public:
881  typedef GridT InGridType;
882  typedef GridT OutGridType;
883 
884  Normalize(const GridT& grid, InterruptT* interrupt = nullptr):
885  mInputGrid(grid), mInterrupt(interrupt), mMask(nullptr)
886  {
887  }
888 
889  Normalize(const GridT& grid, const MaskGridType& mask, InterruptT* interrupt = nullptr):
890  mInputGrid(grid), mInterrupt(interrupt), mMask(&mask)
891  {
892  }
893 
894  typename GridT::Ptr process(bool threaded = true)
895  {
896  Functor functor(mInputGrid, mMask, threaded, mInterrupt);
897  processTypedMap(mInputGrid.transform(), functor);
898  if (typename GridT::Ptr outGrid = functor.mOutputGrid) {
899  const VecType vecType = mInputGrid.getVectorType();
900  if (vecType == VEC_COVARIANT) {
901  outGrid->setVectorType(VEC_COVARIANT_NORMALIZE);
902  } else {
903  outGrid->setVectorType(vecType);
904  }
905  }
906  return functor.mOutputGrid;
907  }
908 
909 protected:
910  struct OpT
911  {
912  template<typename MapT, typename AccT>
913  static typename OutGridType::ValueType
914  result(const MapT&, const AccT& acc, const Coord& xyz)
915  {
916  typename OutGridType::ValueType vec = acc.getValue(xyz);
917  if ( !vec.normalize() ) vec.setZero();
918  return vec;
919  }
920  };
921  struct Functor
922  {
923  Functor(const GridT& grid, const MaskGridType* mask, bool threaded, InterruptT* interrupt):
924  mThreaded(threaded), mInputGrid(grid), mInterrupt(interrupt), mMask(mask) {}
925 
926  template<typename MapT>
927  void operator()(const MapT& map)
928  {
929  gridop::GridOperator<GridT, MaskGridType, GridT, MapT, OpT, InterruptT>
930  op(mInputGrid, mMask, map, mInterrupt, /*densify=*/false);
931  mOutputGrid = op.process(mThreaded); // cache the result
932  }
933 
934  const bool mThreaded;
935  const GridT& mInputGrid;
936  typename GridT::Ptr mOutputGrid;
937  InterruptT* mInterrupt;
938  const MaskGridType* mMask;
939  }; // Private Functor
940 
941  const GridT& mInputGrid;
942  InterruptT* mInterrupt;
943  const MaskGridType* mMask;
944 }; // end of Normalize class
945 
946 
947 ////////////////////////////////////////
948 
949 
950 template<typename GridType, typename InterruptT>
951 typename ScalarToVectorConverter<GridType>::Type::Ptr
952 cpt(const GridType& grid, bool threaded, InterruptT* interrupt)
953 {
954  Cpt<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
955  return op.process(threaded);
956 }
957 
958 template<typename GridType, typename MaskT, typename InterruptT>
959 typename ScalarToVectorConverter<GridType>::Type::Ptr
960 cpt(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
961 {
962  Cpt<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
963  return op.process(threaded);
964 }
965 
966 template<typename GridType, typename InterruptT>
967 typename GridType::Ptr
968 curl(const GridType& grid, bool threaded, InterruptT* interrupt)
969 {
970  Curl<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT> op(grid, interrupt);
971  return op.process(threaded);
972 }
973 
974 template<typename GridType, typename MaskT, typename InterruptT>
975 typename GridType::Ptr
976 curl(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
977 {
978  Curl<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
979  return op.process(threaded);
980 }
981 
982 template<typename GridType, typename InterruptT>
983 typename VectorToScalarConverter<GridType>::Type::Ptr
984 divergence(const GridType& grid, bool threaded, InterruptT* interrupt)
985 {
986  Divergence<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
987  op(grid, interrupt);
988  return op.process(threaded);
989 }
990 
991 template<typename GridType, typename MaskT, typename InterruptT>
992 typename VectorToScalarConverter<GridType>::Type::Ptr
993 divergence(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
994 {
995  Divergence<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
996  return op.process(threaded);
997 }
998 
999 template<typename GridType, typename InterruptT>
1000 typename ScalarToVectorConverter<GridType>::Type::Ptr
1001 gradient(const GridType& grid, bool threaded, InterruptT* interrupt)
1002 {
1003  Gradient<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1004  op(grid, interrupt);
1005  return op.process(threaded);
1006 }
1007 
1008 template<typename GridType, typename MaskT, typename InterruptT>
1009 typename ScalarToVectorConverter<GridType>::Type::Ptr
1010 gradient(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1011 {
1012  Gradient<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1013  return op.process(threaded);
1014 }
1015 
1016 template<typename GridType, typename InterruptT>
1017 typename GridType::Ptr
1018 laplacian(const GridType& grid, bool threaded, InterruptT* interrupt)
1019 {
1020  Laplacian<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1021  op(grid, interrupt);
1022  return op.process(threaded);
1023 }
1024 
1025 template<typename GridType, typename MaskT, typename InterruptT>
1026 typename GridType::Ptr
1027 laplacian(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1028 {
1029  Laplacian<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1030  return op.process(threaded);
1031 }
1032 
1033 template<typename GridType, typename InterruptT>
1034 typename GridType::Ptr
1035 meanCurvature(const GridType& grid, bool threaded, InterruptT* interrupt)
1036 {
1037  MeanCurvature<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1038  op(grid, interrupt);
1039  return op.process(threaded);
1040 }
1041 
1042 template<typename GridType, typename MaskT, typename InterruptT>
1043 typename GridType::Ptr
1044 meanCurvature(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1045 {
1046  MeanCurvature<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1047  return op.process(threaded);
1048 }
1049 
1050 template<typename GridType, typename InterruptT>
1051 typename VectorToScalarConverter<GridType>::Type::Ptr
1052 magnitude(const GridType& grid, bool threaded, InterruptT* interrupt)
1053 {
1054  Magnitude<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1055  op(grid, interrupt);
1056  return op.process(threaded);
1057 }
1058 
1059 template<typename GridType, typename MaskT, typename InterruptT>
1060 typename VectorToScalarConverter<GridType>::Type::Ptr
1061 magnitude(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1062 {
1063  Magnitude<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1064  return op.process(threaded);
1065 }
1066 
1067 template<typename GridType, typename InterruptT>
1068 typename GridType::Ptr
1069 normalize(const GridType& grid, bool threaded, InterruptT* interrupt)
1070 {
1071  Normalize<GridType, typename gridop::ToMaskGrid<GridType>::Type, InterruptT>
1072  op(grid, interrupt);
1073  return op.process(threaded);
1074 }
1075 
1076 template<typename GridType, typename MaskT, typename InterruptT>
1077 typename GridType::Ptr
1078 normalize(const GridType& grid, const MaskT& mask, bool threaded, InterruptT* interrupt)
1079 {
1080  Normalize<GridType, MaskT, InterruptT> op(grid, mask, interrupt);
1081  return op.process(threaded);
1082 }
1083 
1084 ////////////////////////////////////////
1085 
1086 
1087 // Explicit Template Instantiation
1088 
1089 #ifdef OPENVDB_USE_EXPLICIT_INSTANTIATION
1090 
1091 #ifdef OPENVDB_INSTANTIATE_GRIDOPERATORS
1092 #include <openvdb/util/ExplicitInstantiation.h>
1093 #endif
1094 
1095 #define _FUNCTION(TreeT) \
1096  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, bool, util::NullInterrupter*)
1097 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1098 #undef _FUNCTION
1099 
1100 #define _FUNCTION(TreeT) \
1101  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr cpt(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1102 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1103 #undef _FUNCTION
1104 
1105 #define _FUNCTION(TreeT) \
1106  Grid<TreeT>::Ptr curl(const Grid<TreeT>&, bool, util::NullInterrupter*)
1107 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1108 #undef _FUNCTION
1109 
1110 #define _FUNCTION(TreeT) \
1111  Grid<TreeT>::Ptr curl(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1112 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1113 #undef _FUNCTION
1114 
1115 #define _FUNCTION(TreeT) \
1116  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, bool, util::NullInterrupter*)
1117 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1118 #undef _FUNCTION
1119 
1120 #define _FUNCTION(TreeT) \
1121  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr divergence(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1122 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1123 #undef _FUNCTION
1124 
1125 #define _FUNCTION(TreeT) \
1126  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, bool, util::NullInterrupter*)
1127 OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
1128 #undef _FUNCTION
1129 
1130 #define _FUNCTION(TreeT) \
1131  ScalarToVectorConverter<Grid<TreeT>>::Type::Ptr gradient(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1132 OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
1133 #undef _FUNCTION
1134 
1135 #define _FUNCTION(TreeT) \
1136  Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, bool, util::NullInterrupter*)
1137 OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
1138 #undef _FUNCTION
1139 
1140 #define _FUNCTION(TreeT) \
1141  Grid<TreeT>::Ptr laplacian(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1142 OPENVDB_NUMERIC_TREE_INSTANTIATE(_FUNCTION)
1143 #undef _FUNCTION
1144 
1145 #define _FUNCTION(TreeT) \
1146  Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, bool, util::NullInterrupter*)
1147 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1148 #undef _FUNCTION
1149 
1150 #define _FUNCTION(TreeT) \
1151  Grid<TreeT>::Ptr meanCurvature(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1152 OPENVDB_REAL_TREE_INSTANTIATE(_FUNCTION)
1153 #undef _FUNCTION
1154 
1155 #define _FUNCTION(TreeT) \
1156  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, bool, util::NullInterrupter*)
1157 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1158 #undef _FUNCTION
1159 
1160 #define _FUNCTION(TreeT) \
1161  VectorToScalarConverter<Grid<TreeT>>::Type::Ptr magnitude(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1162 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1163 #undef _FUNCTION
1164 
1165 #define _FUNCTION(TreeT) \
1166  Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, bool, util::NullInterrupter*)
1167 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1168 #undef _FUNCTION
1169 
1170 #define _FUNCTION(TreeT) \
1171  Grid<TreeT>::Ptr normalize(const Grid<TreeT>&, const BoolGrid&, bool, util::NullInterrupter*)
1172 OPENVDB_VEC3_TREE_INSTANTIATE(_FUNCTION)
1173 #undef _FUNCTION
1174 
1175 #endif // OPENVDB_USE_EXPLICIT_INSTANTIATION
1176 
1177 
1178 } // namespace tools
1179 } // namespace OPENVDB_VERSION_NAME
1180 } // namespace openvdb
1181 
1182 #endif // OPENVDB_TOOLS_GRID_OPERATORS_HAS_BEEN_INCLUDED