tensor.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <cstddef>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "core/common/gsl.h"
#include "core/common/common.h"
#include "core/framework/allocator.h"
#include "core/framework/tensor_shape.h"
#include "onnxruntime_config.h"
#include "core/framework/data_types.h"
#include "core/framework/data_types_internal.h"

struct OrtValue;

namespace onnxruntime {

// TODO: ensure dtype_ != nullptr
#ifdef __GNUC__
#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif
#endif
/*
 We want to keep the Tensor class as simple as possible: it is just a
 placeholder for a piece of memory, with additional shape information.
 Memory is owned and managed by the Executor / Workspace, so Tensor just
 uses it and won't do any allocation / release.
*/

class Tensor final {
 public:
  // NB! The Create() methods returning unique_ptr<Tensor> have been removed.
  // They are still available in other EPs that are dynamically linked.
  // Strive not to allocate Tensor with new/delete, as it is a shallow class and using it by value is just fine.
  // Use the InitOrtValue() methods to allocate for an OrtValue.

  Tensor() = default;  // to allow creating vector<Tensor> to support seq(tensor)

  /**
   * Create tensor with given type, shape, pre-allocated memory and allocator info.
   * This function does not check if the preallocated buffer (p_data) has enough room for the shape.
   * \param elt_type Data type of the tensor elements.
   * \param shape Shape of the tensor.
   * \param p_data A preallocated buffer. Can be NULL if the shape is empty.
   *               Tensor does not own the data and will not delete it.
   * \param location Memory info for the location of p_data.
   * \param offset Offset in bytes to the start of the Tensor within p_data.
   * \param strides Strides span. Can be empty if the tensor is contiguous.
   */
  Tensor(MLDataType elt_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& location,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /**
   * Create tensor with given type, shape, pre-allocated memory and allocator which will be used to free the
   * pre-allocated memory. The Tensor will take over ownership of p_data.
   * This function does not check if the preallocated buffer (p_data) has enough room for the shape.
   * \param elt_type Data type of the tensor elements.
   * \param shape Shape of the tensor.
   * \param p_data A preallocated buffer. Can be NULL if the shape is empty.
   *               Tensor will own the memory and will delete it when the tensor instance is destructed.
   * \param deleter Allocator used to free the pre-allocated memory.
   * \param offset Offset in bytes to the start of the Tensor within p_data.
   * \param strides Strides span. Can be empty if the tensor is contiguous.
   */
  Tensor(MLDataType elt_type, const TensorShape& shape, void* p_data, std::shared_ptr<IAllocator> deleter,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// <summary>
  /// Create a Tensor that allocates and owns the buffer required for the specified shape.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Tensor shape.</param>
  /// <param name="allocator">Allocator to use to create and free the buffer.</param>
  Tensor(MLDataType elt_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator);

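  // Illustrative usage sketch (not part of the original header): constructing a
  // Tensor that allocates and owns its own buffer, assuming `cpu_allocator` is
  // an AllocatorPtr obtained elsewhere (hypothetical variable):
  //
  //   TensorShape shape({2, 3});
  //   Tensor t(DataTypeImpl::GetType<float>(), shape, cpu_allocator);
  //   float* p = t.MutableData<float>();  // typed pointer into the owned buffer
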
  ~Tensor();

  // Move is allowed
  ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor);

  Tensor(Tensor&& other) noexcept;
  Tensor& operator=(Tensor&& other) noexcept;

  /// <summary>
  /// Creates an instance of Tensor on the heap and initializes OrtValue with it.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Tensor shape.</param>
  /// <param name="p_data">Tensor data.</param>
  /// <param name="location">Memory info for location of p_data.</param>
  /// <param name="ort_value">OrtValue to populate with Tensor.</param>
  /// <param name="offset">Optional offset if Tensor refers to a subset of p_data.</param>
  /// <param name="strides">Optional strides if Tensor refers to a subset of p_data.</param>
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& location,
                           OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// <summary>
  /// Creates an instance of Tensor on the heap which will take over ownership of the pre-allocated buffer.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Tensor shape.</param>
  /// <param name="p_data">Tensor data.</param>
  /// <param name="allocator">Allocator that was used to create p_data and will be used to free it.</param>
  /// <param name="ort_value">OrtValue to populate with Tensor.</param>
  /// <param name="offset">Optional offset if Tensor refers to a subset of p_data.</param>
  /// <param name="strides">Optional strides if Tensor refers to a subset of p_data.</param>
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, void* p_data,
                           std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// <summary>
  /// Creates an instance of Tensor on the heap and initializes OrtValue with it.
  /// The Tensor instance will allocate and own the data required for `shape`.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Tensor shape.</param>
  /// <param name="allocator">Allocator to use to allocate and free the tensor buffer.</param>
  /// <param name="ort_value">OrtValue to populate with Tensor.</param>
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value);

  /// <summary>
  /// Initializes OrtValue with an existing Tensor.
  /// </summary>
  /// <param name="tensor">Tensor.</param>
  /// <param name="ort_value">OrtValue to populate with Tensor.</param>
  static void InitOrtValue(Tensor&& tensor, OrtValue& ort_value);

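  // Illustrative usage sketch (not part of the original header): the typical
  // pattern is to let InitOrtValue create the Tensor and wrap it in an
  // OrtValue, again assuming a hypothetical `cpu_allocator`:
  //
  //   OrtValue value;
  //   Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({4}),
  //                        cpu_allocator, value);
  //   Tensor& t = *value.GetMutable<Tensor>();
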
  /// <summary>
  /// Calculate the required storage for the tensor.
  /// </summary>
  /// <param name="elt_type">Data type of the tensor elements.</param>
  /// <param name="shape">Tensor shape.</param>
  /// <returns>Bytes required.</returns>
  static size_t CalculateTensorStorageSize(MLDataType elt_type, const TensorShape& shape);

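  // For example (sketch): a float tensor of shape {2, 3} needs
  // 2 * 3 * sizeof(float) bytes:
  //
  //   size_t bytes = Tensor::CalculateTensorStorageSize(
  //       DataTypeImpl::GetType<float>(), TensorShape({2, 3}));  // 24
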
  /**
   * Returns the data type.
   */
  MLDataType DataType() const { return dtype_; }

  /**
   * Returns the data type enum constant.
   * @remarks Use utils::ToTensorProtoElementType<T> for comparison.
   */
  int32_t GetElementType() const {
    return dtype_->GetDataType();
  }

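  // For example (sketch), following the remark above:
  //
  //   if (t.GetElementType() == utils::ToTensorProtoElementType<float>()) {
  //     // t holds float data
  //   }
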
  // Check if the tensor contains string data. This is a separate
  // interface because it is frequently used.
  bool IsDataTypeString() const {
    return utils::IsPrimitiveDataType<std::string>(dtype_);
  }

  // Checks if the Tensor contains data of type T.
  template <class T>
  bool IsDataType() const {
    return utils::IsPrimitiveDataType<T>(dtype_);
  }

  /**
   * Returns the shape of the tensor.
   */
  const TensorShape& Shape() const noexcept { return shape_; }

  /**
   * Returns the location of the tensor's memory.
   */
  const OrtMemoryInfo& Location() const { return alloc_info_; }

  /**
   * May return nullptr if tensor size is zero.
   */
  template <typename T>
  T* MutableData() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  /**
   * May return nullptr if tensor size is zero.
   */
  template <typename T>
  gsl::span<T> MutableDataAsSpan() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<size_t>(shape_.Size()));
  }

  template <typename T>
  const T* Data() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  template <typename T>
  gsl::span<const T> DataAsSpan() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }

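  // Illustrative usage sketch (not part of the original header): span-based
  // access gives bounds-aware iteration over the tensor contents.
  //
  //   float sum = 0.0f;
  //   for (float v : t.DataAsSpan<float>()) sum += v;  // ORT_ENFORCEs the type
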
  void* MutableDataRaw(MLDataType type) {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw(MLDataType type) const {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  void* MutableDataRaw() noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  const void* DataRaw() const noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  bool OwnsBuffer() const noexcept {
    return buffer_deleter_ != nullptr;
  }

  /**
   * Resizes the tensor without touching the underlying storage.
   * This requires the total size of the tensor to remain constant.
   * @warning this function is NOT thread-safe.
   */
  inline void Reshape(const TensorShape& new_shape) {
    ORT_ENFORCE(shape_.Size() == new_shape.Size(),
                "Tensor size (" + std::to_string(shape_.Size()) +
                    ") != new size (" + std::to_string(new_shape.Size()) + ")");
    shape_ = new_shape;
  }

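  // For example (sketch): a {2, 3} tensor may be reshaped to {3, 2} or {6}
  // (6 elements either way), but reshaping it to {4} would fail the
  // ORT_ENFORCE above.
  //
  //   t.Reshape(TensorShape({6}));
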
  /**
   * Get the byte offset with respect to p_data.
   * @warning this is a temporary solution for reusing a buffer that is bigger than needed.
   * @warning use with caution - make sure you do a boundary check before calling this method (see view.cc).
   */
  inline ptrdiff_t ByteOffset() const {
    return byte_offset_;
  }

  /**
   * Set the byte offset with respect to p_data.
   * @warning this is a temporary solution for reusing a buffer that is bigger than needed.
   */
  inline void SetByteOffset(ptrdiff_t byte_offset) {
    byte_offset_ = byte_offset;
  }

  /**
   * The number of bytes of data.
   */
  size_t SizeInBytes() const;

#ifdef ENABLE_STRIDED_TENSORS
  /**
   * Get the strides of the tensor.
   */
  gsl::span<const int64_t> Strides() const;

  /**
   * Return whether the tensor is contiguous.
   */
  bool IsContiguous() const noexcept { return is_contiguous_; }

  /**
   * Set the shape and strides.
   */
  void SetShapeAndStrides(const TensorShape& new_shape, gsl::span<const int64_t> new_strides);
#endif

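  // Illustrative sketch (only compiled with ENABLE_STRIDED_TENSORS): a
  // transposed view of a row-major {2, 3} tensor can be expressed without
  // copying by swapping the strides.
  //
  //   const int64_t transposed_strides[] = {1, 3};
  //   t.SetShapeAndStrides(TensorShape({3, 2}), transposed_strides);
  //   // t.IsContiguous() is now false
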
  // More API methods.
 private:
  void Init(MLDataType elt_type,
            const TensorShape& shape,
            void* p_raw_data,
            AllocatorPtr deleter,
            ptrdiff_t offset = 0,
            gsl::span<const int64_t> strides = {});

  void ReleaseBuffer();

#ifdef ENABLE_STRIDED_TENSORS
  bool CheckIsContiguous() const;
#endif

  void* p_data_;
  /**
   * If buffer_deleter_ is null, the tensor does not own the buffer.
   * Otherwise the tensor will use the deleter to release the buffer
   * when the tensor is released.
   */
  AllocatorPtr buffer_deleter_;

  TensorShape shape_;
#ifdef ENABLE_STRIDED_TENSORS
  mutable TensorShapeVector strides_;
  bool is_contiguous_ = true;
#endif

  const PrimitiveDataTypeBase* dtype_;
  OrtMemoryInfo alloc_info_;
  ptrdiff_t byte_offset_;
};
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
}  // namespace onnxruntime