#include "onnxruntime_config.h"

namespace onnxruntime {

#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif

class Tensor final {
 public:
  // Copying is disallowed; Tensor is move-only.
  ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor);

  Tensor(Tensor&& other) noexcept;
  Tensor& operator=(Tensor&& other) noexcept;

  /// Creates an instance of Tensor on the heap and initializes OrtValue with it.
  /// The Tensor does not take ownership of the pre-allocated p_data buffer.
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, void* p_data,
                           const OrtMemoryInfo& location, OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// Creates an instance of Tensor on the heap and initializes OrtValue with it.
  /// The buffer is allocated with, and later freed by, the given allocator.
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape,
                           std::shared_ptr<IAllocator> allocator, OrtValue& ort_value,
                           ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

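  // Illustrative usage sketch (added to this excerpt, not part of the original
  // header): wrapping caller-owned memory in an OrtValue. The buffer, shape and
  // cpu_allocator below are assumptions chosen only for the example.
  //
  //   std::vector<float> buffer(2 * 3, 0.0f);
  //   OrtValue value;
  //   Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({2, 3}),
  //                        buffer.data(), cpu_allocator->Info(), value);
  //   // `value` now aliases `buffer` without owning it, so `buffer` must
  //   // outlive `value`.
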
  bool IsDataTypeString() const { return utils::IsPrimitiveDataType<std::string>(dtype_); }

  template <class T>
  bool IsDataType() const { return utils::IsPrimitiveDataType<T>(dtype_); }

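  // Usage note (added to this excerpt): these predicates guard the typed
  // accessors below, e.g.
  //
  //   if (tensor.IsDataType<float>()) {
  //     const float* p = tensor.Data<float>();
  //   }
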
  /// Returns a mutable, typed pointer to the tensor data.
  template <typename T>
  T* MutableData() {
    // Type check.
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  /// Returns a mutable, typed span over the tensor data.
  template <typename T>
  gsl::span<T> MutableDataAsSpan() {
    // Type check.
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }

  /// Returns a const, typed pointer to the tensor data.
  template <typename T>
  const T* Data() const {
    // Type check.
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
  }

  /// Returns a const, typed span over the tensor data.
  template <typename T>
  gsl::span<const T> DataAsSpan() const {
    // Type check.
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data,
                          static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }

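  // Illustrative usage sketch (added to this excerpt, not part of the original
  // header): `input` and `output` are assumed float tensors of equal size.
  //
  //   gsl::span<const float> in = input.DataAsSpan<float>();
  //   gsl::span<float> out = output.MutableDataAsSpan<float>();
  //   for (size_t i = 0; i < in.size(); ++i) out[i] = in[i] * 2.0f;
  //
  // Requesting a mismatched element type (e.g. Data<double>() on a float
  // tensor) fails the ORT_ENFORCE type check above.
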
  /// Returns a mutable raw pointer, enforcing that `type` matches the tensor's type.
  void* MutableDataRaw(MLDataType type) {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a const raw pointer, enforcing that `type` matches the tensor's type.
  const void* DataRaw(MLDataType type) const {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a mutable raw pointer without a type check.
  void* MutableDataRaw() noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a const raw pointer without a type check.
  const void* DataRaw() const noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

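  // Usage note (added to this excerpt): the untyped accessors pair naturally
  // with SizeInBytes() for byte-wise copies, e.g.
  //
  //   memcpy(dst.MutableDataRaw(), src.DataRaw(), src.SizeInBytes());
  //
  // The MLDataType-taking overloads perform the same type check as the typed
  // accessors; the noexcept overloads skip it.
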
  bool OwnsBuffer() const noexcept { return buffer_deleter_ != nullptr; }

  void SetByteOffset(ptrdiff_t byte_offset) { byte_offset_ = byte_offset; }

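  // Note (added to this excerpt): byte_offset_ lets a Tensor view into the
  // middle of a shared buffer; every accessor above adds it to p_data_.
  // OwnsBuffer() is true only when a deleter was supplied, i.e. when this
  // Tensor is responsible for freeing the allocation.
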
#ifdef ENABLE_STRIDED_TENSORS
  /// Returns the strides of the tensor, in number of elements per dimension.
  gsl::span<const int64_t> Strides() const;

  /// Returns whether the tensor is stored contiguously in memory.
  bool IsContiguous() const noexcept { return is_contiguous_; }

  /// Updates the tensor's shape and strides together.
  void SetShapeAndStrides(const TensorShape& new_shape, gsl::span<const int64_t> new_strides);
#endif

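  // Illustrative note (added to this excerpt, meaningful only when
  // ENABLE_STRIDED_TENSORS is defined): a contiguous tensor of shape {2, 3}
  // has strides {3, 1}; a transposed view of shape {3, 2} over the same buffer
  // has strides {1, 3}, and IsContiguous() then returns false.
  //
  //   gsl::span<const int64_t> s = tensor.Strides();
  //   // element (i, j) lives at flat index i * s[0] + j * s[1]
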
 private:
  void Init(MLDataType elt_type, const TensorShape& shape, void* p_raw_data,
            AllocatorPtr deleter, ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  void ReleaseBuffer();

#ifdef ENABLE_STRIDED_TENSORS
  bool CheckIsContiguous() const;
#endif

#ifdef ENABLE_STRIDED_TENSORS
  TensorShapeVector strides_;
  bool is_contiguous_ = true;
#endif

  const PrimitiveDataTypeBase* dtype_;
  ptrdiff_t byte_offset_;
};

#pragma GCC diagnostic pop

}  // namespace onnxruntime
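
// Illustrative end-to-end sketch appended to this excerpt (not part of the
// original header). It assumes code living inside the onnxruntime repo and a
// CPU allocator obtained elsewhere; the names FillExample and cpu_allocator
// are made up for the example.

#include <algorithm>

#include "core/framework/data_types.h"
#include "core/framework/tensor.h"

void FillExample(onnxruntime::AllocatorPtr cpu_allocator) {
  using namespace onnxruntime;
  // Allocate a 2x3 float tensor whose buffer is owned by the allocator.
  Tensor t(DataTypeImpl::GetType<float>(), TensorShape({2, 3}), cpu_allocator);
  // Write through the mutable typed span.
  auto values = t.MutableDataAsSpan<float>();
  std::fill(values.begin(), values.end(), 1.0f);
  // Read back through the const typed pointer; asking for Data<double>() here
  // would trip the ORT_ENFORCE type check.
  const float* first = t.Data<float>();
  (void)first;
}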