#include "onnxruntime_config.h"

namespace onnxruntime {

#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif
class Tensor final {
 public:
  /// Creates a tensor that allocates and owns the buffer required for `shape`.
  Tensor(MLDataType p_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator,
         gsl::span<const int64_t> strides = {});

  /// Wraps the pre-allocated buffer `p_data` and takes ownership: the allocator
  /// is retained and used to free the buffer on destruction.
  Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, std::shared_ptr<IAllocator> allocator,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// Wraps the pre-allocated buffer `p_data` without taking ownership;
  /// `location` records where the buffer resides.
  Tensor(MLDataType p_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& location,
         ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  // Move is allowed; copy is not.
  ORT_DISALLOW_COPY_AND_ASSIGNMENT(Tensor);
  Tensor(Tensor&& other) noexcept;
  Tensor& operator=(Tensor&& other) noexcept;
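  /// The constructor overloads differ mainly in buffer ownership. An
  /// illustrative sketch (not part of this header), assuming `cpu_alloc` is an
  /// AllocatorPtr for CPU memory:
  /// @code
  ///   TensorShape shape({2, 3});
  ///   MLDataType f32 = DataTypeImpl::GetType<float>();
  ///
  ///   Tensor owned(f32, shape, cpu_alloc);  // allocates and owns the buffer
  ///
  ///   void* buf = cpu_alloc->Alloc(6 * sizeof(float));
  ///   Tensor adopting(f32, shape, buf, cpu_alloc);  // frees buf on destruction
  ///
  ///   float stack_buf[6] = {};
  ///   Tensor view(f32, shape, stack_buf, cpu_alloc->Info());  // never frees
  /// @endcode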
  /// Creates an instance of Tensor on the heap using the appropriate
  /// constructor and initializes `ort_value` with it.
  static void InitOrtValue(MLDataType elt_type, const TensorShape& shape, std::shared_ptr<IAllocator> allocator,
                           OrtValue& ort_value, gsl::span<const int64_t> strides = {});

  static void InitOrtValue(MLDataType p_type, const TensorShape& shape, void* p_data, const OrtMemoryInfo& location,
                           OrtValue& ort_value, ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  /// Calculates the required storage size for the given element type, shape,
  /// and optional strides.
  static size_t CalculateTensorStorageSize(MLDataType p_type, const TensorShape& shape,
                                           gsl::span<const int64_t> strides = {});
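  /// Usage sketch for the allocating InitOrtValue overload (illustrative,
  /// assuming `cpu_alloc` as above; the tensor is read back through
  /// OrtValue::Get<Tensor>()):
  /// @code
  ///   OrtValue value;
  ///   Tensor::InitOrtValue(DataTypeImpl::GetType<float>(), TensorShape({4}),
  ///                        cpu_alloc, value);
  ///   const Tensor& t = value.Get<Tensor>();  // OrtValue owns the heap Tensor
  /// @endcode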
  /// Returns true if the tensor holds strings (stored as std::string objects).
  bool IsDataTypeString() const {
    return utils::IsPrimitiveDataType<std::string>(dtype_);
  }
  /// Returns true if the tensor's element type matches T.
  template <class T>
  bool IsDataType() const {
    return utils::IsPrimitiveDataType<T>(dtype_);
  }
  /// Returns a mutable typed pointer to the tensor data; T must match the
  /// element type.
  template <typename T>
  T* MutableData() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
  }
  /// Returns the mutable tensor data as a typed span covering all elements.
  template <typename T>
  gsl::span<T> MutableDataAsSpan() {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    T* data = reinterpret_cast<T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }
  /// Returns a const typed pointer to the tensor data; T must match the
  /// element type.
  template <typename T>
  const T* Data() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    return reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
  }
  /// Returns the tensor data as a const typed span covering all elements.
  template <typename T>
  gsl::span<const T> DataAsSpan() const {
    // Type check
    ORT_ENFORCE(utils::IsPrimitiveDataType<T>(dtype_), "Tensor type mismatch. ",
                "T ", "!=", dtype_);
    const T* data = reinterpret_cast<const T*>(static_cast<char*>(p_data_) + byte_offset_);
    return gsl::make_span(data, static_cast<typename gsl::span<T>::size_type>(shape_.Size()));
  }
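  /// A short sketch of the typed accessors (illustrative; `t` is an assumed
  /// non-const float tensor). The spans cover all Shape().Size() elements:
  /// @code
  ///   gsl::span<float> w = t.MutableDataAsSpan<float>();
  ///   for (size_t i = 0; i < w.size(); ++i) w[i] = static_cast<float>(i);
  ///
  ///   const float* r = t.Data<float>();  // checked: float must match dtype_
  ///   float first = r[0];
  ///   // t.Data<int64_t>();  // would trip the ORT_ENFORCE type check
  /// @endcode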
  /// Returns a mutable raw pointer after verifying that `type` matches the
  /// tensor's dtype.
  void* MutableDataRaw(MLDataType type) {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a const raw pointer after verifying that `type` matches the
  /// tensor's dtype.
  const void* DataRaw(MLDataType type) const {
    ORT_ENFORCE(type == dtype_, "Tensor type mismatch.", type, "!=", dtype_);
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a mutable raw pointer with no type check.
  void* MutableDataRaw() noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }

  /// Returns a const raw pointer with no type check.
  const void* DataRaw() const noexcept {
    return static_cast<char*>(p_data_) + byte_offset_;
  }
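  /// Sketch contrasting checked and unchecked raw access (illustrative,
  /// reusing the assumed float tensor `t`):
  /// @code
  ///   // Checked: throws if the dtype does not match.
  ///   const void* p1 = t.DataRaw(DataTypeImpl::GetType<float>());
  ///
  ///   // Unchecked: the caller is responsible for interpreting the bytes.
  ///   std::vector<char> dst(t.SizeInBytes());
  ///   std::memcpy(dst.data(), t.DataRaw(), t.SizeInBytes());
  /// @endcode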
  /// Returns true if the tensor owns its buffer and will release it on
  /// destruction.
  bool OwnsBuffer() const noexcept {
    return buffer_deleter_ != nullptr;
  }

  /// Returns the data type.
  MLDataType DataType() const { return dtype_; }

  /// Returns the data type enum constant.
  int32_t GetElementType() const { return dtype_->GetDataType(); }

  /// Returns the shape of the tensor.
  const TensorShape& Shape() const noexcept { return shape_; }

  /// Returns the location of the tensor's memory.
  const OrtMemoryInfo& Location() const { return alloc_info_; }

  /// Returns the number of bytes of data contained in the tensor.
  size_t SizeInBytes() const;

  /// Changes the shape in place; the element count must stay the same.
  void Reshape(const TensorShape& new_shape);

  /// Returns the byte offset of the tensor data within its buffer.
  ptrdiff_t ByteOffset() const { return byte_offset_; }
  /// Sets the byte offset of the start of the tensor data within its buffer.
  void SetByteOffset(ptrdiff_t byte_offset) {
    byte_offset_ = byte_offset;
  }
#ifdef ENABLE_STRIDED_TENSORS
  /// Returns the strides of the tensor, one entry per dimension.
  gsl::span<const int64_t> Strides() const;

  /// Returns true if the tensor layout is contiguous.
  bool IsContiguous() const noexcept { return is_contiguous_; }

  /// Sets a new shape together with matching strides.
  void SetShapeAndStrides(const TensorShape& new_shape, gsl::span<const int64_t> new_strides);
#endif
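  /// For a contiguous row-major tensor, stride[i] is the product of all
  /// dimensions after i, e.g. shape {2, 3, 4} gives strides {12, 4, 1}. An
  /// illustrative sketch (assumes ENABLE_STRIDED_TENSORS, `cpu_alloc` as
  /// above, and that Strides() reports the default layout here):
  /// @code
  ///   Tensor t3(DataTypeImpl::GetType<float>(), TensorShape({2, 3, 4}), cpu_alloc);
  ///   assert(t3.IsContiguous());
  ///   gsl::span<const int64_t> s = t3.Strides();  // expected {12, 4, 1}
  ///   // A transposed view over the same buffer would carry permuted strides,
  ///   // e.g. {1, 4, 12}, and report IsContiguous() == false.
  /// @endcode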
 private:
  void Init(MLDataType p_type, const TensorShape& shape, void* p_raw_data, AllocatorPtr deleter,
            ptrdiff_t offset = 0, gsl::span<const int64_t> strides = {});

  void ReleaseBuffer();
#ifdef ENABLE_STRIDED_TENSORS
  bool CheckIsContiguous() const;
#endif
  void* p_data_;
  AllocatorPtr buffer_deleter_;

  TensorShape shape_;
  OrtMemoryInfo alloc_info_;
#ifdef ENABLE_STRIDED_TENSORS
  TensorShapeVector strides_;
  bool is_contiguous_ = true;
#endif

  const PrimitiveDataTypeBase* dtype_;
  ptrdiff_t byte_offset_;
};
#pragma GCC diagnostic pop

}  // namespace onnxruntime