15 #include "onnxruntime_config.h"
17 namespace onnxruntime {
19 #pragma GCC diagnostic push
20 #ifdef HAS_NULL_DEREFERENCE
21 #pragma GCC diagnostic ignored "-Wnull-dereference"
// Fragment (enclosing function not visible — presumably ToShapeVector, per the
// index below): copy the span's contents into `out`. reserve() is a capacity
// hint; assign() performs the actual element-wise copy.
34 out.reserve(span.size());
35 out.assign(span.begin(), span.end());
62 TensorShape(
const int64_t* dimension_sizes,
size_t dimension_count) :
TensorShape(gsl::
span<const int64_t>(dimension_sizes, dimension_count)) {}
// FromExistingBuffer fragment: wraps the caller's vector WITHOUT copying it.
// NOTE(review): const_cast strips const from data.data(); the returned shape
// views the caller's buffer, so it must not outlive `data` and must not be
// used to mutate it — confirm at call sites.
67 return TensorShape(External{}, gsl::span<int64_t>(
const_cast<int64_t*
>(data.data()), data.size()));
73 int64_t
operator[](
size_t idx)
const {
return values_[idx]; }
// NumDimensions fragment: rank == number of entries in values_.
80 return values_.size();
// Copy all dimension values into the caller-supplied buffer.
// (Body of this overload is not visible in this extract.)
86 void CopyDims(int64_t* dims,
size_t num_dims)
const {
// Copy up to num_dims dimension values starting at start_dim.
95 void CopyDims(int64_t* dims,
size_t start_dim,
size_t num_dims)
const {
// Copies min(num_dims, dims-remaining-after-start_dim) elements.
// NOTE(review): if start_dim > NumDimensions(), the unsigned subtraction
// below underflows and min() no longer clamps, so memcpy reads out of
// bounds — verify callers always pass a valid start_dim.
96 memcpy(dims, values_.data() + start_dim,
sizeof(int64_t) *
std::min(num_dims,
NumDimensions() - start_dim));
102 gsl::span<const int64_t>
GetDims()
const {
return values_; }
// Total element count — presumably the product of all dimensions (declaration
// only here; definition not visible in this extract).
113 int64_t
Size()
const;
// Fragment (enclosing function signature not visible): true for a
// scalar-like shape — rank 0, or rank 1 whose single dimension is 1.
156 size_t len = values_.size();
157 return len == 0 || (len == 1 && values_[0] == 1);
// Allocate backing storage for `size` dimensions (declaration only here).
164 void Allocate(
size_t size);
// Non-owning view over the currently active dimension storage.
166 gsl::span<int64_t> values_;
// Heap storage — NOTE(review): presumably used when the dims exceed the
// small inline buffer (see kTensorShapeSmallBufferElementsSize); confirm.
168 std::unique_ptr<int64_t[]> allocated_buffer_;
constexpr size_t kTensorShapeSmallBufferElementsSize
static const TensorShape FromExistingBuffer(const std::vector< int64_t > &data)
int64_t & operator[](size_t idx)
constexpr span< ElementType, Extent > make_span(span< ElementType, Extent > s) noexcept
int64_t SizeHelper(size_t start, size_t end) const
GLsizei const GLchar *const * string
TensorShape Slice(size_t dimstart, size_t dimend) const
friend struct ProviderHostImpl
ImageBuf OIIO_API min(Image_or_Const A, Image_or_Const B, ROI roi={}, int nthreads=0)
gsl::span< const int64_t > ToConstSpan(const TensorShapeVector &vec)
TensorShape(const int64_t *dimension_sizes, size_t dimension_count)
bool operator!=(const TensorShape &other) const noexcept
int64_t SizeFromDimension(size_t dimension) const
int64_t operator[](size_t idx) const
InlinedVector< int64_t > TensorShapeVector
size_t NumDimensions() const noexcept
std::string ToString() const
TensorShape(TensorShape &&other) noexcept
TensorShape & operator=(const TensorShape &other)
TensorShape & operator=(const gsl::span< const int64_t > &dims)
TensorShapeVector AsShapeVector() const
absl::InlinedVector< T, N, Allocator > InlinedVector
int64_t SizeToDimension(size_t dimension) const
TensorShape(const TensorShape &other)
bool operator==(const TensorShape &other) const noexcept
void CopyDims(int64_t *dims, size_t num_dims) const
TensorShape Slice(size_t dimstart) const
TensorShapeVector ToShapeVector(const gsl::span< const int64_t > &span)
gsl::span< const int64_t > GetDims() const
bool SpanEq(gsl::span< T1, Extent1 > a, gsl::span< T2, Extent2 > b)
TensorShape(std::initializer_list< int64_t > dims)
std::ostream & operator<<(std::ostream &out, AllocKind alloc_kind)
TensorShape(const std::vector< int64_t > &dims, size_t start, size_t end)
void CopyDims(int64_t *dims, size_t start_dim, size_t num_dims) const
PcpNodeRef_ChildrenIterator begin(const PcpNodeRef::child_const_range &r)
Support for range-based for loops for PcpNodeRef children ranges.