tensor_shape.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <algorithm>
#include <cstring>
#include <iosfwd>
#include <memory>
#include <string>
#include <vector>

#include "core/common/gsl.h"
#include "core/common/inlined_containers_fwd.h"
#include "core/common/span_utils.h"
#include "onnxruntime_config.h"

namespace onnxruntime {
#ifdef __GNUC__
#pragma GCC diagnostic push
#ifdef HAS_NULL_DEREFERENCE
#pragma GCC diagnostic ignored "-Wnull-dereference"
#endif
#endif

constexpr size_t kTensorShapeSmallBufferElementsSize = 5;

// Use this type to build a shape and then create TensorShape.
// We opt to re-use a common instantiation instead of a typedef with kTensorShapeSmallBufferElementsSize
// to reduce binary size.
using TensorShapeVector = InlinedVector<int64_t>;

inline TensorShapeVector ToShapeVector(const gsl::span<const int64_t>& span) {
  TensorShapeVector out;
  out.reserve(span.size());
  out.assign(span.begin(), span.end());
  return out;
}

inline gsl::span<const int64_t> ToConstSpan(const TensorShapeVector& vec) {
  return gsl::make_span(vec);
}
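
// Illustrative sketch (not part of the original header): round-tripping a shape
// between TensorShapeVector and gsl::span with the helpers above.
//
//   TensorShapeVector dims{2, 3, 4};                  // build or edit a shape here
//   gsl::span<const int64_t> view = ToConstSpan(dims);
//   TensorShapeVector copy = ToShapeVector(view);     // element-wise copy of dims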

class TensorShape {
  // We use negative numbers for unknown symbolic dimension. Each negative
  // number represents a unique symbolic dimension.
 public:
  TensorShape() = default;

  TensorShape(const TensorShape& other) : TensorShape(other.GetDims()) {}
  TensorShape& operator=(const TensorShape& other);
  TensorShape& operator=(const gsl::span<const int64_t>& dims) {
    *this = TensorShape(dims);
    return *this;
  }

  TensorShape(TensorShape&& other) noexcept { operator=(std::move(other)); }
  TensorShape& operator=(TensorShape&& other) noexcept;

  TensorShape(gsl::span<const int64_t> dims);
  TensorShape(const TensorShapeVector& dims) : TensorShape(gsl::make_span(dims)) {}
  TensorShape(std::initializer_list<int64_t> dims) : TensorShape(gsl::make_span(dims.begin(), dims.end())) {}
  TensorShape(const int64_t* dimension_sizes, size_t dimension_count) : TensorShape(gsl::span<const int64_t>(dimension_sizes, dimension_count)) {}
  TensorShape(const std::vector<int64_t>& dims, size_t start, size_t end) : TensorShape(gsl::span<const int64_t>(&dims[start], end - start)) {}
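
  // Illustrative sketch (not part of the original header): the constructors above
  // accept several dimension sources; all of them copy the dimension values.
  //
  //   TensorShape a{2, 3, 4};                 // initializer_list
  //   std::vector<int64_t> v{1, 2, 3, 4};
  //   TensorShape b(v.data(), v.size());      // pointer + count -> {1, 2, 3, 4}
  //   TensorShape c(v, 1, 3);                 // v[1]..v[2]      -> {2, 3}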

  // Create a TensorShape that points to an existing buffer internally. As no copy is made, 'data' must remain valid for the life of the TensorShape
  static const TensorShape FromExistingBuffer(const std::vector<int64_t>& data) {
    return TensorShape(External{}, gsl::span<int64_t>(const_cast<int64_t*>(data.data()), data.size()));
  }
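
  // Illustrative sketch (not part of the original header): unlike the constructors,
  // FromExistingBuffer does not copy, so the vector must outlive the shape.
  //
  //   std::vector<int64_t> dims{2, 3, 4};
  //   TensorShape shape = TensorShape::FromExistingBuffer(dims);  // wraps dims' buffer
  //   // keep 'dims' alive (and unresized) for as long as 'shape' is used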

  /**
     Return the dimension specified by <idx>.
  */
  int64_t operator[](size_t idx) const { return values_[idx]; }
  int64_t& operator[](size_t idx) { return values_[idx]; }

  bool operator==(const TensorShape& other) const noexcept { return SpanEq(GetDims(), other.GetDims()); }
  bool operator!=(const TensorShape& other) const noexcept { return !(*this == other); }

  size_t NumDimensions() const noexcept {
    return values_.size();
  }

  /**
     Copy dims into an array with given size
  */
  void CopyDims(int64_t* dims, size_t num_dims) const {
    memcpy(dims, values_.data(), sizeof(int64_t) * std::min(num_dims, NumDimensions()));
  }

  /**
     Copy dims from a specific start dim into an array with given size
     `start_dim` is expected to be in the inclusive range [0, NumDimensions() - 1]
     and this function does no checks to ensure that
  */
  void CopyDims(int64_t* dims, size_t start_dim, size_t num_dims) const {
    memcpy(dims, values_.data() + start_dim, sizeof(int64_t) * std::min(num_dims, NumDimensions() - start_dim));
  }
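
  // Illustrative sketch (not part of the original header): both overloads clamp the
  // copy count to the available dimensions, but start_dim itself is not validated.
  //
  //   TensorShape shape{2, 3, 4};
  //   int64_t buf[4];
  //   shape.CopyDims(buf, 4);      // copies 3 values: 2, 3, 4
  //   shape.CopyDims(buf, 1, 4);   // copies 2 values: 3, 4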

  /**
     Return underlying vector representation.
  */
  gsl::span<const int64_t> GetDims() const { return values_; }

  TensorShapeVector AsShapeVector() const {
    return ToShapeVector(values_);
  }

  /**
   * Return the total number of elements. Returns 1 for an empty (rank 0) TensorShape.
   *
   * May return -1 if any dimension is unknown (negative).
   */
  int64_t Size() const;

  /**
     Return the total number of elements up to the specified dimension.
     If the dimension interval is empty (dimension == 0), return 1.
     @param dimension Return size up to this dimension. Value must be between 0 and this->NumDimensions(), inclusive.
  */
  int64_t SizeToDimension(size_t dimension) const;

  /**
     Return the total number of elements from the specified dimension to the end of the tensor shape.
     If the dimension interval is empty (dimension == this->NumDimensions()), return 1.
     @param dimension Return size from this dimension to the end. Value must be between 0 and this->NumDimensions(),
     inclusive.
  */
  int64_t SizeFromDimension(size_t dimension) const;
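
  // Illustrative sketch (not part of the original header): Size() multiplies all
  // dimensions, SizeToDimension() only the leading ones, SizeFromDimension() only
  // the trailing ones.
  //
  //   TensorShape shape{2, 3, 4};
  //   shape.Size();                // 2 * 3 * 4 = 24
  //   shape.SizeToDimension(2);    // 2 * 3     = 6   (dims [0, 2))
  //   shape.SizeFromDimension(1);  // 3 * 4     = 12  (dims [1, NumDimensions()))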

  /**
     Return a new TensorShape of the dimensions from dimstart (inclusive) to dimend (exclusive).
  */
  TensorShape Slice(size_t dimstart, size_t dimend) const;

  /**
     Return a new TensorShape of the dimensions from dimstart to the end.
  */
  TensorShape Slice(size_t dimstart) const { return Slice(dimstart, values_.size()); }
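
  // Illustrative sketch (not part of the original header):
  //
  //   TensorShape shape{2, 3, 4};
  //   TensorShape head = shape.Slice(0, 2);  // {2, 3}
  //   TensorShape tail = shape.Slice(1);     // {3, 4}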

  /**
     output dimensions nicely formatted
  */
  std::string ToString() const;

  /**
     Calculate size between start and end.
     Assumes start and end are between 0 and this->NumDimensions(), inclusive, and that
     start < end.
  */
  int64_t SizeHelper(size_t start, size_t end) const;

  /**
     An empty shape or a 1D shape with a single value of 1 is regarded as a scalar tensor.
  */
  bool IsScalar() const {
    size_t len = values_.size();
    return len == 0 || (len == 1 && values_[0] == 1);
  }
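
  // Illustrative sketch (not part of the original header):
  //
  //   TensorShape{}.IsScalar();      // true  (rank 0)
  //   TensorShape{1}.IsScalar();     // true  (shape {1})
  //   TensorShape{1, 1}.IsScalar();  // false (rank 2)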

 private:
  struct External {};
  TensorShape(External, gsl::span<int64_t> buffer) : values_{buffer} {}

  void Allocate(size_t size);

  gsl::span<int64_t> values_;
  int64_t small_buffer_[kTensorShapeSmallBufferElementsSize]{0};
  std::unique_ptr<int64_t[]> allocated_buffer_;

  friend struct ProviderHostImpl;  // So that the shared provider interface can access Allocate
};

// operator<< to nicely output to a stream
std::ostream& operator<<(std::ostream& out, const TensorShape& shape);

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
}  // namespace onnxruntime