#include <op_kernel_context.h>
|
| OpKernelContext (_Inout_ IExecutionFrame *frame, _In_ const OpKernel *kernel, _In_ Stream *stream, _In_opt_ concurrency::ThreadPool *threadpool, _In_ const logging::Logger &logger) |
|
virtual | ~OpKernelContext ()=default |
|
virtual int | NumVariadicInputs (size_t arg_num) const |
|
virtual MLDataType | InputType (int index) const |
|
virtual MLDataType | OutputType (int index) const |
|
const OrtValue * | GetInputOrtValue (int index) const |
|
template<typename T > |
const T * | Input (int index) const |
|
template<typename T > |
const T & | RequiredInput (int index) const |
|
template<typename T > |
T * | Output (int index) |
|
Tensor * | Output (int index, const TensorShape &shape) |
|
Tensor * | Output (int index, const std::vector< int64_t > &shape) |
|
Tensor * | Output (int index, const std::initializer_list< int64_t > &shape) |
|
Tensor & | RequiredOutput (int index, const TensorShape &shape) |
|
SparseTensor * | OutputSparse (int index, const TensorShape &shape) |
|
template<typename T > |
void | OutputOptionalWithoutData (int index) |
|
virtual bool | TryGetInferredInputShape (int index, TensorShape &shape) const |
|
virtual bool | TryGetInferredOutputShape (int index, TensorShape &shape) const |
|
const logging::Logger & | Logger () const |
|
virtual int | InputCount () const |
|
virtual int | ImplicitInputCount () const |
|
virtual int | OutputCount () const |
|
virtual Status | GetTempSpaceAllocator (AllocatorPtr *output) const |
|
Status | GetTempSpaceCPUAllocator (AllocatorPtr *output) const |
|
virtual int | GetDeviceId () const |
|
virtual Stream * | GetComputeStream () const |
|
const std::string & | GetOpDomain () const |
|
const std::string & | GetOpType () const |
|
const std::string & | GetNodeName () const |
|
_Ret_maybenull_
onnxruntime::concurrency::ThreadPool * | GetOperatorThreadPool () const |
|
virtual bool | GetUseDeterministicCompute () const |
|
AllocatorPtr | GetAllocator (const OrtDevice &device) const |
|
template<> |
Tensor * | Output (int index) |
|
template<> |
SparseTensor * | Output (int index) |
|
Definition at line 11 of file op_kernel_context.h.
virtual onnxruntime::OpKernelContext::~OpKernelContext |
( |
| ) |
|
|
virtual default
virtual Stream* onnxruntime::OpKernelContext::GetComputeStream |
( |
| ) |
const |
|
inline virtual
Return the compute stream associated with the EP that the kernel is partitioned to. For EPs that do not have a compute stream (e.g. CPU EP), a nullptr is returned.
Definition at line 152 of file op_kernel_context.h.
virtual int onnxruntime::OpKernelContext::GetDeviceId |
( |
| ) |
const |
|
inline virtual
const OrtValue* onnxruntime::OpKernelContext::GetImplicitInputMLValue |
( |
int |
index | ) |
const |
|
protected |
virtual const OrtValue* onnxruntime::OpKernelContext::GetInputMLValue |
( |
int |
index | ) |
const |
|
protected virtual
const OrtValue* onnxruntime::OpKernelContext::GetInputOrtValue |
( |
int |
index | ) |
const |
|
inline |
const std::string& onnxruntime::OpKernelContext::GetNodeName |
( |
| ) |
const |
Returns the node name of the underlying kernel
const std::string& onnxruntime::OpKernelContext::GetOpDomain |
( |
| ) |
const |
Returns the opset domain of the underlying kernel
const std::string& onnxruntime::OpKernelContext::GetOpType |
( |
| ) |
const |
Returns the op type of the underlying kernel
virtual OrtValue* onnxruntime::OpKernelContext::GetOrCreateOutputMLValue |
( |
int |
index | ) |
|
|
protected virtual
OrtValue* onnxruntime::OpKernelContext::GetOutputMLValue |
( |
int |
index | ) |
|
|
protected |
virtual Status onnxruntime::OpKernelContext::GetTempSpaceAllocator |
( |
AllocatorPtr * |
output | ) |
const |
|
virtual |
Return an allocator on device 0, with memtype of OrtMemTypeDefault.
Status onnxruntime::OpKernelContext::GetTempSpaceCPUAllocator |
( |
AllocatorPtr * |
output | ) |
const |
Return the allocator associated with the CPU EP with memtype of OrtMemTypeDefault.
virtual bool onnxruntime::OpKernelContext::GetUseDeterministicCompute |
( |
| ) |
const |
|
inline virtual
virtual int onnxruntime::OpKernelContext::ImplicitInputCount |
( |
| ) |
const |
|
inline virtual
template<typename T >
const T* onnxruntime::OpKernelContext::Input |
( |
int |
index | ) |
const |
|
inline |
virtual int onnxruntime::OpKernelContext::InputCount |
( |
| ) |
const |
|
inline virtual
virtual MLDataType onnxruntime::OpKernelContext::InputType |
( |
int |
index | ) |
const |
|
virtual |
virtual int onnxruntime::OpKernelContext::NumVariadicInputs |
( |
size_t |
arg_num | ) |
const |
|
virtual |
Return the number of inputs for a variadic argument.
- Parameters
-
arg_num | The operator argument number. |
- Returns
- Number of inputs the argument has.
template<typename T >
T* onnxruntime::OpKernelContext::Output |
( |
int |
index | ) |
|
|
inline |
Tensor* onnxruntime::OpKernelContext::Output |
( |
int |
index, |
|
|
const std::vector< int64_t > & |
shape |
|
) |
| |
Tensor* onnxruntime::OpKernelContext::Output |
( |
int |
index, |
|
|
const std::initializer_list< int64_t > & |
shape |
|
) |
| |
template<>
Tensor* onnxruntime::OpKernelContext::Output |
( |
int |
index | ) |
|
|
inline |
virtual int onnxruntime::OpKernelContext::OutputCount |
( |
| ) |
const |
|
inline virtual
template<typename T >
void onnxruntime::OpKernelContext::OutputOptionalWithoutData |
( |
int |
index | ) |
|
|
inline |
virtual MLDataType onnxruntime::OpKernelContext::OutputType |
( |
int |
index | ) |
const |
|
virtual |
template<typename T >
const T& onnxruntime::OpKernelContext::RequiredInput |
( |
int |
index | ) |
const |
|
inline |
virtual bool onnxruntime::OpKernelContext::TryGetInferredInputShape |
( |
int |
index, |
|
|
TensorShape & |
shape |
|
) |
| const |
|
virtual |
virtual bool onnxruntime::OpKernelContext::TryGetInferredOutputShape |
( |
int |
index, |
|
|
TensorShape & |
shape |
|
) |
| const |
|
virtual |
The documentation for this class was generated from the following file: