HDK
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
onnxruntime_lite_custom_op.h File Reference
#include "onnxruntime_cxx_api.h"
#include <optional>
#include <numeric>
#include <functional>
#include <unordered_set>
+ Include dependency graph for onnxruntime_lite_custom_op.h:

Go to the source code of this file.

Classes

class  Ort::Custom::ArgBase
 
class  Ort::Custom::TensorBase
 
struct  Ort::Custom::Span< T >
 
class  Ort::Custom::Tensor< T >
 
class  Ort::Custom::Tensor< std::string >
 
class  Ort::Custom::Tensor< std::string_view >
 
struct  Ort::Custom::TensorArray
 
struct  Ort::Custom::OrtLiteCustomOp
 
struct  Ort::Custom::OrtLiteCustomFunc< Args >
 
struct  Ort::Custom::OrtLiteCustomFunc< Args >::Kernel
 
struct  Ort::Custom::OrtLiteCustomStruct< CustomOp >
 
struct  Ort::Custom::OrtLiteCustomStruct< CustomOp >::Kernel
 

Namespaces

 Ort
 All C++ Onnxruntime APIs are defined inside this namespace.
 
 Ort::Custom
 

Macros

#define CREATE_TUPLE_INPUT(data_type)
 
#define CREATE_TUPLE_OUTPUT(data_type)
 
#define CREATE_TUPLE(data_type)
 
#define PARSE_INPUT_BASE(pack_type, onnx_type)
 
#define PARSE_INPUT(data_type, onnx_type)
 
#define PARSE_OUTPUT(data_type, onnx_type)
 
#define PARSE_ARGS(data_type, onnx_type)
 

Typedefs

using Ort::Custom::ArgPtr = std::unique_ptr< Custom::ArgBase >
 
using Ort::Custom::ArgPtrs = std::vector< ArgPtr >
 
using Ort::Custom::TensorPtr = std::unique_ptr< Custom::TensorBase >
 
using Ort::Custom::TensorPtrs = std::vector< TensorPtr >
 
using Ort::Custom::Variadic = TensorArray
 

Functions

template<typename... Args>
OrtLiteCustomOp * Ort::Custom::CreateLiteCustomOp (const char *op_name, const char *execution_provider, void(*custom_compute_fn)(Args...), Status(*shape_infer_fn)(ShapeInferContext &)={}, int start_ver=1, int end_ver=MAX_CUSTOM_OP_END_VER)
 
template<typename... Args>
OrtLiteCustomOp * Ort::Custom::CreateLiteCustomOp (const char *op_name, const char *execution_provider, Status(*custom_compute_fn_v2)(Args...), Status(*shape_infer_fn)(ShapeInferContext &)={}, int start_ver=1, int end_ver=MAX_CUSTOM_OP_END_VER)
 
template<typename CustomOp >
OrtLiteCustomOp * Ort::Custom::CreateLiteCustomOp (const char *op_name, const char *execution_provider, int start_ver=1, int end_ver=MAX_CUSTOM_OP_END_VER)
 

Macro Definition Documentation

#define CREATE_TUPLE (   data_type)
Value:
CREATE_TUPLE_INPUT(data_type) \
CREATE_TUPLE_OUTPUT(data_type)

Definition at line 627 of file onnxruntime_lite_custom_op.h.

#define CREATE_TUPLE_INPUT (   data_type)

Definition at line 498 of file onnxruntime_lite_custom_op.h.

#define CREATE_TUPLE_OUTPUT (   data_type)
Value:
template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
static typename std::enable_if<std::is_same<T, Custom::Tensor<data_type>*>::value, std::tuple<T, Ts...>>::type \
CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(args.back().get())}; \
auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
return std::tuple_cat(current, next); \
} \
template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
static typename std::enable_if<std::is_same<T, Custom::Tensor<data_type>&>::value, std::tuple<T, Ts...>>::type \
CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(*args.back().get())}; \
auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
return std::tuple_cat(current, next); \
} \
template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
static typename std::enable_if<std::is_same<T, std::optional<Custom::Tensor<data_type>*>>::value, std::tuple<T, Ts...>>::type \
CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
if (ith_output < num_output) { \
args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())}; \
auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
return std::tuple_cat(current, next); \
} else { \
std::tuple<T> current = std::tuple<T>{}; \
auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
return std::tuple_cat(current, next); \
} \
}
Cross-references (tooltip excerpts):
  string — GLsizei const GLchar *const *; defined in glcorearb.h, line 814
  value — GLsizei const GLfloat *; defined in glcorearb.h, line 824
  Ort::Custom::ArgPtrs — std::vector< ArgPtr >; defined in onnxruntime_lite_custom_op.h
  num_boxed_items — if (num_boxed_items <= 0); defined in UT_RTreeImpl.h, line 697
  args — defined in thread.h, line 609

Definition at line 596 of file onnxruntime_lite_custom_op.h.

#define PARSE_ARGS (   data_type,
  onnx_type 
)
Value:
PARSE_INPUT(data_type, onnx_type) \
PARSE_OUTPUT(data_type, onnx_type)
Cross-references (tooltip excerpts):
  PARSE_INPUT(data_type, onnx_type) — defined in onnxruntime_lite_custom_op.h
  PARSE_OUTPUT(data_type, onnx_type) — defined in onnxruntime_lite_custom_op.h
#define PARSE_INPUT (   data_type,
  onnx_type 
)
Value:
PARSE_INPUT_BASE(const Custom::Tensor<data_type>*, onnx_type) \
PARSE_INPUT_BASE(const Custom::Tensor<data_type>&, onnx_type) \
PARSE_INPUT_BASE(const Custom::Span<data_type>*, onnx_type) \
PARSE_INPUT_BASE(const Custom::Span<data_type>&, onnx_type) \
PARSE_INPUT_BASE(data_type, onnx_type)
Cross-reference (tooltip excerpt):
  PARSE_INPUT_BASE(pack_type, onnx_type) — defined in onnxruntime_lite_custom_op.h
#define PARSE_INPUT_BASE (   pack_type,
  onnx_type 
)
Value:
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, pack_type>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
input_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
} \
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const std::optional<pack_type>>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
input_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
} \
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, std::optional<pack_type>>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
input_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
}
Cross-references (tooltip excerpts):
  value — GLsizei const GLfloat *; defined in glcorearb.h, line 824
  type — defined in core.h, line 1059
  (unnamed) — defined in core.h, line 1131
#define PARSE_OUTPUT (   data_type,
  onnx_type 
)
Value:
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, Custom::Tensor<data_type>*>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
output_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
} \
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, Custom::Tensor<data_type>&>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
output_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
} \
template <typename T, typename... Ts> \
static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, std::optional<Custom::Tensor<data_type>*>>::value>::type \
ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
output_types.push_back(onnx_type); \
ParseArgs<Ts...>(input_types, output_types); \
}
Cross-references (tooltip excerpts):
  type — defined in core.h, line 1059
  (unnamed) — defined in core.h, line 1131