// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019-2021 Intel Corporation

#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP

#include <unordered_map>
#include <unordered_set>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>

#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp>   // Generic
namespace cv {
namespace gapi {
// FIXME: introduce a new sub-namespace for NN?

/**
 * @brief This namespace contains G-API OpenVINO backend functions,
 * structures, and symbols.
 */
namespace ie {

GAPI_EXPORTS cv::gapi::GBackend backend();
/**
 * Specifies how G-API and IE should treat input data.
 *
 * In OpenCV, the same cv::Mat is used to represent both
 * image and tensor data. Sometimes those are hardly distinguishable,
 * so this extra parameter is used to give G-API a hint.
 *
 * This hint controls how G-API reinterprets the data when converting
 * it to IE Blob format (and which layout/etc. is assigned to this data).
 */
enum class TraitAs: int
{
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor and passes dimensions as-is
    IMAGE   //!< G-API treats an associated cv::Mat as an image, so it creates an "image" blob (NCHW/NHWC, etc)
};

using IEConfig = std::map<std::string, std::string>;
namespace detail {
struct ParamDesc {
    std::string model_path;
    std::string weights_path;
    std::string device_id;

    std::vector<std::string> input_names;
    std::vector<std::string> output_names;

    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs;

    std::size_t num_in;
    std::size_t num_out;

    enum class Kind { Load, Import };
    Kind kind;
    bool is_generic;
    IEConfig config;

    std::map<std::string, std::vector<std::size_t>> reshape_table;
    std::unordered_set<std::string> layer_names_to_reshape;

    // NB: Number of asynchronous infer requests
    size_t nireq;

    // NB: An optional config to setup RemoteContext for IE
    cv::util::any context_config;

    // NB: batch_size can't be equal to 1 by default, because some models
    // have a 2D (Layout::NC) input, and if its first dimension is not equal to 1,
    // net.setBatchSize(1) would overwrite it.
    cv::optional<size_t> batch_size;
};
} // namespace detail
// FIXME: this is probably a shared (reusable) thing
template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
};
/**
 * @brief This structure provides functions
 * that fill inference parameters for an "OpenVINO Toolkit" model.
 */
template<typename Net> class Params {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and specifies default values for other
    inference description parameters. The model is loaded and compiled using the "OpenVINO Toolkit".

    @param model Path to topology IR (.xml file).
    @param weights Path to weights (.bin file).
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Load
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}
              , {}} {
    };
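    // NB: A minimal construction sketch (illustrative only; the MyNet type -
    // declared elsewhere via G_API_NET() - and the file paths and device name
    // are hypothetical placeholders):
    //
    //     cv::gapi::ie::Params<MyNet> params{
    //         "model.xml",  // topology IR
    //         "model.bin",  // weights
    //         "CPU"         // target device
    //     };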
    /** @overload
    Use this constructor to work with a pre-compiled network.
    The model is imported from a pre-compiled blob.

    @param model Path to model.
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Import
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}
              , {}} {
    };
    /** @brief Specifies the sequence of network input layer names for inference.

    The function is used to associate cv::gapi::infer<> inputs with the model inputs.
    The number of names has to match the number of network inputs as defined in G_API_NET().
    If a network has only a single input layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N>, where N is the number of inputs
    as defined in the @ref G_API_NET. Contains names of input layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
        desc.input_names.clear();
        desc.input_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.input_names));
        return *this;
    }

    /** @brief Specifies the sequence of network output layer names for inference.

    The function is used to associate cv::gapi::infer<> outputs with the model outputs.
    The number of names has to match the number of network outputs as defined in G_API_NET().
    If a network has only a single output layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N>, where N is the number of outputs
    as defined in the @ref G_API_NET. Contains names of output layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
        desc.output_names.clear();
        desc.output_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.output_names));
        return *this;
    }
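    // NB: A minimal sketch of associating graph inputs/outputs with model layers,
    // assuming a Params<MyNet> instance named params as in the construction sketch
    // above ("data" and "prob" are hypothetical layer names):
    //
    //     params.cfgInputLayers ({ "data" })
    //           .cfgOutputLayers({ "prob" });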
    /** @brief Specifies a constant input.

    The function is used to set a constant input. If its type is TENSOR, this input
    has to be a preprocessed tensor. The name of the network layer which will receive
    the provided data must be specified.

    @param layer_name Name of network layer.
    @param data cv::Mat that contains data which will be associated with network layer.
    @param hint Input type @sa cv::gapi::ie::TraitAs.
    @return reference to this parameter structure.
    */
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }
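    // NB: An illustrative sketch of feeding a constant tensor input; the layer
    // name "im_info" and the tensor contents are hypothetical and model-dependent:
    //
    //     cv::Mat im_info(cv::Size(3, 1), CV_32F);
    //     im_info.at<float>(0) = 480.f; // input height
    //     im_info.at<float>(1) = 640.f; // input width
    //     im_info.at<float>(2) = 1.f;   // scale
    //     params.constInput("im_info", im_info, cv::gapi::ie::TraitAs::TENSOR);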
    /** @brief Specifies OpenVINO plugin configuration.

    The function is used to set configuration for the OpenVINO plugin. Some parameters
    can be different for each plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
    to check information about a specific plugin.

    @param cfg Map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param cfg rvalue map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }
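    // NB: A sketch of passing plugin-specific options via an IEConfig map. The
    // keys/values below are assumed examples of CPU plugin settings, not a
    // guaranteed list -- consult the OpenVINO documentation for your plugin
    // and version before relying on any particular key:
    //
    //     params.pluginConfig({{ "CPU_THREADS_NUM", "4"   },
    //                          { "CPU_BIND_THREAD", "YES" }});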
    /** @brief Specifies configuration for RemoteContext in InferenceEngine.

    When RemoteContext is configured, the backend imports the networks using the context.
    It also expects cv::MediaFrames to be actually remote, in order to operate with blobs via the context.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(const cv::util::any& ctx_cfg) {
        desc.context_config = ctx_cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(cv::util::any&& ctx_cfg) {
        desc.context_config = std::move(ctx_cfg);
        return *this;
    }

    /** @brief Specifies the number of asynchronous inference requests.

    @param nireq Number of asynchronous inference requests.
    @return reference to this parameter structure.
    */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }
    /** @brief Specifies new input shapes for the network inputs.

    The function is used to specify new input shapes for the network inputs.
    Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1CNNNetwork.html
    for additional information.

    @param reshape_table Map of pairs: name of corresponding data and its dimensions.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload
    @param layer_name Name of layer.
    @param layer_dims New dimensions for this layer.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload
    @param layer_names Set of names of network layers that will be used for network reshape.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    /** @overload
    @param layer_names rvalue set of names of network layers; the selected layers
    will be reshaped automatically based on the input image size.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }
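    // NB: An illustrative reshape sketch; the layer name "data" and the
    // dimensions are hypothetical and depend entirely on the actual model:
    //
    //     // Reshape a single named input to 1x3x544x320 ...
    //     params.cfgInputReshape("data", {1, 3, 544, 320});
    //     // ... or let the backend reshape the named layers automatically
    //     // based on the input image size:
    //     params.cfgInputReshape(std::unordered_set<std::string>{"data"});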
    /** @brief Specifies the inference batch size.

    The function is used to specify the inference batch size.
    Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1CNNNetwork.html#a8e9d19270a48aab50cb5b1c43eecb8e9 for additional information.

    @param size Batch size which will be used.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgBatchSize(const size_t size) {
        desc.batch_size = cv::util::make_optional(size);
        return *this;
    }
    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};
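// NB: A minimal end-to-end sketch of plugging these parameters into a G-API
// graph (illustrative only; the MyNet type, file paths, layer setup, and
// device name are hypothetical placeholders):
//
//     G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "sample.mynet");
//
//     cv::GMat in;
//     cv::GMat out = cv::gapi::infer<MyNet>(in);
//     cv::GComputation comp(cv::GIn(in), cv::GOut(out));
//
//     auto params = cv::gapi::ie::Params<MyNet>{"model.xml", "model.bin", "CPU"};
//     cv::Mat input = cv::imread("input.jpg"), output;
//     comp.apply(cv::gin(input), cv::gout(output),
//                cv::compile_args(cv::gapi::networks(params)));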
/*
* @brief This structure provides functions for the generic network type that
* fill inference parameters.
* @see struct Generic
*/
template<>
class Params<cv::gapi::Generic> {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and sets default values for other
    inference description parameters. The model is loaded and compiled using the OpenVINO Toolkit.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to topology IR (.xml file).
    @param weights path to weights (.bin file).
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
                {}, {}},
          m_tag(tag) {
    };

    /** @overload
    This constructor is for pre-compiled networks. The model is imported from a
    pre-compiled blob.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to model.
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
                {}, {}},
          m_tag(tag) {
    };
    /** @see ie::Params::pluginConfig. */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }

    /** @see ie::Params::constInput. */
    Params& constInput(const std::string &layer_name,
                       const cv::Mat &data,
                       TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @see ie::Params::cfgNumRequests. */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }

    /** @see ie::Params::cfgInputReshape */
    Params& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    /** @see ie::Params::cfgBatchSize */
    Params& cfgBatchSize(const size_t size) {
        desc.batch_size = cv::util::make_optional(size);
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return m_tag; }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
    std::string m_tag;
};
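// NB: An illustrative sketch of using the generic specialization together with
// cv::gapi::infer<cv::gapi::Generic>; the tag "my-net", the layer names "data"
// and "prob", and the file paths are hypothetical placeholders:
//
//     auto params = cv::gapi::ie::Params<cv::gapi::Generic>{
//         "my-net", "model.xml", "model.bin", "CPU"};
//
//     cv::GMat in;
//     cv::GInferInputs inputs;
//     inputs["data"] = in;
//     cv::GInferOutputs outputs = cv::gapi::infer<cv::gapi::Generic>("my-net", inputs);
//     cv::GMat out = outputs.at("prob");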
} // namespace ie
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_IE_HPP