diff --git a/include/api/context.h b/include/api/context.h
index d5fde38c945d0ecfcca2ff92e3bc3a7b0bc19206..81eea94c103d04ebecda4cd03775a4a1119e63e8 100644
--- a/include/api/context.h
+++ b/include/api/context.h
@@ -31,6 +31,7 @@ enum DeviceType {
   kAscend,
   kAscend910,
   kAscend310,
+  kHexagonDSP = 6,  // add new type here
   kInvalidDeviceType = 100,
 };
 
@@ -99,6 +100,16 @@ class MS_API Context {
   /// \return Pointer to the custom delegate.
   std::shared_ptr<Delegate> GetDelegate() const;
 
+  /// \brief Set whether the multi-modal hardware runs in float mode.
+  ///
+  /// \param[in] float_mode Define whether the multi-modal hardware runs in float mode.
+  void SetMultiModalHW(bool float_mode);
+
+  /// \brief Obtain whether the multi-modal hardware runs in float mode.
+  ///
+  /// \return Whether the multi-modal hardware runs in float mode.
+  bool GetMultiModalHW() const;
+
   /// \brief Get a mutable reference of DeviceInfoContext vector in this context. Only MindSpore Lite supports
   /// heterogeneous scenarios with multiple members in the vector.
   ///
@@ -492,5 +503,10 @@ void AscendDeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {
   SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
 }
 std::string AscendDeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
+
+class MS_API HexagonDspDeviceInfo : public DeviceInfoContext {
+ public:
+  enum DeviceType GetDeviceType() const override { return DeviceType::kHexagonDSP; }
+};
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_CONTEXT_H
diff --git a/include/c_api/context_c.h b/include/c_api/context_c.h
index 980b55b6911675162964af5e1fa5726dfa4e9ce4..e1d38cb4b04d5fcebfe7b12f94c84b2053dd7082 100644
--- a/include/c_api/context_c.h
+++ b/include/c_api/context_c.h
@@ -25,54 +25,55 @@
 extern "C" {
 #endif
 
-typedef void *MSContextHandle;
-typedef void *MSDeviceInfoHandle;
+typedef void *OH_AI_ContextHandle;
+typedef void *OH_AI_DeviceInfoHandle;
 
 /// \brief Create a context object.
 ///
 /// \return Context object handle.
-MS_API MSContextHandle MSContextCreate();
+OH_AI_API OH_AI_ContextHandle OH_AI_ContextCreate();
 
 /// \brief Destroy the context object.
 ///
 /// \param[in] context Context object handle address.
-MS_API void MSContextDestroy(MSContextHandle *context);
+OH_AI_API void OH_AI_ContextDestroy(OH_AI_ContextHandle *context);
 
 /// \brief Set the number of threads at runtime.
 ///
 /// \param[in] context Context object handle.
/// \param[in] thread_num The number of threads at runtime.
-MS_API void MSContextSetThreadNum(MSContextHandle context, int32_t thread_num);
+OH_AI_API void OH_AI_ContextSetThreadNum(OH_AI_ContextHandle context, int32_t thread_num);
 
 /// \brief Obtain the current thread number setting.
 ///
 /// \param[in] context Context object handle.
 ///
 /// \return The current thread number setting.
-MS_API int32_t MSContextGetThreadNum(const MSContextHandle context);
+OH_AI_API int32_t OH_AI_ContextGetThreadNum(const OH_AI_ContextHandle context);
 
 /// \brief Set the thread affinity to CPU cores.
 ///
 /// \param[in] context Context object handle.
 /// \param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first
-MS_API void MSContextSetThreadAffinityMode(MSContextHandle context, int mode);
+OH_AI_API void OH_AI_ContextSetThreadAffinityMode(OH_AI_ContextHandle context, int mode);
 
 /// \brief Obtain the thread affinity of CPU cores.
 ///
 /// \param[in] context Context object handle.
 ///
 /// \return Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first
-MS_API int MSContextGetThreadAffinityMode(const MSContextHandle context);
+OH_AI_API int OH_AI_ContextGetThreadAffinityMode(const OH_AI_ContextHandle context);
 
 /// \brief Set the thread lists to CPU cores.
/// -/// \note If core_list and mode are set by MSContextSetThreadAffinityMode at the same time, +/// \note If core_list and mode are set by OH_AI_ContextSetThreadAffinityMode at the same time, /// the core_list is effective, but the mode is not effective. /// /// \param[in] context Context object handle. /// \param[in] core_list: a array of thread core lists. /// \param[in] core_num The number of core. -MS_API void MSContextSetThreadAffinityCoreList(MSContextHandle context, const int32_t *core_list, size_t core_num); +OH_AI_API void OH_AI_ContextSetThreadAffinityCoreList(OH_AI_ContextHandle context, const int32_t *core_list, + size_t core_num); /// \brief Obtain the thread lists of CPU cores. /// @@ -80,98 +81,98 @@ MS_API void MSContextSetThreadAffinityCoreList(MSContextHandle context, const in /// \param[out] core_num The number of core. /// /// \return a array of thread core lists. -MS_API const int32_t *MSContextGetThreadAffinityCoreList(const MSContextHandle context, size_t *core_num); +OH_AI_API const int32_t *OH_AI_ContextGetThreadAffinityCoreList(const OH_AI_ContextHandle context, size_t *core_num); /// \brief Set the status whether to perform model inference or training in parallel. /// /// \param[in] context Context object handle. /// \param[in] is_parallel: true, parallel; false, not in parallel. -MS_API void MSContextSetEnableParallel(MSContextHandle context, bool is_parallel); +OH_AI_API void OH_AI_ContextSetEnableParallel(OH_AI_ContextHandle context, bool is_parallel); /// \brief Obtain the status whether to perform model inference or training in parallel. /// /// \param[in] context Context object handle. /// /// \return Bool value that indicates whether in parallel. -MS_API bool MSContextGetEnableParallel(const MSContextHandle context); +OH_AI_API bool OH_AI_ContextGetEnableParallel(const OH_AI_ContextHandle context); /// \brief Add device info to context object. /// /// \param[in] context Context object handle. /// \param[in] device_info Device info object handle. -MS_API void MSContextAddDeviceInfo(MSContextHandle context, MSDeviceInfoHandle device_info); +OH_AI_API void OH_AI_ContextAddDeviceInfo(OH_AI_ContextHandle context, OH_AI_DeviceInfoHandle device_info); /// \brief Create a device info object. /// /// \param[in] device_info Device info object handle. /// /// \return Device info object handle. -MS_API MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type); +OH_AI_API OH_AI_DeviceInfoHandle OH_AI_DeviceInfoCreate(OH_AI_DeviceType device_type); /// \brief Destroy the device info object. /// /// \param[in] device_info Device info object handle address. -MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info); +OH_AI_API void OH_AI_DeviceInfoDestroy(OH_AI_DeviceInfoHandle *device_info); /// \brief Set provider's name. /// /// \param[in] device_info Device info object handle. /// \param[in] provider define the provider's name. -MS_API void MSDeviceInfoSetProvider(MSDeviceInfoHandle device_info, const char *provider); +OH_AI_API void OH_AI_DeviceInfoSetProvider(OH_AI_DeviceInfoHandle device_info, const char *provider); /// \brief Obtain provider's name /// /// \param[in] device_info Device info object handle. /// /// \return provider's name. -MS_API const char *MSDeviceInfoGetProvider(const MSDeviceInfoHandle device_info); +OH_AI_API const char *OH_AI_DeviceInfoGetProvider(const OH_AI_DeviceInfoHandle device_info); /// \brief Set provider's device type. /// /// \param[in] device_info Device info object handle. 
/// \param[in] device define the provider's device type. EG: CPU. -MS_API void MSDeviceInfoSetProviderDevice(MSDeviceInfoHandle device_info, const char *device); +OH_AI_API void OH_AI_DeviceInfoSetProviderDevice(OH_AI_DeviceInfoHandle device_info, const char *device); /// \brief Obtain provider's device type. /// /// \param[in] device_info Device info object handle. /// /// \return provider's device type. -MS_API const char *MSDeviceInfoGetProviderDevice(const MSDeviceInfoHandle device_info); +OH_AI_API const char *OH_AI_DeviceInfoGetProviderDevice(const OH_AI_DeviceInfoHandle device_info); /// \brief Obtain the device type of the device info. /// /// \param[in] device_info Device info object handle. /// /// \return Device Type of the device info. -MS_API MSDeviceType MSDeviceInfoGetDeviceType(const MSDeviceInfoHandle device_info); +OH_AI_API OH_AI_DeviceType OH_AI_DeviceInfoGetDeviceType(const OH_AI_DeviceInfoHandle device_info); /// \brief Set enables to perform the float16 inference, Only valid for CPU/GPU. /// /// \param[in] device_info Device info object handle. /// \param[in] is_fp16 Enable float16 inference or not. -MS_API void MSDeviceInfoSetEnableFP16(MSDeviceInfoHandle device_info, bool is_fp16); +OH_AI_API void OH_AI_DeviceInfoSetEnableFP16(OH_AI_DeviceInfoHandle device_info, bool is_fp16); /// \brief Obtain enables to perform the float16 inference, Only valid for CPU/GPU. /// /// \param[in] device_info Device info object handle. /// /// \return Whether enable float16 inference. -MS_API bool MSDeviceInfoGetEnableFP16(const MSDeviceInfoHandle device_info); +OH_AI_API bool OH_AI_DeviceInfoGetEnableFP16(const OH_AI_DeviceInfoHandle device_info); /// \brief Set the NPU frequency, Only valid for NPU. /// /// \param[in] device_info Device info object handle. /// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme /// performance), default as 3. -MS_API void MSDeviceInfoSetFrequency(MSDeviceInfoHandle device_info, int frequency); +OH_AI_API void OH_AI_DeviceInfoSetFrequency(OH_AI_DeviceInfoHandle device_info, int frequency); /// \brief Obtain the NPU frequency, Only valid for NPU. /// /// \param[in] device_info Device info object handle. 
/// /// \return NPU frequency -MS_API int MSDeviceInfoGetFrequency(const MSDeviceInfoHandle device_info); +OH_AI_API int OH_AI_DeviceInfoGetFrequency(const OH_AI_DeviceInfoHandle device_info); #ifdef __cplusplus } diff --git a/include/c_api/data_type_c.h b/include/c_api/data_type_c.h index 3b736e0cc784b86079baea0b19461e1874b227f4..124ed17b3cdeca57eafa9b1e945160bf6fefc30b 100644 --- a/include/c_api/data_type_c.h +++ b/include/c_api/data_type_c.h @@ -22,29 +22,29 @@ extern "C" { #endif -typedef enum MSDataType { - kMSDataTypeUnknown = 0, - kMSDataTypeObjectTypeString = 12, - kMSDataTypeObjectTypeList = 13, - kMSDataTypeObjectTypeTuple = 14, - kMSDataTypeObjectTypeTensor = 17, - kMSDataTypeNumberTypeBegin = 29, - kMSDataTypeNumberTypeBool = 30, - kMSDataTypeNumberTypeInt8 = 32, - kMSDataTypeNumberTypeInt16 = 33, - kMSDataTypeNumberTypeInt32 = 34, - kMSDataTypeNumberTypeInt64 = 35, - kMSDataTypeNumberTypeUInt8 = 37, - kMSDataTypeNumberTypeUInt16 = 38, - kMSDataTypeNumberTypeUInt32 = 39, - kMSDataTypeNumberTypeUInt64 = 40, - kMSDataTypeNumberTypeFloat16 = 42, - kMSDataTypeNumberTypeFloat32 = 43, - kMSDataTypeNumberTypeFloat64 = 44, - kMSDataTypeNumberTypeEnd = 46, +typedef enum OH_AI_DataType { + OH_AI_DATATYPE_UNKNOWN = 0, + OH_AI_DATATYPE_OBJECTTYPE_STRING = 12, + OH_AI_DATATYPE_OBJECTTYPE_LIST = 13, + OH_AI_DATATYPE_OBJECTTYPE_TUPLE = 14, + OH_AI_DATATYPE_OBJECTTYPE_TENSOR = 17, + OH_AI_DATATYPE_NUMBERTYPE_BEGIN = 29, + OH_AI_DATATYPE_NUMBERTYPE_BOOL = 30, + OH_AI_DATATYPE_NUMBERTYPE_INT8 = 32, + OH_AI_DATATYPE_NUMBERTYPE_INT16 = 33, + OH_AI_DATATYPE_NUMBERTYPE_INT32 = 34, + OH_AI_DATATYPE_NUMBERTYPE_INT64 = 35, + OH_AI_DATATYPE_NUMBERTYPE_UINT8 = 37, + OH_AI_DATATYPE_NUMBERTYPE_UINT16 = 38, + OH_AI_DATATYPE_NUMBERTYPE_UINT32 = 39, + OH_AI_DATATYPE_NUMBERTYPE_UINT64 = 40, + OH_AI_DATATYPE_NUMBERTYPE_FLOAT16 = 42, + OH_AI_DATATYPE_NUMBERTYPE_FLOAT32 = 43, + OH_AI_DATATYPE_NUMBERTYPE_FLOAT64 = 44, + OH_AI_DATATYPE_NUMBERTYPE_END = 46, // add new enum here - kMSDataTypeInvalid = INT32_MAX, -} MSDataType; + OH_AI_DataTypeInvalid = INT32_MAX, +} OH_AI_DataType; #ifdef __cplusplus } diff --git a/include/c_api/format_c.h b/include/c_api/format_c.h index 7b73dabfddd790c8eb26e0181f5532b035399476..36b355fe6eead8465cf82b26972196b8d93f9a72 100644 --- a/include/c_api/format_c.h +++ b/include/c_api/format_c.h @@ -20,25 +20,25 @@ extern "C" { #endif -typedef enum MSFormat { - kMSFormatNCHW = 0, - kMSFormatNHWC = 1, - kMSFormatNHWC4 = 2, - kMSFormatHWKC = 3, - kMSFormatHWCK = 4, - kMSFormatKCHW = 5, - kMSFormatCKHW = 6, - kMSFormatKHWC = 7, - kMSFormatCHWK = 8, - kMSFormatHW = 9, - kMSFormatHW4 = 10, - kMSFormatNC = 11, - kMSFormatNC4 = 12, - kMSFormatNC4HW4 = 13, - kMSFormatNCDHW = 15, - kMSFormatNWC = 16, - kMSFormatNCW = 17 -} MSFormat; +typedef enum OH_AI_Format { + OH_AI_FORMAT_NCHW = 0, + OH_AI_FORMAT_NHWC = 1, + OH_AI_FORMAT_NHWC4 = 2, + OH_AI_FORMAT_HWKC = 3, + OH_AI_FORMAT_HWCK = 4, + OH_AI_FORMAT_KCHW = 5, + OH_AI_FORMAT_CKHW = 6, + OH_AI_FORMAT_KHWC = 7, + OH_AI_FORMAT_CHWK = 8, + OH_AI_FORMAT_HW = 9, + OH_AI_FORMAT_HW4 = 10, + OH_AI_FORMAT_NC = 11, + OH_AI_FORMAT_NC4 = 12, + OH_AI_FORMAT_NC4HW4 = 13, + OH_AI_FORMAT_NCDHW = 15, + OH_AI_FORMAT_NWC = 16, + OH_AI_FORMAT_NCW = 17 +} OH_AI_Format; #ifdef __cplusplus } diff --git a/include/c_api/model_c.h b/include/c_api/model_c.h index ddd31b52a86c23855670a40ec95eade9d8b77b66..346252d3923cc4b73ffcb703ac5535231f9b05df 100644 --- a/include/c_api/model_c.h +++ b/include/c_api/model_c.h @@ -24,43 +24,43 @@ extern "C" { #endif -typedef void *MSModelHandle; 
+typedef void *OH_AI_ModelHandle; -typedef struct MSTensorHandleArray { +typedef struct OH_AI_TensorHandleArray { size_t handle_num; - MSTensorHandle *handle_list; -} MSTensorHandleArray; + OH_AI_TensorHandle *handle_list; +} OH_AI_TensorHandleArray; -#define MS_MAX_SHAPE_NUM 32 -typedef struct MSShapeInfo { +#define OH_AI_MAX_SHAPE_NUM 32 +typedef struct OH_AI_ShapeInfo { size_t shape_num; - int64_t shape[MS_MAX_SHAPE_NUM]; -} MSShapeInfo; + int64_t shape[OH_AI_MAX_SHAPE_NUM]; +} OH_AI_ShapeInfo; -typedef struct MSCallBackParamC { +typedef struct OH_AI_CallBackParam { char *node_name; char *node_type; -} MSCallBackParamC; +} OH_AI_CallBackParam; -typedef bool (*MSKernelCallBackC)(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs, - const MSCallBackParamC kernel_Info); +typedef bool (*OH_AI_KernelCallBack)(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs, + const OH_AI_CallBackParam kernel_Info); /// \brief Create a model object. Only valid for Lite. /// /// \return Model object handle. -MS_API MSModelHandle MSModelCreate(); +OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate(); /// \brief Destroy the model object. Only valid for Lite. /// /// \param[in] model Model object handle address. -MS_API void MSModelDestroy(MSModelHandle *model); +OH_AI_API void OH_AI_ModelDestroy(OH_AI_ModelHandle *model); /// \brief Set workspace for the model object. Only valid for Iot. /// /// \param[in] model Model object handle. /// \param[in] workspace Define the workspace address. /// \param[in] workspace_size Define the workspace size. -MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size); +OH_AI_API void OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model, void *workspace, size_t workspace_size); /// \brief Build the model from model file buffer so that it can run on a device. Only valid for Lite. /// @@ -70,9 +70,9 @@ MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t wor /// \param[in] model_type Define The type of model file. /// \param[in] model_context Define the context used to store options during execution. /// -/// \return MSStatus. -MS_API MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type, - const MSContextHandle model_context); +/// \return OH_AI_Status. +OH_AI_API OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, + OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context); /// \brief Load and build the model from model path so that it can run on a device. Only valid for Lite. /// @@ -81,9 +81,9 @@ MS_API MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t /// \param[in] model_type Define The type of model file. /// \param[in] model_context Define the context used to store options during execution. /// -/// \return MSStatus. -MS_API MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path, MSModelType model_type, - const MSContextHandle model_context); +/// \return OH_AI_Status. +OH_AI_API OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, + OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context); /// \brief Resizes the shapes of inputs. /// @@ -92,9 +92,9 @@ MS_API MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path /// \param[in] shape_infos Defines the new shapes of inputs, should be consistent with inputs. /// \param[in] shape_info_num The num of shape_infos. 
/// -/// \return MSStatus. -MS_API MSStatus MSModelResize(MSModelHandle model, const MSTensorHandleArray inputs, MSShapeInfo *shape_infos, - size_t shape_info_num); +/// \return OH_AI_Status. +OH_AI_API OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, + OH_AI_ShapeInfo *shape_infos, size_t shape_info_num); /// \brief Inference model. /// @@ -104,23 +104,24 @@ MS_API MSStatus MSModelResize(MSModelHandle model, const MSTensorHandleArray inp /// \param[in] before CallBack before predict. /// \param[in] after CallBack after predict. /// -/// \return MSStatus. -MS_API MSStatus MSModelPredict(MSModelHandle model, const MSTensorHandleArray inputs, MSTensorHandleArray *outputs, - const MSKernelCallBackC before, const MSKernelCallBackC after); +/// \return OH_AI_Status. +OH_AI_API OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, + OH_AI_TensorHandleArray *outputs, const OH_AI_KernelCallBack before, + const OH_AI_KernelCallBack after); /// \brief Obtains all input tensor handles of the model. /// /// \param[in] model Model object handle. /// /// \return The array that includes all input tensor handles. -MS_API MSTensorHandleArray MSModelGetInputs(const MSModelHandle model); +OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model); /// \brief Obtains all output tensor handles of the model. /// /// \param[in] model Model object handle. /// /// \return The array that includes all output tensor handles. -MS_API MSTensorHandleArray MSModelGetOutputs(const MSModelHandle model); +OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model); /// \brief Obtains the input tensor handle of the model by name. /// @@ -128,7 +129,7 @@ MS_API MSTensorHandleArray MSModelGetOutputs(const MSModelHandle model); /// \param[in] tensor_name The name of tensor. /// /// \return The input tensor handle with the given name, if the name is not found, an NULL is returned. -MS_API MSTensorHandle MSModelGetInputByTensorName(const MSModelHandle model, const char *tensor_name); +OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name); /// \brief Obtains the output tensor handle of the model by name. /// @@ -136,7 +137,7 @@ MS_API MSTensorHandle MSModelGetInputByTensorName(const MSModelHandle model, con /// \param[in] tensor_name The name of tensor. /// /// \return The output tensor handle with the given name, if the name is not found, an NULL is returned. 
-MS_API MSTensorHandle MSModelGetOutputByTensorName(const MSModelHandle model, const char *tensor_name); +OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name); #ifdef __cplusplus } diff --git a/include/c_api/status_c.h b/include/c_api/status_c.h index 62b19b994603b366e14dbe67176d8a2692c6fe97..102c7962a11294d75580d481305d702ee003e3e3 100644 --- a/include/c_api/status_c.h +++ b/include/c_api/status_c.h @@ -22,54 +22,58 @@ extern "C" { #endif -enum MSCompCode { - kMSCompCodeCore = 0x00000000u, - kMSCompCodeMD = 0x10000000u, - kMSCompCodeME = 0x20000000u, - kMSCompCodeMC = 0x30000000u, - kMSCompCodeLite = 0xF0000000u, +enum OH_AI_CompCode { + OH_AI_COMPCODE_CORE = 0x00000000u, + OH_AI_COMPCODE_MD = 0x10000000u, + OH_AI_COMPCODE_ME = 0x20000000u, + OH_AI_COMPCODE_MC = 0x30000000u, + OH_AI_COMPCODE_LITE = 0xF0000000u, }; -typedef enum MSStatus { - kMSStatusSuccess = 0, +typedef enum OH_AI_Status { + OH_AI_STATUS_SUCCESS = 0, // Core - kMSStatusCoreFailed = kMSCompCodeCore | 0x1, + OH_AI_STATUS_CORE_FAILED = OH_AI_COMPCODE_CORE | 0x1, // Lite // Common error code, range: [-1, -100) - kMSStatusLiteError = kMSCompCodeLite | (0x0FFFFFFF & -1), /**< Common error code. */ - kMSStatusLiteNullptr = kMSCompCodeLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ - kMSStatusLiteParamInvalid = kMSCompCodeLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ - kMSStatusLiteNoChange = kMSCompCodeLite | (0x0FFFFFFF & -4), /**< No change. */ - kMSStatusLiteSuccessExit = kMSCompCodeLite | (0x0FFFFFFF & -5), /**< No error but exit. */ - kMSStatusLiteMemoryFailed = kMSCompCodeLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */ - kMSStatusLiteNotSupport = kMSCompCodeLite | (0x0FFFFFFF & -7), /**< Fail to support. */ - kMSStatusLiteThreadPoolError = kMSCompCodeLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */ - kMSStatusLiteUninitializedObj = kMSCompCodeLite | (0x0FFFFFFF & -9), /**< Object is not initialized. */ + OH_AI_STATUS_LITE_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -1), /**< Common error code. */ + OH_AI_STATUS_LITE_NULLPTR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/ + OH_AI_STATUS_LITE_PARAM_INVALID = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -3), /**< Invalid parameter.*/ + OH_AI_STATUS_LITE_NO_CHANGE = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -4), /**< No change. */ + OH_AI_STATUS_LITE_SUCCESS_EXIT = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -5), /**< No error but exit. */ + OH_AI_STATUS_LITE_MEMORY_FAILED = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -6), /**< Fail to create memory. */ + OH_AI_STATUS_LITE_NOT_SUPPORT = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -7), /**< Fail to support. */ + OH_AI_STATUS_LITE_THREADPOOL_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */ + OH_AI_STATUS_LITE_UNINITIALIZED_OBJ = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -9), /**< Object is not initialized. */ // Executor error code, range: [-100,-200) - kMSStatusLiteOutOfTensorRange = kMSCompCodeLite | (0x0FFFFFFF & -100), /**< Failed to check range. */ - kMSStatusLiteInputTensorError = kMSCompCodeLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */ - kMSStatusLiteReentrantError = kMSCompCodeLite | (0x0FFFFFFF & -102), /**< Exist executor running. */ + OH_AI_STATUS_LITE_OUT_OF_TENSOR_RANGE = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -100), /**< Failed to check range. */ + OH_AI_STATUS_LITE_INPUT_TENSOR_ERROR = + OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -101), /**< Failed to check input tensor. 
*/
+  OH_AI_STATUS_LITE_REENTRANT_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -102), /**< Exist executor running. */
 
   // Graph error code, range: [-200,-300)
-  kMSStatusLiteGraphFileError = kMSCompCodeLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */
+  OH_AI_STATUS_LITE_GRAPH_FILE_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */
 
   // Node error code, range: [-300,-400)
-  kMSStatusLiteNotFindOp = kMSCompCodeLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */
-  kMSStatusLiteInvalidOpName = kMSCompCodeLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */
-  kMSStatusLiteInvalidOpAttr = kMSCompCodeLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */
-  kMSStatusLiteOpExecuteFailure = kMSCompCodeLite | (0x0FFFFFFF & -303), /**< Failed to execution operator. */
+  OH_AI_STATUS_LITE_NOT_FIND_OP = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -300), /**< Failed to find operator. */
+  OH_AI_STATUS_LITE_INVALID_OP_NAME = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -301), /**< Invalid operator name. */
+  OH_AI_STATUS_LITE_INVALID_OP_ATTR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -302), /**< Invalid operator attr. */
+  OH_AI_STATUS_LITE_OP_EXECUTE_FAILURE =
+    OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -303), /**< Failed to execute operator. */
 
   // Tensor error code, range: [-400,-500)
-  kMSStatusLiteFormatError = kMSCompCodeLite | (0x0FFFFFFF & -400), /**< Failed to checking tensor format. */
+  OH_AI_STATUS_LITE_FORMAT_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -400), /**< Failed to check tensor format. */
 
   // InferShape error code, range: [-500,-600)
-  kMSStatusLiteInferError = kMSCompCodeLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */
-  kMSStatusLiteInferInvalid = kMSCompCodeLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */
+  OH_AI_STATUS_LITE_INFER_ERROR = OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -500), /**< Failed to infer shape. */
+  OH_AI_STATUS_LITE_INFER_INVALID =
+    OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */
 
   // User input param error code, range: [-600, -700)
-  kMSStatusLiteInputParamInvalid = kMSCompCodeLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. */
-} MSStatus;
+  OH_AI_STATUS_LITE_INPUT_PARAM_INVALID =
+    OH_AI_COMPCODE_LITE | (0x0FFFFFFF & -600), /**< Invalid input param by user. */
+} OH_AI_Status;
 
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/c_api/tensor_c.h b/include/c_api/tensor_c.h
index 9783bd901df091c02574611abc87080c0611c4d1..7197ace751aa2dfc993bb52b1ca3843c447fc8a6 100644
--- a/include/c_api/tensor_c.h
+++ b/include/c_api/tensor_c.h
@@ -25,7 +25,7 @@
 extern "C" {
 #endif
 
-typedef void *MSTensorHandle;
+typedef void *OH_AI_TensorHandle;
 
 /// \brief Create a tensor object.
 ///
@@ -37,53 +37,53 @@ typedef void *MSTensorHandle;
 /// \param[in] data_len The length of the memory, in bytes.
 ///
 /// \return Tensor object handle.
-MS_API MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t *shape, size_t shape_num,
-                                     const void *data, size_t data_len);
+OH_AI_API OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, const int64_t *shape,
+                                                size_t shape_num, const void *data, size_t data_len);
 
 /// \brief Destroy the tensor object.
 ///
 /// \param[in] tensor Tensor object handle address.
-MS_API void MSTensorDestroy(MSTensorHandle *tensor);
+OH_AI_API void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor);
 
 /// \brief Obtain a deep copy of the tensor.
 ///
 /// \param[in] tensor Tensor object handle.
/// /// \return Tensor object handle. -MS_API MSTensorHandle MSTensorClone(MSTensorHandle tensor); +OH_AI_API OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor); /// \brief Set the name for the tensor. /// /// \param[in] tensor Tensor object handle. /// \param[in] name The name of the tensor. -MS_API void MSTensorSetName(MSTensorHandle tensor, const char *name); +OH_AI_API void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name); /// \brief Obtain the name of the tensor. /// /// \param[in] tensor Tensor object handle. /// /// \return The name of the tensor. -MS_API const char *MSTensorGetName(const MSTensorHandle tensor); +OH_AI_API const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor); /// \brief Set the data type for the tensor. /// /// \param[in] tensor Tensor object handle. /// \param[in] type The data type of the tensor. -MS_API void MSTensorSetDataType(MSTensorHandle tensor, MSDataType type); +OH_AI_API void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type); /// \brief Obtain the data type of the tensor. /// /// \param[in] tensor Tensor object handle. /// /// \return The date type of the tensor. -MS_API MSDataType MSTensorGetDataType(const MSTensorHandle tensor); +OH_AI_API OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor); /// \brief Set the shape for the tensor. /// /// \param[in] tensor Tensor object handle. /// \param[in] shape The shape array. /// \param[in] shape_num Dimension of shape. -MS_API void MSTensorSetShape(MSTensorHandle tensor, const int64_t *shape, size_t shape_num); +OH_AI_API void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_t shape_num); /// \brief Obtain the shape of the tensor. /// @@ -91,54 +91,54 @@ MS_API void MSTensorSetShape(MSTensorHandle tensor, const int64_t *shape, size_t /// \param[out] shape_num Dimension of shape. /// /// \return The shape array of the tensor. -MS_API const int64_t *MSTensorGetShape(const MSTensorHandle tensor, size_t *shape_num); +OH_AI_API const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *shape_num); /// \brief Set the format for the tensor. /// /// \param[in] tensor Tensor object handle. /// \param[in] format The format of the tensor. -MS_API void MSTensorSetFormat(MSTensorHandle tensor, MSFormat format); +OH_AI_API void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format); /// \brief Obtain the format of the tensor. /// /// \param[in] tensor Tensor object handle. /// /// \return The format of the tensor. -MS_API MSFormat MSTensorGetFormat(const MSTensorHandle tensor); +OH_AI_API OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor); /// \brief Obtain the data for the tensor. /// /// \param[in] tensor Tensor object handle. /// \param[in] data A pointer to the data of the tensor. -MS_API void MSTensorSetData(MSTensorHandle tensor, void *data); +OH_AI_API void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data); /// \brief Obtain the data pointer of the tensor. /// /// \param[in] tensor Tensor object handle. /// /// \return The data pointer of the tensor. -MS_API const void *MSTensorGetData(const MSTensorHandle tensor); +OH_AI_API const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor); /// \brief Obtain the mutable data pointer of the tensor. If the internal data is empty, it will allocate memory. /// /// \param[in] tensor Tensor object handle. /// /// \return The data pointer of the tensor. 
-MS_API void *MSTensorGetMutableData(const MSTensorHandle tensor);
+OH_AI_API void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor);
 
 /// \brief Obtain the element number of the tensor.
 ///
 /// \param[in] tensor Tensor object handle.
 ///
 /// \return The element number of the tensor.
-MS_API int64_t MSTensorGetElementNum(const MSTensorHandle tensor);
+OH_AI_API int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor);
 
 /// \brief Obtain the data size of the tensor.
 ///
 /// \param[in] tensor Tensor object handle.
 ///
 /// \return The data size of the tensor.
-MS_API size_t MSTensorGetDataSize(const MSTensorHandle tensor);
+OH_AI_API size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor);
 
 #ifdef __cplusplus
 }
diff --git a/include/c_api/types_c.h b/include/c_api/types_c.h
index 342e24bdeb081209c961e60efaba4953614157d1..fd5f85ccd929a56c1701fe62e17c853e00e418a4 100644
--- a/include/c_api/types_c.h
+++ b/include/c_api/types_c.h
@@ -20,27 +20,28 @@
 extern "C" {
 #endif
 
-#ifndef MS_API
+#ifndef OH_AI_API
 #ifdef _WIN32
-#define MS_API __declspec(dllexport)
+#define OH_AI_API __declspec(dllexport)
 #else
-#define MS_API __attribute__((visibility("default")))
+#define OH_AI_API __attribute__((visibility("default")))
 #endif
 #endif
 
-typedef enum MSModelType {
-  kMSModelTypeMindIR = 0,
+typedef enum OH_AI_ModelType {
+  OH_AI_MODELTYPE_MINDIR = 0,
   // insert new data type here
-  kMSModelTypeInvalid = 0xFFFFFFFF
-} MSModelType;
+  OH_AI_MODELTYPE_INVALID = 0xFFFFFFFF
+} OH_AI_ModelType;
 
-typedef enum MSDeviceType {
-  kMSDeviceTypeCPU = 0,
-  kMSDeviceTypeGPU,
-  kMSDeviceTypeKirinNPU,
+typedef enum OH_AI_DeviceType {
+  OH_AI_DEVICETYPE_CPU = 0,
+  OH_AI_DEVICETYPE_GPU,
+  OH_AI_DEVICETYPE_KIRIN_NPU,
+  OH_AI_DEVICETYPE_HEXAGON_DSP = 6,
   // add new type here
-  kMSDeviceTypeInvalid = 100,
-} MSDeviceType;
+  OH_AI_DEVICETYPE_INVALID = 100,
+} OH_AI_DeviceType;
 
 #ifdef __cplusplus
 }
diff --git a/mindspore/lite/include/lite_types.h b/mindspore/lite/include/lite_types.h
index 35a7dbe94601102b83e1cedba085ac91ce93448a..5c6f711b1d259cf599e31538a6ed694cdfb424f0 100644
--- a/mindspore/lite/include/lite_types.h
+++ b/mindspore/lite/include/lite_types.h
@@ -31,7 +31,8 @@ typedef enum {
   DT_GPU,    /**< GPU device type */
   DT_NPU,    /**< NPU device type */
   DT_ASCEND, /**< ASCEND device type */
-  DT_END     /**< NO device type */
+  DT_DSP = 6, /**< DSP device type */
+  DT_END      /**< NO device type */
 } DeviceType;
 
 typedef enum {
diff --git a/mindspore/lite/src/c_api/context_c.cc b/mindspore/lite/src/c_api/context_c.cc
index cb2595addfb59e5c7a3365e508d40e5fd8d6fb27..4a9e6ad17282a486fb31edc1006f7a02a5b3b239 100644
--- a/mindspore/lite/src/c_api/context_c.cc
+++ b/mindspore/lite/src/c_api/context_c.cc
@@ -16,18 +16,16 @@
 #include "include/c_api/context_c.h"
 #include "src/c_api/context_c.h"
 #include "src/common/log_adapter.h"
+#include "nnacl/op_base.h"
 
 // ================ Context ================
-MSContextHandle MSContextCreate() {
+OH_AI_ContextHandle OH_AI_ContextCreate() {
   auto impl = new (std::nothrow) mindspore::ContextC;
-  if (impl == nullptr) {
-    MS_LOG(ERROR) << "memory allocation failed.";
-    return nullptr;
-  }
-  return static_cast<MSContextHandle>(impl);
+  MS_CHECK_TRUE_MSG(impl != nullptr, nullptr, "memory allocation failed.");
+  return static_cast<OH_AI_ContextHandle>(impl);
 }
 
-void MSContextDestroy(MSContextHandle *context) {
+void OH_AI_ContextDestroy(OH_AI_ContextHandle *context) {
   if (context != nullptr && *context != nullptr) {
     auto impl = static_cast<mindspore::ContextC *>(*context);
     delete impl;
@@ -35,104 +33,74 @@ void MSContextDestroy(MSContextHandle *context) {
   }
 }
 
-void
MSContextSetThreadNum(MSContextHandle context, int32_t thread_num) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_ContextSetThreadNum(OH_AI_ContextHandle context, int32_t thread_num) { + MS_CHECK_TRUE_MSG(context != nullptr, , "param is nullptr."); auto impl = static_cast(context); impl->thread_num = thread_num; } -int32_t MSContextGetThreadNum(const MSContextHandle context) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return 0; - } +int32_t OH_AI_ContextGetThreadNum(const OH_AI_ContextHandle context) { + MS_CHECK_TRUE_MSG(context != nullptr, 0, "param is nullptr."); auto impl = static_cast(context); return impl->thread_num; } -void MSContextSetThreadAffinityMode(MSContextHandle context, int mode) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_ContextSetThreadAffinityMode(OH_AI_ContextHandle context, int mode) { + MS_CHECK_TRUE_MSG(context != nullptr, , "param is nullptr."); auto impl = static_cast(context); impl->affinity_mode = mode; return; } -int MSContextGetThreadAffinityMode(const MSContextHandle context) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return 0; - } +int OH_AI_ContextGetThreadAffinityMode(const OH_AI_ContextHandle context) { + MS_CHECK_TRUE_MSG(context != nullptr, 0, "param is nullptr."); auto impl = static_cast(context); return impl->affinity_mode; } -void MSContextSetThreadAffinityCoreList(MSContextHandle context, const int32_t *core_list, size_t core_num) { - if (context == nullptr || core_list == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_ContextSetThreadAffinityCoreList(OH_AI_ContextHandle context, const int32_t *core_list, size_t core_num) { + MS_CHECK_TRUE_MSG(context != nullptr && core_list != nullptr, , "param is nullptr."); const std::vector vec_core_list(core_list, core_list + core_num); auto impl = static_cast(context); impl->affinity_core_list = vec_core_list; return; } -const int32_t *MSContextGetThreadAffinityCoreList(const MSContextHandle context, size_t *core_num) { - if (context == nullptr || core_num == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return nullptr; - } +const int32_t *OH_AI_ContextGetThreadAffinityCoreList(const OH_AI_ContextHandle context, size_t *core_num) { + MS_CHECK_TRUE_MSG(context != nullptr && core_num != nullptr, nullptr, "param is nullptr."); auto impl = static_cast(context); *core_num = impl->affinity_core_list.size(); return impl->affinity_core_list.data(); } -void MSContextSetEnableParallel(MSContextHandle context, bool is_parallel) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_ContextSetEnableParallel(OH_AI_ContextHandle context, bool is_parallel) { + MS_CHECK_TRUE_MSG(context != nullptr, , "param is nullptr."); auto impl = static_cast(context); impl->enable_parallel = is_parallel; } -bool MSContextGetEnableParallel(const MSContextHandle context) { - if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return false; - } +bool OH_AI_ContextGetEnableParallel(const OH_AI_ContextHandle context) { + MS_CHECK_TRUE_MSG(context != nullptr, false, "param is nullptr."); auto impl = static_cast(context); return impl->enable_parallel; } -void MSContextAddDeviceInfo(MSContextHandle context, MSDeviceInfoHandle device_info) { - if (context == nullptr || device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_ContextAddDeviceInfo(OH_AI_ContextHandle 
context, OH_AI_DeviceInfoHandle device_info) { + MS_CHECK_TRUE_MSG(context != nullptr && device_info != nullptr, , "param is nullptr."); auto impl = static_cast(context); std::shared_ptr device(static_cast(device_info)); impl->device_info_list.push_back(device); } // ================ DeviceInfo ================ -MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type) { +OH_AI_DeviceInfoHandle OH_AI_DeviceInfoCreate(OH_AI_DeviceType device_type) { mindspore::DeviceInfoC *impl = new (std::nothrow) mindspore::DeviceInfoC; - if (impl == nullptr) { - MS_LOG(ERROR) << "memory allocation failed."; - return nullptr; - } + MS_CHECK_TRUE_MSG(impl != nullptr, nullptr, "memory allocation failed."); impl->device_type = device_type; - return static_cast(impl); + return static_cast(impl); } -void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info) { +void OH_AI_DeviceInfoDestroy(OH_AI_DeviceInfoHandle *device_info) { if (device_info != nullptr && *device_info != nullptr) { auto impl = static_cast(*device_info); delete impl; @@ -140,71 +108,50 @@ void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info) { } } -void MSDeviceInfoSetProvider(MSDeviceInfoHandle device_info, const char *provider) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_DeviceInfoSetProvider(OH_AI_DeviceInfoHandle device_info, const char *provider) { + MS_CHECK_TRUE_MSG(device_info != nullptr, , "param is nullptr."); auto impl = static_cast(device_info); impl->provider = provider; } -const char *MSDeviceInfoGetProvider(const MSDeviceInfoHandle device_info) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return nullptr; - } +const char *OH_AI_DeviceInfoGetProvider(const OH_AI_DeviceInfoHandle device_info) { + MS_CHECK_TRUE_MSG(device_info != nullptr, nullptr, "param is nullptr."); auto impl = static_cast(device_info); return impl->provider.c_str(); } -void MSDeviceInfoSetProviderDevice(MSDeviceInfoHandle device_info, const char *device) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_DeviceInfoSetProviderDevice(OH_AI_DeviceInfoHandle device_info, const char *device) { + MS_CHECK_TRUE_MSG(device_info != nullptr, , "param is nullptr."); auto impl = static_cast(device_info); impl->provider_device = device; } -const char *MSDeviceInfoGetProviderDevice(const MSDeviceInfoHandle device_info) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return nullptr; - } +const char *OH_AI_DeviceInfoGetProviderDevice(const OH_AI_DeviceInfoHandle device_info) { + MS_CHECK_TRUE_MSG(device_info != nullptr, nullptr, "param is nullptr."); auto impl = static_cast(device_info); return impl->provider_device.c_str(); } -MSDeviceType MSDeviceInfoGetDeviceType(const MSDeviceInfoHandle device_info) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return kMSDeviceTypeInvalid; - } +OH_AI_DeviceType OH_AI_DeviceInfoGetDeviceType(const OH_AI_DeviceInfoHandle device_info) { + MS_CHECK_TRUE_MSG(device_info != nullptr, OH_AI_DEVICETYPE_INVALID, "param is nullptr."); auto impl = static_cast(device_info); return impl->device_type; } -void MSDeviceInfoSetEnableFP16(MSDeviceInfoHandle device_info, bool is_fp16) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_DeviceInfoSetEnableFP16(OH_AI_DeviceInfoHandle device_info, bool is_fp16) { + MS_CHECK_TRUE_MSG(device_info != nullptr, , "param is nullptr."); auto impl = static_cast(device_info); 
- if (impl->device_type == kMSDeviceTypeCPU || impl->device_type == kMSDeviceTypeGPU) { + if (impl->device_type == OH_AI_DEVICETYPE_CPU || impl->device_type == OH_AI_DEVICETYPE_GPU) { impl->enable_fp16 = is_fp16; } else { MS_LOG(ERROR) << "Unsupported Feature."; } } -bool MSDeviceInfoGetEnableFP16(const MSDeviceInfoHandle device_info) { - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return false; - } +bool OH_AI_DeviceInfoGetEnableFP16(const OH_AI_DeviceInfoHandle device_info) { + MS_CHECK_TRUE_MSG(device_info != nullptr, false, "param is nullptr."); auto impl = static_cast(device_info); - if (impl->device_type == kMSDeviceTypeCPU || impl->device_type == kMSDeviceTypeGPU) { + if (impl->device_type == OH_AI_DEVICETYPE_CPU || impl->device_type == OH_AI_DEVICETYPE_GPU) { return impl->enable_fp16; } else { MS_LOG(ERROR) << "Unsupported Feature. device_type: " << impl->device_type; @@ -212,26 +159,20 @@ bool MSDeviceInfoGetEnableFP16(const MSDeviceInfoHandle device_info) { } } -void MSDeviceInfoSetFrequency(MSDeviceInfoHandle device_info, int frequency) { // only for KirinNPU - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return; - } +void OH_AI_DeviceInfoSetFrequency(OH_AI_DeviceInfoHandle device_info, int frequency) { // only for KirinNPU + MS_CHECK_TRUE_MSG(device_info != nullptr, , "param is nullptr."); auto impl = static_cast(device_info); - if (impl->device_type == kMSDeviceTypeKirinNPU) { + if (impl->device_type == OH_AI_DEVICETYPE_KIRIN_NPU) { impl->frequency = frequency; } else { MS_LOG(ERROR) << "Unsupported Feature."; } } -int MSDeviceInfoGetFrequency(const MSDeviceInfoHandle device_info) { // only for KirinNPU - if (device_info == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return -1; - } +int OH_AI_DeviceInfoGetFrequency(const OH_AI_DeviceInfoHandle device_info) { // only for KirinNPU + MS_CHECK_TRUE_MSG(device_info != nullptr, -1, "param is nullptr."); auto impl = static_cast(device_info); - if (impl->device_type == kMSDeviceTypeKirinNPU) { + if (impl->device_type == OH_AI_DEVICETYPE_KIRIN_NPU) { return impl->frequency; } else { MS_LOG(ERROR) << "Unsupported Feature."; diff --git a/mindspore/lite/src/c_api/context_c.h b/mindspore/lite/src/c_api/context_c.h index 8de91ea8cdd432685f934444f51d7c65563250a1..25676e461d4574b1934fd2dd86ca3f9276141576 100644 --- a/mindspore/lite/src/c_api/context_c.h +++ b/mindspore/lite/src/c_api/context_c.h @@ -26,7 +26,7 @@ class Allocator; class Delegate; typedef struct DeviceInfoC { - MSDeviceType device_type; + OH_AI_DeviceType device_type; bool enable_fp16 = false; int frequency = 3; std::string provider; diff --git a/mindspore/lite/src/c_api/model_c.cc b/mindspore/lite/src/c_api/model_c.cc index 75d54a76f2d828122d490eeaa56303f0c02c00fa..99a4863578b31a4e5c5e48b88f019e7afe5bd4a4 100644 --- a/mindspore/lite/src/c_api/model_c.cc +++ b/mindspore/lite/src/c_api/model_c.cc @@ -36,13 +36,11 @@ class ModelC { Status Build(const std::string &model_path, ModelType model_type, const ContextC *model_context); Status Resize(const std::vector &inputs, const std::vector> &shapes); - Status Predict(const MSTensorHandle *inputs, size_t input_num, MSTensorHandle **outputs, size_t *output_num, - const MSKernelCallBackC &before, const MSKernelCallBackC &after); + Status Predict(const OH_AI_TensorHandle *inputs, size_t input_num, OH_AI_TensorHandle **outputs, size_t *output_num, + const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after); MSTensor::Impl **GetInputs(size_t *input_num); 
MSTensor::Impl **GetOutputs(size_t *output_num); - MSTensor::Impl *GetInputByTensorName(const std::string &name); - MSTensor::Impl *GetOutputByTensorName(const std::string &name); private: std::shared_ptr session_ = nullptr; @@ -50,7 +48,7 @@ class ModelC { std::map tensor_map_; std::vector inputs_; std::vector outputs_; - Status RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC &after); + Status RunGraph(const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after); void ResetTensorData(std::vector old_data, std::vector tensors); MSTensor::Impl *TensorToTensorImpl(mindspore::tensor::MSTensor *tensor); }; @@ -58,10 +56,7 @@ class ModelC { Status ModelC::Build(const void *model_data, size_t data_size, ModelType model_type, const ContextC *model_context) { context_.reset(model_context); session_ = std::make_shared(); - if (session_ == nullptr) { - MS_LOG(ERROR) << "create session failed"; - return kLiteNullptr; - } + MS_CHECK_TRUE_MSG(session_ != nullptr, kLiteNullptr, "create session failed"); auto ret = session_->Init(ContextUtils::Convert(model_context)); if (ret != mindspore::lite::RET_OK) { MS_LOG(ERROR) << "init session failed"; @@ -77,10 +72,7 @@ Status ModelC::Build(const void *model_data, size_t data_size, ModelType model_t Status ModelC::Build(const std::string &model_path, ModelType model_type, const ContextC *model_context) { context_.reset(model_context); session_ = std::make_shared(); - if (session_ == nullptr) { - MS_LOG(ERROR) << "create session failed"; - return kLiteNullptr; - } + MS_CHECK_TRUE_MSG(session_ != nullptr, kLiteNullptr, "create session failed"); auto ret = session_->Init(ContextUtils::Convert(model_context)); if (ret != mindspore::lite::RET_OK) { MS_LOG(ERROR) << "init session failed"; @@ -98,10 +90,8 @@ Status ModelC::Resize(const std::vector &inputs, const std::ve size_t input_num = inputs.size(); for (size_t i = 0; i < input_num; i++) { auto input = inputs[i]; - if (input == nullptr || input->lite_tensor() == nullptr) { - MS_LOG(ERROR) << "Input tensor is null."; - return kLiteInputTensorError; - } + MS_CHECK_TRUE_MSG(input != nullptr && input->lite_tensor() != nullptr, kLiteInputTensorError, + "Input tensor is null."); inner_input.push_back(input->lite_tensor()); } size_t shape_num = shapes.size(); @@ -110,10 +100,7 @@ Status ModelC::Resize(const std::vector &inputs, const std::ve std::transform(shapes[i].begin(), shapes[i].end(), std::back_inserter(inner_shapes[i]), [](int64_t value) { return static_cast(value); }); } - if (session_ == nullptr) { - MS_LOG(ERROR) << "Session implement is null."; - return kLiteNullptr; - } + MS_CHECK_TRUE_MSG(session_ != nullptr, kLiteNullptr, "Session implement is null."); auto ret = session_->Resize(inner_input, inner_shapes); return static_cast(ret); } @@ -124,12 +111,9 @@ void ModelC::ResetTensorData(std::vector old_data, std::vectorGetInputs(); if (model_inputs.size() != input_num) { MS_LOG(ERROR) << "Wrong input size."; @@ -175,11 +159,11 @@ Status ModelC::Predict(const MSTensorHandle *inputs, size_t input_num, MSTensorH return ret; } - *outputs = reinterpret_cast(GetOutputs(output_num)); + *outputs = reinterpret_cast(GetOutputs(output_num)); return kSuccess; } -Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC &after) { +Status ModelC::RunGraph(const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after) { if (before == nullptr || after == nullptr) { auto ret = session_->RunGraph(); return static_cast(ret); @@ -189,8 +173,8 @@ Status 
ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC const CallBackParam &call_param) { std::vector inputs_impl; std::vector outputs_impl; - std::vector op_inputs; - std::vector op_outputs; + std::vector op_inputs; + std::vector op_outputs; size_t op_input_num = before_inputs.size(); for (size_t i = 0; i < op_input_num; i++) { inputs_impl.emplace_back(before_inputs[i]); @@ -201,10 +185,10 @@ Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC outputs_impl.emplace_back(before_outputs[i]); op_outputs.push_back(&(outputs_impl.back())); } - const MSCallBackParamC op_info = {const_cast(call_param.node_name.c_str()), - const_cast(call_param.node_type.c_str())}; - MSTensorHandleArray inputs = {op_input_num, op_inputs.data()}; - MSTensorHandleArray outputs = {op_output_num, op_outputs.data()}; + const OH_AI_CallBackParam op_info = {const_cast(call_param.node_name.c_str()), + const_cast(call_param.node_type.c_str())}; + OH_AI_TensorHandleArray inputs = {op_input_num, op_inputs.data()}; + OH_AI_TensorHandleArray outputs = {op_output_num, op_outputs.data()}; return before(inputs, outputs, op_info); }; @@ -213,8 +197,8 @@ Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC const CallBackParam &call_param) { std::vector inputs_impl; std::vector outputs_impl; - std::vector op_inputs; - std::vector op_outputs; + std::vector op_inputs; + std::vector op_outputs; size_t op_input_num = after_inputs.size(); for (size_t i = 0; i < op_input_num; i++) { inputs_impl.emplace_back(after_inputs[i]); @@ -225,10 +209,10 @@ Status ModelC::RunGraph(const MSKernelCallBackC &before, const MSKernelCallBackC outputs_impl.emplace_back(after_outputs[i]); op_outputs.push_back(&(outputs_impl.back())); } - const MSCallBackParamC op_info = {const_cast(call_param.node_name.c_str()), - const_cast(call_param.node_type.c_str())}; - MSTensorHandleArray inputs = {op_input_num, op_inputs.data()}; - MSTensorHandleArray outputs = {op_output_num, op_outputs.data()}; + const OH_AI_CallBackParam op_info = {const_cast(call_param.node_name.c_str()), + const_cast(call_param.node_type.c_str())}; + OH_AI_TensorHandleArray inputs = {op_input_num, op_inputs.data()}; + OH_AI_TensorHandleArray outputs = {op_output_num, op_outputs.data()}; return after(inputs, outputs, op_info); }; auto ret = session_->RunGraph(before_call_back, after_call_back); @@ -252,10 +236,7 @@ MSTensor::Impl *ModelC::TensorToTensorImpl(mindspore::tensor::MSTensor *tensor) } MSTensor::Impl **ModelC::GetInputs(size_t *input_num) { - if (session_ == nullptr || input_num == nullptr) { - MS_LOG(ERROR) << "Session is null."; - return nullptr; - } + MS_CHECK_TRUE_MSG(session_ != nullptr && input_num != nullptr, nullptr, "param is nullptr."); auto inputs = session_->GetInputs(); *input_num = inputs.size(); if (inputs_.capacity() < *input_num) { @@ -268,10 +249,7 @@ MSTensor::Impl **ModelC::GetInputs(size_t *input_num) { } MSTensor::Impl **ModelC::GetOutputs(size_t *output_num) { - if (session_ == nullptr || output_num == nullptr) { - MS_LOG(ERROR) << "Session is null."; - return nullptr; - } + MS_CHECK_TRUE_MSG(session_ != nullptr && output_num != nullptr, nullptr, "param is nullptr."); auto outputs = session_->GetOutputs(); *output_num = outputs.size(); if (outputs_.capacity() < *output_num) { @@ -284,36 +262,18 @@ MSTensor::Impl **ModelC::GetOutputs(size_t *output_num) { }); return outputs_.data(); } - -MSTensor::Impl *ModelC::GetInputByTensorName(const std::string &name) { - if (session_ == nullptr) { - 
MS_LOG(ERROR) << "Session is null."; - return nullptr; - } - auto tensor = session_->GetInputsByTensorName(name); - return TensorToTensorImpl(tensor); -} - -MSTensor::Impl *ModelC::GetOutputByTensorName(const std::string &name) { - if (session_ == nullptr) { - MS_LOG(ERROR) << "Session is null."; - return nullptr; - } - auto tensor = session_->GetOutputByTensorName(name); - return TensorToTensorImpl(tensor); -} } // namespace mindspore -MSModelHandle MSModelCreate() { +OH_AI_ModelHandle OH_AI_ModelCreate() { auto impl = new (std::nothrow) mindspore::ModelC(); if (impl == nullptr) { MS_LOG(ERROR) << "Model implement is null."; return nullptr; } - return static_cast(impl); + return static_cast(impl); } -void MSModelDestroy(MSModelHandle *model) { +void OH_AI_ModelDestroy(OH_AI_ModelHandle *model) { if (model != nullptr && *model != nullptr) { auto impl = static_cast(*model); delete impl; @@ -321,127 +281,108 @@ void MSModelDestroy(MSModelHandle *model) { } } -void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) { +void OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model, void *workspace, size_t workspace_size) { MS_LOG(ERROR) << "Unsupported Feature."; return; } -MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type, - const MSContextHandle model_context) { - if (model == nullptr || model_data == nullptr || model_context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return kMSStatusLiteNullptr; - } - if (model_type == kMSModelTypeInvalid) { +OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, + OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context) { + MS_CHECK_TRUE_MSG(model != nullptr && model_data != nullptr && model_context != nullptr, OH_AI_STATUS_LITE_NULLPTR, + "param is nullptr."); + if (model_type == OH_AI_MODELTYPE_INVALID) { MS_LOG(ERROR) << "param is invalid."; - return kMSStatusLiteParamInvalid; + return OH_AI_STATUS_LITE_PARAM_INVALID; } mindspore::ContextC *context = static_cast(model_context); auto impl = static_cast(model); auto ret = impl->Build(model_data, data_size, static_cast(model_type), context); - return static_cast(ret.StatusCode()); + return static_cast(ret.StatusCode()); } -MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path, MSModelType model_type, - const MSContextHandle model_context) { - if (model == nullptr || model_path == nullptr || model_context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return kMSStatusLiteNullptr; - } - if (model_type == kMSModelTypeInvalid) { +OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, OH_AI_ModelType model_type, + const OH_AI_ContextHandle model_context) { + MS_CHECK_TRUE_MSG(model != nullptr && model_path != nullptr && model_context != nullptr, OH_AI_STATUS_LITE_NULLPTR, + "param is nullptr."); + if (model_type == OH_AI_MODELTYPE_INVALID) { MS_LOG(ERROR) << "param is invalid."; - return kMSStatusLiteParamInvalid; + return OH_AI_STATUS_LITE_PARAM_INVALID; } mindspore::ContextC *context = static_cast(model_context); auto impl = static_cast(model); auto ret = impl->Build(model_path, static_cast(model_type), context); - return static_cast(ret.StatusCode()); + return static_cast(ret.StatusCode()); } -MSStatus MSModelResize(MSModelHandle model, const MSTensorHandleArray inputs, MSShapeInfo *shape_infos, - size_t shape_info_num) { - if (model == nullptr || shape_infos == nullptr) { - MS_LOG(ERROR) << "param is 
nullptr."; - return kMSStatusLiteNullptr; - } +OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, + OH_AI_ShapeInfo *shape_infos, size_t shape_info_num) { + MS_CHECK_TRUE_MSG(model != nullptr && shape_infos != nullptr, OH_AI_STATUS_LITE_NULLPTR, "param is nullptr."); std::vector vec_inputs; std::transform(inputs.handle_list, inputs.handle_list + inputs.handle_num, std::back_inserter(vec_inputs), - [](MSTensorHandle value) { return static_cast(value); }); - std::vector> vec_dims; + [](OH_AI_TensorHandle value) { return static_cast(value); }); + std::vector> vec_diOH_AI_; for (size_t i = 0; i < shape_info_num; i++) { std::vector shape(shape_infos[i].shape, shape_infos[i].shape + shape_infos[i].shape_num); - vec_dims.push_back(shape); + vec_diOH_AI_.push_back(shape); } auto impl = static_cast(model); - auto ret = impl->Resize(vec_inputs, vec_dims); - return static_cast(ret.StatusCode()); + auto ret = impl->Resize(vec_inputs, vec_diOH_AI_); + return static_cast(ret.StatusCode()); } -MSStatus MSModelPredict(MSModelHandle model, const MSTensorHandleArray inputs, MSTensorHandleArray *outputs, - const MSKernelCallBackC before, const MSKernelCallBackC after) { - if (model == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return kMSStatusLiteNullptr; - } +OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, + OH_AI_TensorHandleArray *outputs, const OH_AI_KernelCallBack before, + const OH_AI_KernelCallBack after) { + MS_CHECK_TRUE_MSG(model != nullptr, OH_AI_STATUS_LITE_NULLPTR, "param is nullptr."); auto impl = static_cast(model); auto ret = impl->Predict(inputs.handle_list, inputs.handle_num, &(outputs->handle_list), &(outputs->handle_num), before, after); if (!ret.IsOk()) { MS_LOG(ERROR) << "Predict fail, ret :" << ret; } - return static_cast(ret.StatusCode()); + return static_cast(ret.StatusCode()); } -MSTensorHandleArray MSModelGetInputs(const MSModelHandle model) { - if (model == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return {0, nullptr}; - } +OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model) { + OH_AI_TensorHandleArray result = {0, nullptr}; + MS_CHECK_TRUE_MSG(model != nullptr, result, "param is nullptr."); auto impl = static_cast(model); size_t input_num; - auto handles = reinterpret_cast(impl->GetInputs(&input_num)); + auto handles = reinterpret_cast(impl->GetInputs(&input_num)); return {input_num, handles}; } -MSTensorHandleArray MSModelGetOutputs(const MSModelHandle model) { - if (model == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return {0, nullptr}; - } +OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model) { + OH_AI_TensorHandleArray result = {0, nullptr}; + MS_CHECK_TRUE_MSG(model != nullptr, result, "param is nullptr."); auto impl = static_cast(model); size_t output_num; - auto handles = reinterpret_cast(impl->GetOutputs(&output_num)); + auto handles = reinterpret_cast(impl->GetOutputs(&output_num)); return {output_num, handles}; } -MSTensorHandle MSModelGetInputByTensorName(const MSModelHandle model, const char *tensor_name) { - if (model == nullptr || tensor_name == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return nullptr; - } +OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name) { + MS_CHECK_TRUE_MSG(model != nullptr && tensor_name != nullptr, nullptr, "param is nullptr."); auto impl = static_cast(model); size_t input_num; auto inputs = 
diff --git a/mindspore/lite/src/c_api/tensor_c.cc b/mindspore/lite/src/c_api/tensor_c.cc
index 2ad5bda9046434c4e7c60386a3f607bb22730b00..31536225b9e357674547ee1d3a747879034197a2 100644
--- a/mindspore/lite/src/c_api/tensor_c.cc
+++ b/mindspore/lite/src/c_api/tensor_c.cc
@@ -18,13 +18,11 @@
 #include "include/ms_tensor.h"
 #include "src/cxx_api/tensor/tensor_impl.h"
 #include "src/runtime/inner_allocator.h"
+#include "nnacl/op_base.h"

-MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t *shape, size_t shape_num,
-                              const void *data, size_t data_len) {
-  if (name == nullptr || shape == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, const int64_t *shape, size_t shape_num,
+                                      const void *data, size_t data_len) {
+  MS_CHECK_TRUE_MSG(name != nullptr && shape != nullptr, nullptr, "param is nullptr.");
   std::vector<int64_t> vec_shape(shape_num);
   for (size_t i = 0; i < shape_num; i++) {
     vec_shape[i] = shape[i];
@@ -40,7 +38,7 @@ MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t *
   return impl;
 }

-void MSTensorDestroy(MSTensorHandle *tensor) {
+void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor) {
   if (tensor != nullptr && *tensor != nullptr) {
     auto impl = static_cast<mindspore::MSTensor::Impl *>(*tensor);
     delete impl;
@@ -48,11 +46,8 @@ void MSTensorDestroy(MSTensorHandle *tensor) {
   }
 }

-MSTensorHandle MSTensorClone(MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   auto lite_tensor = static_cast<mindspore::lite::Tensor *>(impl->lite_tensor());
   auto clone = mindspore::lite::Tensor::CopyTensor(*lite_tensor, true, lite_tensor->allocator());
@@ -70,48 +65,33 @@ MSTensorHandle MSTensorClone(MSTensorHandle tensor) {
   return clone_impl;
 }

-void MSTensorSetName(MSTensorHandle tensor, const char *name) {
-  if (tensor == nullptr || name == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return;
-  }
+void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr && name != nullptr, , "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   impl->SetName(name);
 }

-const char *MSTensorGetName(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->Name().c_str();
 }

-void MSTensorSetDataType(MSTensorHandle tensor, MSDataType type) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return;
-  }
+void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, , "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   impl->SetDataType(static_cast<mindspore::DataType>(type));
 }

-MSDataType MSTensorGetDataType(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return kMSDataTypeUnknown;
-  }
+OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, OH_AI_DATATYPE_UNKNOWN, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   auto dtype = impl->DataType();
-  return static_cast<MSDataType>(dtype);
+  return static_cast<OH_AI_DataType>(dtype);
 }

-void MSTensorSetShape(MSTensorHandle tensor, const int64_t *shape, size_t shape_num) {
-  if (tensor == nullptr || shape == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return;
-  }
+void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_t shape_num) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr && shape != nullptr, , "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   std::vector<int64_t> vec_shape(shape_num);
   for (size_t i = 0; i < shape_num; i++) {
@@ -120,75 +100,51 @@ void MSTensorSetShape(MSTensorHandle tensor, const int64_t *shape, size_t shape_
   impl->SetShape(vec_shape);
 }

-const int64_t *MSTensorGetShape(const MSTensorHandle tensor, size_t *shape_num) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *shape_num) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   *shape_num = impl->Shape().size();
   return impl->Shape().data();
 }

-void MSTensorSetFormat(MSTensorHandle tensor, MSFormat format) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return;
-  }
+void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, , "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->SetFormat(static_cast<mindspore::Format>(format));
 }

-MSFormat MSTensorGetFormat(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return kMSFormatNHWC;
-  }
+OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, OH_AI_FORMAT_NHWC, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
-  return static_cast<MSFormat>(impl->format());
+  return static_cast<OH_AI_Format>(impl->format());
 }

-void MSTensorSetData(MSTensorHandle tensor, void *data) {
-  if (tensor == nullptr || data == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return;
-  }
+void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr && data != nullptr, , "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->SetData(data);
 }

-const void *MSTensorGetData(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->Data().get();
 }

-void *MSTensorGetMutableData(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return nullptr;
-  }
+void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->MutableData();
 }

-int64_t MSTensorGetElementNum(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return 0;
-  }
+int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, 0, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->ElementNum();
 }

-size_t MSTensorGetDataSize(const MSTensorHandle tensor) {
-  if (tensor == nullptr) {
-    MS_LOG(ERROR) << "param is nullptr.";
-    return 0;
-  }
+size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) {
+  MS_CHECK_TRUE_MSG(tensor != nullptr, 0, "param is nullptr.");
   auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
   return impl->DataSize();
 }
<< "param is nullptr."; - return nullptr; - } +void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) { + MS_CHECK_TRUE_MSG(tensor != nullptr, nullptr, "param is nullptr."); auto impl = static_cast(tensor); return impl->MutableData(); } -int64_t MSTensorGetElementNum(const MSTensorHandle tensor) { - if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return 0; - } +int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) { + MS_CHECK_TRUE_MSG(tensor != nullptr, 0, "param is nullptr."); auto impl = static_cast(tensor); return impl->ElementNum(); } -size_t MSTensorGetDataSize(const MSTensorHandle tensor) { - if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; - return 0; - } +size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) { + MS_CHECK_TRUE_MSG(tensor != nullptr, 0, "param is nullptr."); auto impl = static_cast(tensor); return impl->DataSize(); } diff --git a/mindspore/lite/src/cxx_api/context.cc b/mindspore/lite/src/cxx_api/context.cc index 7c42a3ac5e15042f8d8bb473b55b008f65a4be42..68cdff2a96152e285f624d3e9a1de81a96fc25ff 100644 --- a/mindspore/lite/src/cxx_api/context.cc +++ b/mindspore/lite/src/cxx_api/context.cc @@ -160,6 +160,22 @@ std::shared_ptr Context::GetDelegate() const { return data_->delegate; } +void Context::SetMultiModalHW(bool float_mode) { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return; + } + data_->float_mode = float_mode; +} + +bool Context::GetMultiModalHW() const { + if (data_ == nullptr) { + MS_LOG(ERROR) << "Invalid context."; + return false; + } + return data_->float_mode; +} + std::vector> &Context::MutableDeviceInfo() { static std::vector> empty{}; if (data_ == nullptr) { diff --git a/mindspore/lite/src/cxx_api/context.h b/mindspore/lite/src/cxx_api/context.h index 1c94a34ff702a0d9f35ee243854a27be443fb98f..7dded21b518193ca6d09cb04980b0ecae2ec6754 100644 --- a/mindspore/lite/src/cxx_api/context.h +++ b/mindspore/lite/src/cxx_api/context.h @@ -36,6 +36,7 @@ struct Context::Data { std::vector affinity_core_list_; int affinity_mode_ = 0; std::shared_ptr delegate = nullptr; + bool float_mode = false; }; struct DeviceInfoContext::Data { diff --git a/mindspore/lite/src/cxx_api/converters.cc b/mindspore/lite/src/cxx_api/converters.cc index 6d27820490aa52e1025e4d2e9a1ee83516b7a14e..b239e37a86c3364d6f9e3652cea49b7d34134bf9 100644 --- a/mindspore/lite/src/cxx_api/converters.cc +++ b/mindspore/lite/src/cxx_api/converters.cc @@ -95,13 +95,14 @@ lite::InnerContext *ContextUtils::Convert(Context *context) { cpu_context->GetProvider(), cpu_context->GetProviderDevice(), inner_context.get()); } else if (device->GetDeviceType() == kGPU) { auto gpu_context = device->Cast(); +#ifdef ENABLE_OPENGL_TEXTURE + bool enable_gl_texture = gpu_context->GetEnableGLTexture(); + void *gl_context = gpu_context->GetGLContext(); + void *gl_display = gpu_context->GetGLDisplay(); +#else bool enable_gl_texture = false; void *gl_context = nullptr; void *gl_display = nullptr; -#ifdef ENABLE_OPENGL_TEXTURE - enable_gl_texture = gpu_context->GetEnableGLTexture(); - gl_context = gpu_context->GetGLContext(); - gl_display = gpu_context->GetGLDisplay(); #endif ret = AddGpuDevice(gpu_context->GetEnableFP16(), gpu_context->GetDeviceID(), gpu_context->GetRankID(), @@ -139,16 +140,16 @@ lite::InnerContext *ContextUtils::Convert(const ContextC *context_c) { for (auto &device_info_c : device_list) { MS_CHECK_TRUE_RET(device_info_c != nullptr, nullptr); lite::DeviceInfo device_info = {0}; - if (device_info_c->device_type == 
diff --git a/mindspore/lite/src/cxx_api/converters.cc b/mindspore/lite/src/cxx_api/converters.cc
index 6d27820490aa52e1025e4d2e9a1ee83516b7a14e..b239e37a86c3364d6f9e3652cea49b7d34134bf9 100644
--- a/mindspore/lite/src/cxx_api/converters.cc
+++ b/mindspore/lite/src/cxx_api/converters.cc
@@ -95,13 +95,14 @@ lite::InnerContext *ContextUtils::Convert(Context *context) {
                        cpu_context->GetProvider(), cpu_context->GetProviderDevice(), inner_context.get());
   } else if (device->GetDeviceType() == kGPU) {
     auto gpu_context = device->Cast<GPUDeviceInfo>();
+#ifdef ENABLE_OPENGL_TEXTURE
+    bool enable_gl_texture = gpu_context->GetEnableGLTexture();
+    void *gl_context = gpu_context->GetGLContext();
+    void *gl_display = gpu_context->GetGLDisplay();
+#else
     bool enable_gl_texture = false;
     void *gl_context = nullptr;
     void *gl_display = nullptr;
-#ifdef ENABLE_OPENGL_TEXTURE
-    enable_gl_texture = gpu_context->GetEnableGLTexture();
-    gl_context = gpu_context->GetGLContext();
-    gl_display = gpu_context->GetGLDisplay();
 #endif
     ret = AddGpuDevice(gpu_context->GetEnableFP16(), gpu_context->GetDeviceID(), gpu_context->GetRankID(),
@@ -139,16 +140,16 @@ lite::InnerContext *ContextUtils::Convert(const ContextC *context_c) {
   for (auto &device_info_c : device_list) {
     MS_CHECK_TRUE_RET(device_info_c != nullptr, nullptr);
     lite::DeviceInfo device_info = {0};
-    if (device_info_c->device_type == kMSDeviceTypeCPU) {
+    if (device_info_c->device_type == OH_AI_DEVICETYPE_CPU) {
       if (device_info_c->allocator == nullptr) {
         device_info_c->allocator = Allocator::Create();
       }
       ret = AddCpuDevice(device_info_c->allocator, context_c->affinity_mode, device_info_c->enable_fp16,
                          device_info_c->provider, device_info_c->provider_device, inner_context.get());
-    } else if (device_info_c->device_type == kMSDeviceTypeGPU) {
+    } else if (device_info_c->device_type == OH_AI_DEVICETYPE_GPU) {
       ret = AddGpuDevice(device_info_c->enable_fp16, 0, 0, 0, false, nullptr, nullptr, device_info_c->provider,
                          device_info_c->provider_device, device_info_c->allocator, inner_context.get());
-    } else if (device_info_c->device_type == kMSDeviceTypeKirinNPU) {
+    } else if (device_info_c->device_type == OH_AI_DEVICETYPE_KIRIN_NPU) {
       ret = AddNpuDevice(device_info_c->frequency, inner_context.get());
     }
     if (ret != kSuccess) {
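With the reordered #ifdef, the GL-texture settings are read from GPUDeviceInfo only in ENABLE_OPENGL_TEXTURE builds, and the defaults are compiled in directly otherwise. On the producer side this pairs with setters mirroring the getters used here; SetEnableGLTexture/SetGLContext/SetGLDisplay are assumed to exist alongside the getters, and egl_context/egl_display are hypothetical handles:

auto context = std::make_shared<mindspore::Context>();
auto gpu_info = std::make_shared<mindspore::GPUDeviceInfo>();
gpu_info->SetEnableFP16(true);
#ifdef ENABLE_OPENGL_TEXTURE
gpu_info->SetEnableGLTexture(true);   // read back by ContextUtils::Convert above
gpu_info->SetGLContext(egl_context);
gpu_info->SetGLDisplay(egl_display);
#endif
context->MutableDeviceInfo().push_back(gpu_info);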
diff --git a/mindspore/lite/tools/benchmark/benchmark_c_api.cc b/mindspore/lite/tools/benchmark/benchmark_c_api.cc
index 9671734cb3f26c07fbe3e8fff9131276fd99e90e..682e18cf98c0b4e31f3fc5a767bb21c69db85c81 100644
--- a/mindspore/lite/tools/benchmark/benchmark_c_api.cc
+++ b/mindspore/lite/tools/benchmark/benchmark_c_api.cc
@@ -52,31 +52,31 @@ int BenchmarkCApi::RunBenchmark() {
     BENCHMARK_LOG_ERROR("InitContext failed, ret: " << ret);
     return ret;
   }
-  model_ = MSModelCreate();
-  ret = MSModelBuildFromFile(model_, flags_->model_file_.c_str(), kMSModelTypeMindIR, context_);
-  if (ret != kMSStatusSuccess) {
-    BENCHMARK_LOG_ERROR("MSModelBuildFromFile failed, ret: " << ret);
+  model_ = OH_AI_ModelCreate();
+  ret = OH_AI_ModelBuildFromFile(model_, flags_->model_file_.c_str(), OH_AI_MODELTYPE_MINDIR, context_);
+  if (ret != OH_AI_STATUS_SUCCESS) {
+    BENCHMARK_LOG_ERROR("OH_AI_ModelBuildFromFile failed, ret: " << ret);
     return ret;
   }
-  inputs_ = MSModelGetInputs(model_);
+  inputs_ = OH_AI_ModelGetInputs(model_);
   if (inputs_.handle_list == nullptr) {
-    BENCHMARK_LOG_ERROR("MSModelGetInputs failed, ret: " << ret);
+    BENCHMARK_LOG_ERROR("OH_AI_ModelGetInputs failed, ret: " << ret);
     return ret;
   }
   if (!flags_->resize_dims_.empty()) {
-    std::vector<MSShapeInfo> shape_infos;
+    std::vector<OH_AI_ShapeInfo> shape_infos;
     std::transform(flags_->resize_dims_.begin(), flags_->resize_dims_.end(), std::back_inserter(shape_infos),
                    [&](auto &shapes) {
-                     MSShapeInfo shape_info;
+                     OH_AI_ShapeInfo shape_info;
                      shape_info.shape_num = shapes.size();
                      for (size_t i = 0; i < shape_info.shape_num; i++) {
                        shape_info.shape[i] = shapes[i];
                      }
                      return shape_info;
                    });
-    ret = MSModelResize(model_, inputs_, shape_infos.data(), inputs_.handle_num);
-    if (ret != kMSStatusSuccess) {
-      BENCHMARK_LOG_ERROR("MSModelResize failed, ret: " << ret);
+    ret = OH_AI_ModelResize(model_, inputs_, shape_infos.data(), inputs_.handle_num);
+    if (ret != OH_AI_STATUS_SUCCESS) {
+      BENCHMARK_LOG_ERROR("OH_AI_ModelResize failed, ret: " << ret);
       return ret;
     }
   }
@@ -85,7 +85,7 @@ int BenchmarkCApi::RunBenchmark() {
   std::cout << "PrepareTime = " << ((end_prepare_time - start_prepare_time) / kFloatMSEC) << " ms" << std::endl;
   ret = LoadInput();
-  if (ret != kMSStatusSuccess) {
+  if (ret != OH_AI_STATUS_SUCCESS) {
     BENCHMARK_LOG_ERROR("LoadInput failed, ret: " << ret)
     return ret;
   }
@@ -94,7 +94,7 @@
   } else {
     ret = MarkPerformance();
   }
-  if (ret != kMSStatusSuccess) {
+  if (ret != OH_AI_STATUS_SUCCESS) {
     BENCHMARK_LOG_ERROR("Run failed, ret: " << ret);
     return ret;
   }
@@ -105,42 +105,42 @@
 }

 int BenchmarkCApi::InitContext() {
-  constexpr int kFrequencyDefault = 3;
-  context_ = MSContextCreate();
+  context_ = OH_AI_ContextCreate();
   if (context_ == nullptr) {
-    BENCHMARK_LOG_ERROR("MSContextCreate failed");
+    BENCHMARK_LOG_ERROR("OH_AI_ContextCreate failed");
     return RET_ERROR;
   }
-  MSContextSetThreadNum(context_, flags_->num_threads_);
-  MSContextSetEnableParallel(context_, flags_->enable_parallel_);
-  MSContextSetThreadAffinityMode(context_, flags_->cpu_bind_mode_);
+  OH_AI_ContextSetThreadNum(context_, flags_->num_threads_);
+  OH_AI_ContextSetEnableParallel(context_, flags_->enable_parallel_);
+  OH_AI_ContextSetThreadAffinityMode(context_, flags_->cpu_bind_mode_);
   if (flags_->device_ == "GPU") {
-    MSDeviceInfoHandle gpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeGPU);
-    MSDeviceInfoSetEnableFP16(gpu_device_info, flags_->enable_fp16_);
-    MSContextAddDeviceInfo(context_, gpu_device_info);
+    OH_AI_DeviceInfoHandle gpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_GPU);
+    OH_AI_DeviceInfoSetEnableFP16(gpu_device_info, flags_->enable_fp16_);
+    OH_AI_ContextAddDeviceInfo(context_, gpu_device_info);
   }
   if (flags_->device_ == "NPU") {
-    MSDeviceInfoHandle npu_device_info = MSDeviceInfoCreate(kMSDeviceTypeKirinNPU);
-    MSDeviceInfoSetFrequency(npu_device_info, kFrequencyDefault);
-    MSContextAddDeviceInfo(context_, npu_device_info);
+    constexpr int kFrequencyDefault = 3;
+    OH_AI_DeviceInfoHandle npu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_KIRIN_NPU);
+    OH_AI_DeviceInfoSetFrequency(npu_device_info, kFrequencyDefault);
+    OH_AI_ContextAddDeviceInfo(context_, npu_device_info);
   }
-  MSDeviceInfoHandle cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU);
-  MSDeviceInfoSetEnableFP16(cpu_device_info, flags_->enable_fp16_);
-  MSContextAddDeviceInfo(context_, cpu_device_info);
+  OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
+  OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, flags_->enable_fp16_);
+  OH_AI_ContextAddDeviceInfo(context_, cpu_device_info);
   return RET_OK;
 }

 int BenchmarkCApi::GenerateInputData() {
   for (size_t i = 0; i < inputs_.handle_num; i++) {
-    MSTensorHandle tensor = inputs_.handle_list[i];
-    auto data_type = MSTensorGetDataType(tensor);
-    if (data_type == kMSDataTypeObjectTypeString) {
-      BENCHMARK_LOG_ERROR("Unsupported kMSDataTypeObjectTypeString");
+    OH_AI_TensorHandle tensor = inputs_.handle_list[i];
+    auto data_type = OH_AI_TensorGetDataType(tensor);
+    if (data_type == OH_AI_DATATYPE_OBJECTTYPE_STRING) {
+      BENCHMARK_LOG_ERROR("Unsupported OH_AI_DATATYPE_OBJECTTYPE_STRING");
       return RET_ERROR;
     } else {
-      auto data_ptr = MSTensorGetMutableData(tensor);
-      auto data_size = MSTensorGetDataSize(tensor);
+      auto data_ptr = OH_AI_TensorGetMutableData(tensor);
+      auto data_size = OH_AI_TensorGetDataSize(tensor);
       (void)GenerateRandomData(data_size, data_ptr, static_cast<int>(data_type));
     }
   }
@@ -153,26 +153,26 @@ int BenchmarkCApi::ReadInputFile() {
     return RET_ERROR;
   } else {
     for (size_t i = 0; i < flags_->input_data_list_.size(); i++) {
-      MSTensorHandle tensor = inputs_.handle_list[i];
+      OH_AI_TensorHandle tensor = inputs_.handle_list[i];
       size_t size;
       auto bin_buf = lite::ReadFile(flags_->input_data_list_[i].c_str(), &size);
       if (bin_buf == nullptr) {
         BENCHMARK_LOG_ERROR("ReadFile failed");
         return RET_ERROR;
       }
-      if (MSTensorGetDataType(tensor) == kMSDataTypeObjectTypeString) {
-        BENCHMARK_LOG_ERROR("Unsupported kMSDataTypeObjectTypeString");
+      if (OH_AI_TensorGetDataType(tensor) == OH_AI_DATATYPE_OBJECTTYPE_STRING) {
+        BENCHMARK_LOG_ERROR("Unsupported OH_AI_DATATYPE_OBJECTTYPE_STRING");
         return RET_ERROR;
       } else {
-        auto tensor_data_size = MSTensorGetDataSize(tensor);
+        auto tensor_data_size = OH_AI_TensorGetDataSize(tensor);
         if (tensor_data_size != size) {
           BENCHMARK_LOG_ERROR("Input file size error, required: " << tensor_data_size << ", in fact: " << size);
           delete[] bin_buf;
           return RET_ERROR;
         }
-        auto input_data = MSTensorGetMutableData(tensor);
+        auto input_data = OH_AI_TensorGetMutableData(tensor);
         if (input_data == nullptr) {
-          BENCHMARK_LOG_ERROR("MSTensorGetMutableData failed");
+          BENCHMARK_LOG_ERROR("OH_AI_TensorGetMutableData failed");
           return RET_ERROR;
         }
         memcpy(input_data, bin_buf, size);
@@ -191,9 +191,9 @@ int BenchmarkCApi::MarkAccuracy() {
     BENCHMARK_LOG_ERROR("PrintInputData failed, ret: " << status);
     return status;
   }
-  status = MSModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
-  if (status != kMSStatusSuccess) {
-    BENCHMARK_LOG_ERROR("MSModelPredict failed, ret: " << status);
+  status = OH_AI_ModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
+  if (status != OH_AI_STATUS_SUCCESS) {
+    BENCHMARK_LOG_ERROR("OH_AI_ModelPredict failed, ret: " << status);
     return RET_ERROR;
   }
   status = ReadCalibData();
@@ -213,9 +213,9 @@ int BenchmarkCApi::MarkPerformance() {
   MS_LOG(INFO) << "Running warm up loops...";
   std::cout << "Running warm up loops..." << std::endl;
   for (int i = 0; i < flags_->warm_up_loop_count_; i++) {
-    auto ret = MSModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
-    if (ret != kMSStatusSuccess) {
-      BENCHMARK_LOG_ERROR("MSModelPredict failed, ret: " << kMSStatusSuccess);
+    auto ret = OH_AI_ModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
+    if (ret != OH_AI_STATUS_SUCCESS) {
+      BENCHMARK_LOG_ERROR("OH_AI_ModelPredict failed, ret: " << ret);
       return RET_ERROR;
     }
   }
@@ -228,9 +228,9 @@
   for (int i = 0; i < flags_->loop_count_; i++) {
     auto start = GetTimeUs();
-    auto ret = MSModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
-    if (ret != kMSStatusSuccess) {
-      BENCHMARK_LOG_ERROR("MSModelPredict failed, ret: " << kMSStatusSuccess);
+    auto ret = OH_AI_ModelPredict(model_, inputs_, &outputs_, before_call_back_, after_call_back_);
+    if (ret != OH_AI_STATUS_SUCCESS) {
+      BENCHMARK_LOG_ERROR("OH_AI_ModelPredict failed, ret: " << ret);
       return RET_ERROR;
     }
     auto end = GetTimeUs();
@@ -262,7 +262,7 @@ int BenchmarkCApi::MarkPerformance() {
 }

 int BenchmarkCApi::GetDataTypeByTensorName(const std::string &tensor_name) {
-  return MSTensorGetDataType(MSModelGetOutputByTensorName(model_, tensor_name.c_str()));
+  return OH_AI_TensorGetDataType(OH_AI_ModelGetOutputByTensorName(model_, tensor_name.c_str()));
 }

 int BenchmarkCApi::CompareOutput() {
@@ -272,14 +272,14 @@
   int total_size = 0;
   for (const auto &calib_tensor : benchmark_data_) {
     std::string tensor_name = calib_tensor.first;
-    MSTensorHandle tensor = MSModelGetOutputByTensorName(model_, tensor_name.c_str());
+    OH_AI_TensorHandle tensor = OH_AI_ModelGetOutputByTensorName(model_, tensor_name.c_str());
     if (tensor == nullptr) {
       BENCHMARK_LOG_ERROR("Get tensor failed, tensor name: " << tensor_name);
       return RET_ERROR;
     }
     int ret;
-    if (static_cast<TypeId>(MSTensorGetDataType(tensor)) == kObjectTypeString) {
-      BENCHMARK_LOG_ERROR("Unsupported kMSDataTypeObjectTypeString");
+    if (static_cast<TypeId>(OH_AI_TensorGetDataType(tensor)) == kObjectTypeString) {
+      BENCHMARK_LOG_ERROR("Unsupported OH_AI_DATATYPE_OBJECTTYPE_STRING");
       return RET_ERROR;
     } else {
       ret = CompareDataGetTotalBiasAndSize(tensor_name, tensor, &total_bias, &total_size);
@@ -307,18 +307,18 @@ int BenchmarkCApi::CompareOutput() {
   return RET_OK;
 }

-int BenchmarkCApi::CompareDataGetTotalBiasAndSize(const std::string &name, MSTensorHandle tensor, float *total_bias,
+int BenchmarkCApi::CompareDataGetTotalBiasAndSize(const std::string &name, OH_AI_TensorHandle tensor, float *total_bias,
                                                   int *total_size) {
-  auto tensor_data = MSTensorGetData(tensor);
+  auto tensor_data = OH_AI_TensorGetData(tensor);
   if (tensor_data == nullptr) {
-    BENCHMARK_LOG_ERROR("MSTensorGetData failed.");
+    BENCHMARK_LOG_ERROR("OH_AI_TensorGetData failed.");
     return RET_ERROR;
   }
   size_t shape_num;
-  const int64_t *shape = MSTensorGetShape(tensor, &shape_num);
+  const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
   std::vector<int64_t> vec_shape(shape, shape + shape_num);
   float bias = 0;
-  switch (static_cast<TypeId>(MSTensorGetDataType(tensor))) {
+  switch (static_cast<TypeId>(OH_AI_TensorGetDataType(tensor))) {
     case TypeId::kNumberTypeFloat:
     case TypeId::kNumberTypeFloat32: {
       bias = CompareData<float>(name, vec_shape, tensor_data);
@@ -345,7 +345,7 @@ int BenchmarkCApi::CompareDataGetTotalBiasAndSize(const std::string &name, MSTen
       break;
     }
     default:
-      BENCHMARK_LOG_ERROR("Unsupported data type " << static_cast<int>(MSTensorGetDataType(tensor)));
+      BENCHMARK_LOG_ERROR("Unsupported data type " << static_cast<int>(OH_AI_TensorGetDataType(tensor)));
       return RET_ERROR;
   }
   if (bias < 0) {
@@ -362,13 +362,13 @@ int BenchmarkCApi::PrintInputData() {
   for (size_t i = 0; i < inputs_.handle_num; i++) {
     auto input = inputs_.handle_list[i];
     std::cout << "InData" << i << ": ";
-    auto data_type = static_cast<TypeId>(MSTensorGetDataType(input));
+    auto data_type = static_cast<TypeId>(OH_AI_TensorGetDataType(input));
     if (data_type == TypeId::kObjectTypeString) {
-      BENCHMARK_LOG_ERROR("Unsupported kMSDataTypeObjectTypeString.");
+      BENCHMARK_LOG_ERROR("Unsupported OH_AI_DATATYPE_OBJECTTYPE_STRING.");
       return RET_ERROR;
     }
-    auto tensor_data = MSTensorGetData(input);
-    size_t print_num = std::min(MSTensorGetElementNum(input), kPrintDataNum);
+    auto tensor_data = OH_AI_TensorGetData(input);
+    size_t print_num = std::min(OH_AI_TensorGetElementNum(input), kPrintDataNum);
     for (size_t j = 0; j < print_num; j++) {
       if (data_type == TypeId::kNumberTypeFloat32 || data_type == TypeId::kNumberTypeFloat) {
         std::cout << static_cast<const float *>(tensor_data)[j] << " ";
@@ -420,8 +420,8 @@ float g_op_cost_total_ = 0.0f;
 std::map<std::string, std::pair<int, float>> g_op_times_by_type_;
 std::map<std::string, std::pair<int, float>> g_op_times_by_name_;

-bool TimeBeforeCallback(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs,
-                        const MSCallBackParamC kernel_Info) {
+bool TimeBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
+                        const OH_AI_CallBackParam kernel_Info) {
   if (g_op_times_by_type_.find(kernel_Info.node_type) == g_op_times_by_type_.end()) {
     g_op_times_by_type_.insert(std::make_pair(kernel_Info.node_type, std::make_pair(0, 0.0f)));
   }
@@ -434,8 +434,8 @@ bool TimeBeforeCallback(const MSTensorHandleArray inputs, const MSTensorHandleAr
   return true;
 }

-bool TimeAfterCallback(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs,
-                       const MSCallBackParamC kernel_Info) {
+bool TimeAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
+                       const OH_AI_CallBackParam kernel_Info) {
   uint64_t opEnd = mindspore::lite::GetTimeUs();
   float cost = static_cast<float>(opEnd - g_op_begin_) / mindspore::lite::kFloatMSEC;
   g_op_cost_total_ += cost;
diff --git a/mindspore/lite/tools/benchmark/benchmark_c_api.h b/mindspore/lite/tools/benchmark/benchmark_c_api.h
index 4ba9649a1960247b2b67ee4afaf64462c593f65d..f394a31b88bb8ffe51f246d1b0f24403473c10f2 100644
--- a/mindspore/lite/tools/benchmark/benchmark_c_api.h
+++ b/mindspore/lite/tools/benchmark/benchmark_c_api.h
@@ -25,10 +25,10 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-bool TimeBeforeCallback(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs,
-                        const MSCallBackParamC kernel_Info);
-bool TimeAfterCallback(const MSTensorHandleArray inputs, const MSTensorHandleArray outputs,
-                       const MSCallBackParamC kernel_Info);
+bool TimeBeforeCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
+                        const OH_AI_CallBackParam kernel_Info);
+bool TimeAfterCallback(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
+                       const OH_AI_CallBackParam kernel_Info);
 #ifdef __cplusplus
 }
 #endif
@@ -37,18 +37,18 @@ using mindspore::lite::BenchmarkBase;
 using mindspore::lite::BenchmarkFlags;

 namespace mindspore::tools {
-class MS_API BenchmarkCApi : public BenchmarkBase {
+class OH_AI_API BenchmarkCApi : public BenchmarkBase {
  public:
  explicit BenchmarkCApi(BenchmarkFlags *flags) : BenchmarkBase(flags) {}

-  virtual ~BenchmarkCApi() { MSModelDestroy(&model_); }
+  virtual ~BenchmarkCApi() { OH_AI_ModelDestroy(&model_); }

  int RunBenchmark() override;
  int LoadInput() override;

 protected:
-  int CompareDataGetTotalBiasAndSize(const std::string &name, MSTensorHandle tensor, float *total_bias,
+  int CompareDataGetTotalBiasAndSize(const std::string &name, OH_AI_TensorHandle tensor, float *total_bias,
                                      int *total_size);
  int InitContext();
  int GenerateInputData() override;
@@ -66,13 +66,13 @@ class MS_API BenchmarkCApi : public BenchmarkBase {
  int MarkAccuracy();

 private:
-  MSModelHandle model_ = nullptr;
-  MSContextHandle context_ = nullptr;
-  MSTensorHandleArray inputs_;
-  MSTensorHandleArray outputs_;
+  OH_AI_ModelHandle model_ = nullptr;
+  OH_AI_ContextHandle context_ = nullptr;
+  OH_AI_TensorHandleArray inputs_;
+  OH_AI_TensorHandleArray outputs_;

-  MSKernelCallBackC before_call_back_ = nullptr;
-  MSKernelCallBackC after_call_back_ = nullptr;
+  OH_AI_KernelCallBack before_call_back_ = nullptr;
+  OH_AI_KernelCallBack after_call_back_ = nullptr;
 };
 }  // namespace mindspore::tools
 #endif  // MINDSPORE_LITE_TOOLS_BENCHMARK_BENCHMARK_C_API_H_
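For reference, the resize path exercised by the benchmark expects one OH_AI_ShapeInfo per model input; for a single-input model it reduces to the following sketch (the 1x224x224x3 shape is illustrative):

OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
OH_AI_ShapeInfo shape_info;
shape_info.shape_num = 4;
const int64_t dims[4] = {1, 224, 224, 3};
for (size_t i = 0; i < shape_info.shape_num; i++) {
  shape_info.shape[i] = dims[i];
}
// handle_num == 1 here, so one shape_info covers the whole input list.
OH_AI_Status ret = OH_AI_ModelResize(model, inputs, &shape_info, inputs.handle_num);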