diff --git a/mindspore-src/source/mindspore/lite/src/litert/c_api/context_c.cc b/mindspore-src/source/mindspore/lite/src/litert/c_api/context_c.cc index 9322a6424408e03a2660897eca63cf4ff54def4e..abd7779f7a0b3fbe37e90de57326a853528b64fd 100644 --- a/mindspore-src/source/mindspore/lite/src/litert/c_api/context_c.cc +++ b/mindspore-src/source/mindspore/lite/src/litert/c_api/context_c.cc @@ -32,12 +32,12 @@ OH_AI_ContextHandle OH_AI_ContextCreate() { auto impl = new (std::nothrow) mindspore::ContextC(); if (impl == nullptr) { - MS_LOG(ERROR) << "memory allocation failed."; + MS_LOG(ERROR) << "memory allocation failed, because OH_AI_ContextCreate impl is nullptr."; return nullptr; } impl->context_ = new (std::nothrow) mindspore::Context(); if (impl->context_ == nullptr) { - MS_LOG(ERROR) << "memory allocation failed."; + MS_LOG(ERROR) << "memory allocation failed, because OH_AI_ContextCreate impl->context is nullptr."; delete impl; return nullptr; } @@ -47,7 +47,7 @@ OH_AI_ContextHandle OH_AI_ContextCreate() { void OH_AI_ContextDestroy(OH_AI_ContextHandle *context) { if (context == nullptr || *context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextDestroy context is nullptr."; return; } auto impl = static_cast(*context); @@ -68,7 +68,7 @@ int32_t OH_AI_Inner_ContextNeedDestroy() { void OH_AI_ContextSetThreadNum(OH_AI_ContextHandle context, int32_t thread_num) { if (context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextSetThreadNum context is nullptr."; return; } auto impl = static_cast(context); @@ -77,7 +77,7 @@ void OH_AI_ContextSetThreadNum(OH_AI_ContextHandle context, int32_t thread_num) int32_t OH_AI_ContextGetThreadNum(const OH_AI_ContextHandle context) { if (context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextGetThreadNum context is nullptr."; return 0; } auto impl = static_cast(context); @@ -86,7 +86,7 @@ int32_t OH_AI_ContextGetThreadNum(const OH_AI_ContextHandle context) { void OH_AI_ContextSetThreadAffinityMode(OH_AI_ContextHandle context, int mode) { if (context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextSetThreadAffinityMode context is nullptr."; return; } auto impl = static_cast(context); @@ -96,7 +96,7 @@ void OH_AI_ContextSetThreadAffinityMode(OH_AI_ContextHandle context, int mode) { int OH_AI_ContextGetThreadAffinityMode(const OH_AI_ContextHandle context) { if (context == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextGetThreadAffinityMode param is nullptr."; return 0; } auto impl = static_cast(context); @@ -105,7 +105,7 @@ int OH_AI_ContextGetThreadAffinityMode(const OH_AI_ContextHandle context) { void OH_AI_ContextSetThreadAffinityCoreList(OH_AI_ContextHandle context, const int32_t *core_list, size_t core_num) { if (context == nullptr || core_list == nullptr) { - MS_LOG(ERROR) << "context or core_list is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextSetThreadAffinityCoreList context or core_list is nullptr."; return; } const std::vector vec_core_list(core_list, core_list + core_num); @@ -116,7 +116,7 @@ void OH_AI_ContextSetThreadAffinityCoreList(OH_AI_ContextHandle context, const i const int32_t *OH_AI_ContextGetThreadAffinityCoreList(const OH_AI_ContextHandle context, size_t *core_num) { if (context == nullptr || core_num == nullptr) { - MS_LOG(ERROR) << "context or core_num is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextGetThreadAffinityCoreList context or core_num 
is nullptr."; return nullptr; } auto impl = static_cast(context); @@ -124,7 +124,7 @@ const int32_t *OH_AI_ContextGetThreadAffinityCoreList(const OH_AI_ContextHandle *core_num = affinity_core_list.size(); int32_t *core_list = static_cast(malloc((*core_num) * sizeof(int32_t))); if (core_list == nullptr) { - MS_LOG(ERROR) << "malloc core_list is null."; + MS_LOG(ERROR) << "OH_AI_ContextGetThreadAffinityCoreList malloc core_list is null."; return nullptr; } for (size_t i = 0; i < affinity_core_list.size(); i++) { @@ -135,7 +135,7 @@ const int32_t *OH_AI_ContextGetThreadAffinityCoreList(const OH_AI_ContextHandle void OH_AI_ContextSetEnableParallel(OH_AI_ContextHandle context, bool is_parallel) { if (context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextSetEnableParallel context is nullptr."; return; } auto impl = static_cast(context); @@ -144,7 +144,7 @@ void OH_AI_ContextSetEnableParallel(OH_AI_ContextHandle context, bool is_paralle bool OH_AI_ContextGetEnableParallel(const OH_AI_ContextHandle context) { if (context == nullptr) { - MS_LOG(ERROR) << "context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextGetEnableParallel context is nullptr."; return false; } auto impl = static_cast(context); @@ -153,7 +153,7 @@ bool OH_AI_ContextGetEnableParallel(const OH_AI_ContextHandle context) { void OH_AI_ContextAddDeviceInfo(OH_AI_ContextHandle context, OH_AI_DeviceInfoHandle device_info) { if (context == nullptr || device_info == nullptr) { - MS_LOG(ERROR) << "context or device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_ContextAddDeviceInfo context or device_info is nullptr."; return; } auto impl = static_cast(context); @@ -177,7 +177,7 @@ OH_AI_DeviceInfoHandle OH_AI_DeviceInfoCreate(OH_AI_DeviceType device_type) { impl = nullptr; } if (impl == nullptr) { - MS_LOG(ERROR) << "memory allocation failed."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoCreate memory allocation failed."; return nullptr; } return static_cast(impl); @@ -185,7 +185,7 @@ OH_AI_DeviceInfoHandle OH_AI_DeviceInfoCreate(OH_AI_DeviceType device_type) { void OH_AI_DeviceInfoDestroy(OH_AI_DeviceInfoHandle *device_info) { if (device_info == nullptr || *device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoDestroy device_info is nullptr."; return; } auto impl = static_cast(*device_info); @@ -195,11 +195,11 @@ void OH_AI_DeviceInfoDestroy(OH_AI_DeviceInfoHandle *device_info) { void OH_AI_DeviceInfoSetProvider(OH_AI_DeviceInfoHandle device_info, const char *provider) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetProvider device_info is nullptr."; return; } if (provider == nullptr) { - MS_LOG(ERROR) << "provider is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetProvider provider is nullptr."; return; } auto impl = static_cast(device_info); @@ -208,13 +208,13 @@ void OH_AI_DeviceInfoSetProvider(OH_AI_DeviceInfoHandle device_info, const char const char *OH_AI_DeviceInfoGetProvider(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetProvider device_info is nullptr."; return nullptr; } auto impl = static_cast(device_info); char *provider = static_cast(malloc(impl->GetProvider().size() + 1)); if (provider == nullptr) { - MS_LOG(ERROR) << "malloc provider is null."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetProvider malloc provider is null."; return nullptr; } for (size_t i = 0; i < 
impl->GetProvider().size(); i++) { @@ -226,11 +226,11 @@ const char *OH_AI_DeviceInfoGetProvider(const OH_AI_DeviceInfoHandle device_info void OH_AI_DeviceInfoSetProviderDevice(OH_AI_DeviceInfoHandle device_info, const char *device) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetProviderDevice device_info is nullptr."; return; } if (device == nullptr) { - MS_LOG(ERROR) << "device is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetProviderDevice device is nullptr."; return; } auto impl = static_cast(device_info); @@ -239,13 +239,13 @@ void OH_AI_DeviceInfoSetProviderDevice(OH_AI_DeviceInfoHandle device_info, const const char *OH_AI_DeviceInfoGetProviderDevice(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetProviderDevice device_info is nullptr."; return nullptr; } auto impl = static_cast(device_info); char *provider_device = static_cast(malloc(impl->GetProviderDevice().size() + 1)); if (provider_device == nullptr) { - MS_LOG(ERROR) << "malloc provider_device is null."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetProviderDevice malloc provider_device is null."; return nullptr; } for (size_t i = 0; i < impl->GetProviderDevice().size(); i++) { @@ -257,7 +257,7 @@ const char *OH_AI_DeviceInfoGetProviderDevice(const OH_AI_DeviceInfoHandle devic OH_AI_DeviceType OH_AI_DeviceInfoGetDeviceType(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetDeviceType device_info is nullptr."; return OH_AI_DEVICETYPE_INVALID; } auto impl = static_cast(device_info); @@ -266,7 +266,7 @@ OH_AI_DeviceType OH_AI_DeviceInfoGetDeviceType(const OH_AI_DeviceInfoHandle devi void OH_AI_DeviceInfoSetEnableFP16(OH_AI_DeviceInfoHandle device_info, bool is_fp16) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetEnableFP16 device_info is nullptr."; return; } auto impl_device = static_cast(device_info); @@ -280,13 +280,13 @@ void OH_AI_DeviceInfoSetEnableFP16(OH_AI_DeviceInfoHandle device_info, bool is_f auto impl = static_cast(device_info); impl->SetEnableFP16(is_fp16); } else { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetEnableFP16 Unsupported Feature."; } } bool OH_AI_DeviceInfoGetEnableFP16(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetEnableFP16 device_info is nullptr."; return false; } auto impl_device = static_cast(device_info); @@ -300,14 +300,14 @@ bool OH_AI_DeviceInfoGetEnableFP16(const OH_AI_DeviceInfoHandle device_info) { auto impl = static_cast(device_info); return impl->GetEnableFP16(); } else { - MS_LOG(ERROR) << "Unsupported Feature. device_type: " << impl_device->GetDeviceType(); + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetEnableFP16 Unsupported Feature. 
device_type: " << impl_device->GetDeviceType(); return false; } } void OH_AI_DeviceInfoSetFrequency(OH_AI_DeviceInfoHandle device_info, int frequency) { // only for KirinNPU if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetFrequency device_info is nullptr."; return; } auto impl_device = static_cast(device_info); @@ -315,13 +315,13 @@ void OH_AI_DeviceInfoSetFrequency(OH_AI_DeviceInfoHandle device_info, int freque auto impl = static_cast(device_info); impl->SetFrequency(frequency); } else { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetFrequency Unsupported Feature."; } } int OH_AI_DeviceInfoGetFrequency(const OH_AI_DeviceInfoHandle device_info) { // only for KirinNPU if (device_info == nullptr) { - MS_LOG(ERROR) << "device_info is nullptr."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetFrequency device_info is nullptr."; return -1; } auto impl_device = static_cast(device_info); @@ -329,7 +329,7 @@ int OH_AI_DeviceInfoGetFrequency(const OH_AI_DeviceInfoHandle device_info) { // auto impl = static_cast(device_info); return impl->GetFrequency(); } else { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetFrequency Unsupported Feature."; return -1; } } @@ -344,7 +344,7 @@ NNRTDeviceDesc *OH_AI_GetAllNNRTDeviceDescs(size_t *num) { void *hiai_handle_{nullptr}; auto ret_load = mindspore::lite::LoadHiaiFLibraryFromPath(&hiai_handle_); if (!ret_load || hiai_handle_ == nullptr) { - MS_LOG(ERROR) << "Load HiAI_Foundation so failed."; + MS_LOG(ERROR) << "OH_AI_GetAllNNRTDeviceDescs Load HiAI_Foundation so failed."; } #endif *num = 0; @@ -387,7 +387,7 @@ NNRTDeviceDesc *OH_AI_GetAllNNRTDeviceDescs(size_t *num) { NNRTDeviceDesc *OH_AI_GetElementOfNNRTDeviceDescs(NNRTDeviceDesc *descs, size_t index) { if (descs == nullptr) { - MS_LOG(ERROR) << "descs is null"; + MS_LOG(ERROR) << "OH_AI_GetElementOfNNRTDeviceDescs descs is null"; return nullptr; } return descs + index; @@ -395,7 +395,7 @@ NNRTDeviceDesc *OH_AI_GetElementOfNNRTDeviceDescs(NNRTDeviceDesc *descs, size_t void OH_AI_DestroyAllNNRTDeviceDescs(NNRTDeviceDesc **desc) { if (desc == nullptr) { - MS_LOG(WARNING) << "desc is null"; + MS_LOG(WARNING) << "OH_AI_DestroyAllNNRTDeviceDescs desc is null"; return; } free(*desc); @@ -404,7 +404,7 @@ void OH_AI_DestroyAllNNRTDeviceDescs(NNRTDeviceDesc **desc) { size_t OH_AI_GetDeviceIdFromNNRTDeviceDesc(const NNRTDeviceDesc *desc) { if (desc == nullptr) { - MS_LOG(ERROR) << "NNRT desc is null"; + MS_LOG(ERROR) << "OH_AI_GetDeviceIdFromNNRTDeviceDesc NNRT desc is null"; return 0; } return desc->device_id; @@ -412,7 +412,7 @@ size_t OH_AI_GetDeviceIdFromNNRTDeviceDesc(const NNRTDeviceDesc *desc) { const char *OH_AI_GetNameFromNNRTDeviceDesc(const NNRTDeviceDesc *desc) { if (desc == nullptr) { - MS_LOG(ERROR) << "NNRT desc is null"; + MS_LOG(ERROR) << "OH_AI_GetNameFromNNRTDeviceDesc NNRT desc is null"; return nullptr; } return desc->device_name; @@ -420,7 +420,7 @@ const char *OH_AI_GetNameFromNNRTDeviceDesc(const NNRTDeviceDesc *desc) { OH_AI_NNRTDeviceType OH_AI_GetTypeFromNNRTDeviceDesc(const NNRTDeviceDesc *desc) { if (desc == nullptr) { - MS_LOG(ERROR) << "NNRT desc is null"; + MS_LOG(ERROR) << "OH_AI_GetTypeFromNNRTDeviceDesc NNRT desc is null"; return OH_AI_NNRTDeviceType::OH_AI_NNRTDEVICE_OTHERS; } return desc->device_type; @@ -430,11 +430,11 @@ OH_AI_DeviceInfoHandle OH_AI_CreateNNRTDeviceInfoByName(const char *name) { size_t num = 0; NNRTDeviceDesc *desc = 
OH_AI_GetAllNNRTDeviceDescs(&num); if (desc == nullptr) { - MS_LOG(ERROR) << "Get all device desc failed"; + MS_LOG(ERROR) << "OH_AI_CreateNNRTDeviceInfoByName Get all device desc failed"; return nullptr; } if (name == nullptr) { - MS_LOG(ERROR) << "NNRT device name is nullptr"; + MS_LOG(ERROR) << "OH_AI_CreateNNRTDeviceInfoByName NNRT device name is nullptr"; return nullptr; } OH_AI_DeviceInfoHandle handle = nullptr; @@ -453,7 +453,7 @@ OH_AI_DeviceInfoHandle OH_AI_CreateNNRTDeviceInfoByType(OH_AI_NNRTDeviceType typ size_t num = 0; NNRTDeviceDesc *desc = OH_AI_GetAllNNRTDeviceDescs(&num); if (desc == nullptr) { - MS_LOG(ERROR) << "Get all device desc failed"; + MS_LOG(ERROR) << "OH_AI_CreateNNRTDeviceInfoByType Get all device desc failed"; return nullptr; } @@ -471,11 +471,11 @@ OH_AI_DeviceInfoHandle OH_AI_CreateNNRTDeviceInfoByType(OH_AI_NNRTDeviceType typ void OH_AI_DeviceInfoSetDeviceId(OH_AI_DeviceInfoHandle device_info, size_t device_id) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetDeviceId device info is null"; return; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { - MS_LOG(ERROR) << "Set device_id of non-NNRT device is not allowable, ignored"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetDeviceId Set device_id of non-NNRT device is not allowable, ignored"; return; } auto impl = reinterpret_cast(device_info); @@ -484,11 +484,11 @@ void OH_AI_DeviceInfoSetDeviceId(OH_AI_DeviceInfoHandle device_info, size_t devi size_t OH_AI_DeviceInfoGetDeviceId(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetDeviceId device info is null"; return 0; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { - MS_LOG(ERROR) << "Get device_id of non-NNRT device is not allowable, ignored"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetDeviceId Get device_id of non-NNRT device is not allowable, ignored"; return 0; } auto impl = reinterpret_cast(device_info); @@ -497,7 +497,7 @@ size_t OH_AI_DeviceInfoGetDeviceId(const OH_AI_DeviceInfoHandle device_info) { void OH_AI_DeviceInfoSetPerformanceMode(OH_AI_DeviceInfoHandle device_info, OH_AI_PerformanceMode mode) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetPerformanceMode device info is null"; return; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { @@ -510,7 +510,7 @@ void OH_AI_DeviceInfoSetPerformanceMode(OH_AI_DeviceInfoHandle device_info, OH_A OH_AI_PerformanceMode OH_AI_DeviceInfoGetPerformanceMode(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetPerformanceMode device info is null"; return OH_AI_PERFORMANCE_NONE; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { @@ -523,7 +523,7 @@ OH_AI_PerformanceMode OH_AI_DeviceInfoGetPerformanceMode(const OH_AI_DeviceInfoH void OH_AI_DeviceInfoSetPriority(OH_AI_DeviceInfoHandle device_info, OH_AI_Priority priority) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoSetPriority device info is null"; return; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { @@ -536,7 +536,7 @@ void OH_AI_DeviceInfoSetPriority(OH_AI_DeviceInfoHandle device_info, OH_AI_Prior OH_AI_Priority 
OH_AI_DeviceInfoGetPriority(const OH_AI_DeviceInfoHandle device_info) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoGetPriority device info is null"; return OH_AI_PRIORITY_NONE; } if (OH_AI_DeviceInfoGetDeviceType(device_info) != OH_AI_DEVICETYPE_NNRT) { @@ -550,7 +550,7 @@ OH_AI_Priority OH_AI_DeviceInfoGetPriority(const OH_AI_DeviceInfoHandle device_i OH_AI_API OH_AI_Status OH_AI_DeviceInfoAddExtension(OH_AI_DeviceInfoHandle device_info, const char *name, const char*value, size_t value_size) { if (device_info == nullptr) { - MS_LOG(ERROR) << "device info is null"; + MS_LOG(ERROR) << "OH_AI_DeviceInfoAddExtension device info is null"; return OH_AI_STATUS_LITE_NULLPTR; } if (name == nullptr || value == nullptr) { diff --git a/mindspore-src/source/mindspore/lite/src/litert/c_api/model_c.cc b/mindspore-src/source/mindspore/lite/src/litert/c_api/model_c.cc index c581ffef33a5016f9127ff8da9f73ed7a86923f9..90c148c2c981d154baf3e11e40d7773b6c7f2fff 100644 --- a/mindspore-src/source/mindspore/lite/src/litert/c_api/model_c.cc +++ b/mindspore-src/source/mindspore/lite/src/litert/c_api/model_c.cc @@ -69,7 +69,7 @@ class ModelC { MSTensor **ModelC::GetInputs(size_t *input_num) { if (model_ == nullptr) { - MS_LOG(ERROR) << "model_ is nullptr."; + MS_LOG(ERROR) << "ModelC::GetInputs model_ is nullptr."; return nullptr; } if (!inputs_.empty()) { @@ -99,7 +99,7 @@ MSTensor **ModelC::GetOutputs(size_t *output_num) { MSTensor **ModelC::GetOutputsTensor(size_t *output_num, std::vector *vec_tensors) { if (model_ == nullptr) { - MS_LOG(ERROR) << "model_ is nullptr."; + MS_LOG(ERROR) << "ModelC::GetOutputsTensor model_ is nullptr."; return nullptr; } if (!vec_tensors->empty()) { @@ -155,7 +155,7 @@ OH_AI_ModelHandle OH_AI_ModelCreate() { } impl->model_ = std::make_shared(); if (impl->model_ == nullptr) { - MS_LOG(ERROR) << "model_ is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelCreate model_ is nullptr."; delete impl; return nullptr; } @@ -164,7 +164,7 @@ OH_AI_ModelHandle OH_AI_ModelCreate() { void OH_AI_ModelDestroy(OH_AI_ModelHandle *model) { if (model == nullptr || *model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelDestroy model is nullptr."; return; } auto impl = static_cast(*model); @@ -173,29 +173,29 @@ void OH_AI_ModelDestroy(OH_AI_ModelHandle *model) { } void OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model, void *workspace, size_t workspace_size) { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_ModelSetWorkspace Unsupported Feature."; return; } size_t OH_AI_ModelCalcWorkspaceSize(OH_AI_ModelHandle model) { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_ModelCalcWorkspaceSize Unsupported Feature."; return 0; } OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context) { if (model == nullptr || model_data == nullptr || model_context == nullptr) { - MS_LOG(ERROR) << "model/model_data/model_context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelBuild model/model_data/model_context is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } if (model_type == OH_AI_MODELTYPE_INVALID) { - MS_LOG(ERROR) << "model_type is invalid."; + MS_LOG(ERROR) << "OH_AI_ModelBuild model_type is invalid."; return OH_AI_STATUS_LITE_PARAM_INVALID; } mindspore::ContextC *context = static_cast(model_context); auto impl = static_cast(model); if (impl->context_.get() != context->context_ && 
context->owned_by_model_) { - MS_LOG(ERROR) << "context is owned by other model."; + MS_LOG(ERROR) << "OH_AI_ModelBuild context is owned by other model."; return OH_AI_STATUS_LITE_PARAM_INVALID; } if (impl->context_.get() != context->context_) { @@ -209,17 +209,17 @@ OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, s OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context) { if (model == nullptr || model_path == nullptr || model_context == nullptr) { - MS_LOG(ERROR) << "model/model_path/model_context is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelBuildFromFile model/model_path/model_context is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } if (model_type == OH_AI_MODELTYPE_INVALID) { - MS_LOG(ERROR) << "model_type is invalid."; + MS_LOG(ERROR) << "OH_AI_ModelBuildFromFile model_type is invalid."; return OH_AI_STATUS_LITE_PARAM_INVALID; } mindspore::ContextC *context = static_cast(model_context); auto impl = static_cast(model); if (impl->context_.get() != context->context_ && context->owned_by_model_) { - MS_LOG(ERROR) << "context is owned by other model."; + MS_LOG(ERROR) << "OH_AI_ModelBuildFromFile context is owned by other model."; return OH_AI_STATUS_LITE_PARAM_INVALID; } if (impl->context_.get() != context->context_) { @@ -233,7 +233,7 @@ OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, OH_AI_ShapeInfo *shape_infos, size_t shape_info_num) { if (model == nullptr || shape_infos == nullptr) { - MS_LOG(ERROR) << "model/shape_infos is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelResize model/shape_infos is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } std::vector vec_inputs; @@ -258,14 +258,14 @@ OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandle OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, OH_AI_TensorHandleArray *outputs, const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelPredict model is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } auto impl = static_cast(model); size_t input_num; (void)impl->GetInputs(&input_num); if (input_num != inputs.handle_num) { - MS_LOG(ERROR) << "Wrong input size."; + MS_LOG(ERROR) << "OH_AI_ModelPredict Wrong input size."; return OH_AI_STATUS_LITE_ERROR; } @@ -313,18 +313,18 @@ OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandl } OH_AI_Status OH_AI_ModelRunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after) { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_ModelRunStep Unsupported Feature."; return OH_AI_STATUS_LITE_NOT_SUPPORT; } OH_AI_Status OH_AI_ModelExportWeight(const OH_AI_ModelHandle model, const char *export_path) { - MS_LOG(ERROR) << "Unsupported Feature."; + MS_LOG(ERROR) << "OH_AI_ModelExportWeight Unsupported Feature."; return OH_AI_STATUS_LITE_NOT_SUPPORT; } OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetInputs model is nullptr."; return {0, nullptr}; } auto impl = static_cast(model); @@ -335,7 +335,7 @@ OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model) { 
OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetOutputs model is nullptr."; return {0, nullptr}; } auto impl = static_cast(model); @@ -346,7 +346,7 @@ OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model) { OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name) { if (model == nullptr || tensor_name == nullptr) { - MS_LOG(ERROR) << "model/tensor_name is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetInputByTensorName model/tensor_name is nullptr."; return nullptr; } auto impl = static_cast(model); @@ -357,13 +357,13 @@ OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model return static_cast(inputs[i]); } } - MS_LOG(ERROR) << "tensor is not exist."; + MS_LOG(ERROR) << "OH_AI_ModelGetInputByTensorName tensor does not exist."; return nullptr; } OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name) { if (model == nullptr || tensor_name == nullptr) { - MS_LOG(ERROR) << "model/tensor_name is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetOutputByTensorName model/tensor_name is nullptr."; return nullptr; } auto impl = static_cast(model); @@ -374,7 +374,7 @@ OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle mode return static_cast(outputs[i]); } } - MS_LOG(ERROR) << "tensor is not exist."; + MS_LOG(ERROR) << "OH_AI_ModelGetOutputByTensorName tensor does not exist."; return nullptr; } @@ -456,11 +456,11 @@ void OH_AI_TrainCfgSetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg, OH_AI_Op OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context, const OH_AI_TrainCfgHandle train_cfg) { if (model == nullptr || model_data == nullptr || model_context == nullptr) { - MS_LOG(ERROR) << "model/model_data/model_context is nullptr."; + MS_LOG(ERROR) << "OH_AI_TrainModelBuild model/model_data/model_context is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } if (model_type == OH_AI_MODELTYPE_INVALID) { - MS_LOG(ERROR) << "model_type is invalid."; + MS_LOG(ERROR) << "OH_AI_TrainModelBuild model_type is invalid."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -469,13 +469,13 @@ OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_da auto status = mindspore::Serialization::Load(model_data, data_size, static_cast(model_type), &graph); if (status != mindspore::kSuccess) { - MS_LOG(ERROR) << "load ms file failed."; + MS_LOG(ERROR) << "OH_AI_TrainModelBuild load ms file failed."; return OH_AI_STATUS_LITE_ERROR; } auto context = static_cast(model_context); auto build_train_cfg = static_cast(train_cfg); if (impl->context_.get() != context->context_ && context->owned_by_model_) { - MS_LOG(ERROR) << "context is owned by other model."; + MS_LOG(ERROR) << "OH_AI_TrainModelBuild context is owned by other model."; return OH_AI_STATUS_LITE_PARAM_INVALID; } if (impl->context_.get() != context->context_) { @@ -485,7 +485,7 @@ OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_da auto ret = impl->model_->Build(static_cast(graph), impl->context_, std::shared_ptr(build_train_cfg)); if (ret != mindspore::kSuccess) { - MS_LOG(ERROR) << "Load and compile failed"; + MS_LOG(ERROR) << "OH_AI_TrainModelBuild Load and compile failed"; } return
static_cast(ret.StatusCode()); } @@ -528,7 +528,7 @@ OH_AI_Status OH_AI_TrainModelBuildFromFile(OH_AI_ModelHandle model, const char * OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_rate) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelSetLearningRate model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -538,7 +538,7 @@ OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_ float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetLearningRate model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -547,7 +547,7 @@ float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model) { OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_RunStep model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -557,7 +557,7 @@ OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack b OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetWeights model is nullptr."; return {0, nullptr}; } auto impl = static_cast(model); @@ -578,7 +578,7 @@ OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model) { OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray new_weights) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelUpdateWeights model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -592,7 +592,7 @@ OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_Tenso bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelGetTrainMode model is nullptr."; return false; } auto impl = static_cast(model); @@ -601,7 +601,7 @@ bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model) { OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelSetTrainMode model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -611,7 +611,7 @@ OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train) { OH_AI_Status OH_AI_ModelSetupVirtualBatch(OH_AI_ModelHandle model, int virtual_batch_multiplier, float lr, float momentum) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ModelSetupVirtualBatch model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -623,7 +623,7 @@ OH_AI_Status OH_AI_ExportModel(OH_AI_ModelHandle model, OH_AI_ModelType model_ty OH_AI_QuantizationType quantization_type, bool export_inference_only, char **output_tensor_name, size_t num) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ExportModel model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -644,7 +644,7 @@ OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, 
OH_AI_ModelType mo OH_AI_QuantizationType quantization_type, bool export_inference_only, char **output_tensor_name, size_t num) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ExportModelBuffer model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); @@ -659,7 +659,7 @@ OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, OH_AI_ModelType mo auto data = reinterpret_cast(buffer.MutableData()); *model_data = reinterpret_cast(malloc(buffer.DataSize())); if (*model_data == nullptr) { - MS_LOG(ERROR) << "malloc model_data failed."; + MS_LOG(ERROR) << "OH_AI_ExportModelBuffer malloc model_data failed."; return OH_AI_STATUS_LITE_NULLPTR; } *data_size = buffer.DataSize(); @@ -674,7 +674,7 @@ OH_AI_Status OH_AI_ExportWeightsCollaborateWithMicro(OH_AI_ModelHandle model, OH bool is_inference, bool enable_fp16, char **changeable_weights_name, size_t num) { if (model == nullptr) { - MS_LOG(ERROR) << "model is nullptr."; + MS_LOG(ERROR) << "OH_AI_ExportWeightsCollaborateWithMicro model is nullptr."; return OH_AI_STATUS_LITE_PARAM_INVALID; } auto impl = static_cast(model); diff --git a/mindspore-src/source/mindspore/lite/src/litert/c_api/tensor_c.cc b/mindspore-src/source/mindspore/lite/src/litert/c_api/tensor_c.cc index 9e0b8b119fb26bedcd59b00b9a12c2e2a3588d7e..d8ca5c1fcd659cad2a48629c7d07a39ff4f3977a 100644 --- a/mindspore-src/source/mindspore/lite/src/litert/c_api/tensor_c.cc +++ b/mindspore-src/source/mindspore/lite/src/litert/c_api/tensor_c.cc @@ -46,7 +46,7 @@ void CleanAllocatorTable() { OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, const int64_t *shape, size_t shape_num, const void *data, size_t data_len) { if (name == nullptr || shape == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorCreate param is nullptr."; return nullptr; } std::vector vec_shape(shape_num); @@ -57,14 +57,14 @@ OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, con mindspore::lite::Tensor::CreateTensor(name, static_cast(type), vec_shape, data, data_len); auto lite_tensor_impl = std::make_shared(lite_tensor); if (lite_tensor_impl == nullptr || lite_tensor_impl->lite_tensor() == nullptr) { - MS_LOG(ERROR) << "Failed to allocate tensor impl."; + MS_LOG(ERROR) << "OH_AI_TensorCreate Failed to allocate tensor impl."; return nullptr; } lite_tensor_impl->set_from_session(false); lite_tensor_impl->set_own_data(lite_tensor_impl->lite_tensor()->own_data()); auto impl = new (std::nothrow) mindspore::MSTensor(lite_tensor_impl); if (impl == nullptr) { - MS_LOG(ERROR) << "Failed to allocate MSTensor."; + MS_LOG(ERROR) << "OH_AI_TensorCreate Failed to allocate MSTensor."; return nullptr; } return impl; @@ -72,7 +72,7 @@ OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, con void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor) { if (tensor == nullptr || *tensor == nullptr) { - MS_LOG(ERROR) << "tensor is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorDestroy tensor is nullptr."; return; } auto impl = static_cast(*tensor); @@ -82,13 +82,13 @@ void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor) { OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorClone param is nullptr."; return nullptr; } auto impl = static_cast(tensor); auto clone_impl = impl->Clone(); if (clone_impl == nullptr) { - MS_LOG(ERROR) << "Failed to allocate tensor impl."; +
MS_LOG(ERROR) << "OH_AI_TensorClone Failed to allocate tensor impl."; return nullptr; } std::static_pointer_cast(clone_impl->impl())->set_own_data(true); @@ -98,7 +98,7 @@ OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor) { void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name) { if (tensor == nullptr || name == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetName param is nullptr."; return; } auto impl = static_cast(tensor); @@ -107,7 +107,7 @@ void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name) { const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetName param is nullptr."; return nullptr; } auto ms_tensor = static_cast(tensor); @@ -116,7 +116,7 @@ const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor) { void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetDataType param is nullptr."; return; } auto impl = static_cast(tensor); @@ -125,7 +125,7 @@ void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type) { OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetDataType param is nullptr."; return OH_AI_DATATYPE_UNKNOWN; } auto impl = static_cast(tensor); @@ -135,7 +135,7 @@ OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor) { void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_t shape_num) { if (tensor == nullptr || shape == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetShape param is nullptr."; return; } auto impl = static_cast(tensor); @@ -148,7 +148,7 @@ void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_ const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *shape_num) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetShape param is nullptr."; return nullptr; } auto impl = static_cast(tensor); @@ -158,7 +158,7 @@ const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *sha void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetFormat param is nullptr."; return; } auto impl = static_cast(tensor); @@ -167,7 +167,7 @@ void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format) { OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetFormat param is nullptr."; return OH_AI_FORMAT_NHWC; } auto impl = static_cast(tensor); @@ -176,7 +176,7 @@ OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor) { void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetData param is nullptr."; return; } auto impl = static_cast(tensor); @@ -185,7 +185,7 @@ void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data) { OH_AI_Status OH_AI_TensorSetUserData(OH_AI_TensorHandle tensor, void *data, size_t data_size) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is 
nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetUserData param is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } @@ -203,7 +203,7 @@ OH_AI_Status OH_AI_TensorSetUserData(OH_AI_TensorHandle tensor, void *data, size const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetData param is nullptr."; return nullptr; } auto impl = static_cast(tensor); @@ -212,7 +212,7 @@ const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor) { void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetMutableData param is nullptr."; return nullptr; } auto impl = static_cast(tensor); @@ -221,7 +221,7 @@ void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) { int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetElementNum param is nullptr."; return 0; } auto impl = static_cast(tensor); @@ -230,7 +230,7 @@ int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) { size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetDataSize param is nullptr."; return 0; } auto impl = static_cast(tensor); @@ -239,7 +239,7 @@ size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) { OH_AI_Status OH_AI_TensorSetAllocator(OH_AI_TensorHandle tensor, void *allocator) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorSetAllocator param is nullptr."; return OH_AI_STATUS_LITE_NULLPTR; } auto impl = static_cast(tensor); @@ -261,7 +261,7 @@ OH_AI_Status OH_AI_TensorSetAllocator(OH_AI_TensorHandle tensor, void *allocator void *OH_AI_TensorGetAllocator(OH_AI_TensorHandle tensor) { if (tensor == nullptr) { - MS_LOG(ERROR) << "param is nullptr."; + MS_LOG(ERROR) << "OH_AI_TensorGetAllocator param is nullptr."; return nullptr; } auto impl = static_cast(tensor); diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/addfusion/add.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/addfusion/add.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23face5603a41aed056d80856aa65442cb34653a --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/addfusion/add.yaml @@ -0,0 +1,158 @@ +op_name: Add +genonnx: + - model_name: add_fp32_dyn.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: add_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: add_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: add_fp32_fix_input_4d.onnx + node_param: + inputs: 
["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: add_fp32_dyn_gold_1 + in_model: add_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: add_fp32_fix_gold_1 + in_model: add_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: add_fp32_fix_input_3d + in_model: add_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: add_fp32_fix_input_4d + in_model: add_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: add_fp32_dyn_2_dyn.ms + in_model: add_fp32_dyn.onnx + input_shapes: None + - out_model: add_fp32_dyn_2_fix.ms + in_model: add_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: add_fp32_fix_2_fix.ms + in_model: add_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: add_fp16_dyn_2_dyn.ms + in_model: add_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: add_fp16_dyn_2_fix.ms + in_model: add_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: add_fp16_fix_2_fix.ms + in_model: add_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: add_fp32_fix_input_3d.ms + in_model: add_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: add_fp32_fix_input_4d.ms + in_model: add_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: add_fp32_dyn_2_dyn.ms + gold_in: add_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: add_fp32_dyn_2_fix.ms + gold_in: add_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: add_fp32_fix_2_fix.ms + gold_in: add_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: add_fp16_dyn_2_dyn.ms + gold_in: add_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: add_fp16_dyn_2_fix.ms + gold_in: add_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: add_fp16_fix_2_fix.ms + gold_in: add_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: add_fp32_fix_input_3d.ms + gold_in: add_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: add_fp32_fix_input_4d.ms + gold_in: add_fp32_fix_input_4d + dtypes: 1 + input_shapes: None diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/argmaxfusion/argmax.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/argmaxfusion/argmax.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9145a594c43ab4ed48e12eeaa0fd50faea125f53 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/argmaxfusion/argmax.yaml @@ -0,0 +1,127 @@ +op_name: ArgMax +genonnx: + - model_name: argmax_fp32_dyn.onnx + node_param: + inputs: ["input"] + outputs: ["output"] + attributes: + axis: -1 + graph_param: + inputs: + - name: input + data_type: 1 + dims: [None, None] + outputs: + - name: output + data_type: 7 + dims: [None, None] + initializer: None + - model_name: argmax_fp32_fix.onnx + node_param: + inputs: ["input"] + outputs: ["output"] + attributes: + axis: -1 + graph_param: + inputs: + - name: input + data_type: 1 + dims: [2, 3] + outputs: + - name: output + data_type: 7 + dims: [2, 1] + initializer: None + - model_name: argmax_fp32_fix_input_3d.onnx + node_param: + inputs: 
["input"] + outputs: ["output"] + attributes: + axis: -1 + graph_param: + inputs: + - name: input + data_type: 1 + dims: [2, 3, 4] + outputs: + - name: output + data_type: 7 + dims: [2, 3, 1] + initializer: None + - model_name: argmax_fp32_fix_input_4d.onnx + node_param: + inputs: ["input"] + outputs: ["output"] + attributes: + axis: -1 + graph_param: + inputs: + - name: input + data_type: 1 + dims: [2, 3, 4, 5] + outputs: + - name: output + data_type: 7 + dims: [2, 3, 4, 1] + initializer: None + +gengold: + - gold_name: argmax_fp32_dyn_gold_1 + in_model: argmax_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[5, 5]] + - gold_name: argmax_fp32_fix_gold_1 + in_model: argmax_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[2, 3]] + - gold_name: argmax_fp32_fix_input_3d + in_model: argmax_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[2, 3, 4]] + - gold_name: argmax_fp32_fix_input_4d + in_model: argmax_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[2, 3, 4, 5]] + +convert: + - out_model: argmax_fp32_dyn_2_dyn.ms + in_model: argmax_fp32_dyn.onnx + input_shapes: None + - out_model: argmax_fp32_dyn_2_fix.ms + in_model: argmax_fp32_dyn.onnx + input_shapes: input:5,5 + - out_model: argmax_fp32_fix_2_fix.ms + in_model: argmax_fp32_fix.onnx + input_shapes: None + - out_model: argmax_fp16_dyn_2_dyn.ms + in_model: argmax_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: argmax_fp32_fix_input_3d.ms + in_model: argmax_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: argmax_fp32_fix_input_4d.ms + in_model: argmax_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: argmax_fp32_dyn_2_dyn.ms + gold_in: argmax_fp32_dyn_gold_1 + dtypes: 1 + - in_model: argmax_fp32_dyn_2_fix.ms + gold_in: argmax_fp32_dyn_gold_1 + dtypes: 1 + - in_model: argmax_fp32_fix_2_fix.ms + gold_in: argmax_fp32_fix_gold_1 + dtypes: 1 + - in_model: argmax_fp16_dyn_2_dyn.ms + gold_in: argmax_fp32_dyn_gold_1 + dtypes: 1 + - in_model: argmax_fp32_fix_input_3d.ms + gold_in: argmax_fp32_fix_input_3d + dtypes: 1 + - in_model: argmax_fp32_fix_input_4d.ms + gold_in: argmax_fp32_fix_input_4d + dtypes: 1 \ No newline at end of file diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/avgpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/avgpool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53f6ca05a9c39afddc4f08ff02b1a3e98d564980 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/avgpool.yaml @@ -0,0 +1,72 @@ +op_name: AveragePool +genonnx: + - model_name: averagepool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: averagepool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 111, 111] + initializer: None + +gengold: + - gold_name: averagepool_fp32_dyn_gold_1 + in_model: averagepool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: averagepool_fp32_fix_input_4d + in_model: averagepool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] 
+ +convert: + - out_model: averagepool_fp32_dyn_2_dyn.ms + in_model: averagepool_fp32_dyn.onnx + input_shapes: None + - out_model: averagepool_fp32_dyn_2_fix.ms + in_model: averagepool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: averagepool_fp32_fix_input_4d.ms + in_model: averagepool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: averagepool_fp32_dyn_2_dyn.ms + gold_in: averagepool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: averagepool_fp32_dyn_2_fix.ms + gold_in: averagepool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: averagepool_fp32_fix_input_4d.ms + gold_in: averagepool_fp32_fix_input_4d + dtypes: 1 + + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalavgpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalavgpool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c70857fcbb900407713624d754290268bbfa63c0 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalavgpool.yaml @@ -0,0 +1,70 @@ +op_name: GlobalAveragePool +genonnx: + - model_name: globalavepool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: globalavepool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 1, 1] + initializer: None + +gengold: + - gold_name: globalavepool_fp32_dyn_gold_1 + in_model: globalavepool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: globalavepool_fp32_fix_input_4d + in_model: globalavepool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] + +convert: + - out_model: globalavepool_fp32_dyn_2_dyn.ms + in_model: globalavepool_fp32_dyn.onnx + input_shapes: None + - out_model: globalavepool_fp32_dyn_2_fix.ms + in_model: globalavepool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: globalavepool_fp32_fix_input_4d.ms + in_model: globalavepool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: globalavepool_fp32_dyn_2_dyn.ms + gold_in: globalavepool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalavepool_fp32_dyn_2_fix.ms + gold_in: globalavepool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalavepool_fp32_fix_input_4d.ms + gold_in: globalavepool_fp32_fix_input_4d + dtypes: 1 + + + + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalmaxpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalmaxpool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebda7a0e46db72136cbbdf9bd782ca130cd90ed0 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/globalmaxpool.yaml @@ -0,0 +1,70 @@ +op_name: GlobalMaxPool +genonnx: + - model_name: globalmaxpool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: globalmaxpool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + + graph_param: + inputs: + - 
name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 1, 1] + initializer: None + +gengold: + - gold_name: globalmaxpool_fp32_dyn_gold_1 + in_model: globalmaxpool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: globalmaxpool_fp32_fix_input_4d + in_model: globalmaxpool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] + +convert: + - out_model: globalmaxpool_fp32_dyn_2_dyn.ms + in_model: globalmaxpool_fp32_dyn.onnx + input_shapes: None + - out_model: globalmaxpool_fp32_dyn_2_fix.ms + in_model: globalmaxpool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: globalmaxpool_fp32_fix_input_4d.ms + in_model: globalmaxpool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: globalmaxpool_fp32_dyn_2_dyn.ms + gold_in: globalmaxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalmaxpool_fp32_dyn_2_fix.ms + gold_in: globalmaxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalmaxpool_fp32_fix_input_4d.ms + gold_in: globalmaxpool_fp32_fix_input_4d + dtypes: 1 + + + + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/maxpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/maxpool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3015efda83b3acfef10165a2e55ace708be14729 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/avgpoolfusion/maxpool.yaml @@ -0,0 +1,71 @@ +op_name: MaxPool +genonnx: + - model_name: maxpool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: maxpool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 111, 111] + initializer: None + +gengold: + - gold_name: maxpool_fp32_dyn_gold_1 + in_model: maxpool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: maxpool_fp32_fix_input_4d + in_model: maxpool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] + +convert: + - out_model: maxpool_fp32_dyn_2_dyn.ms + in_model: maxpool_fp32_dyn.onnx + input_shapes: None + - out_model: maxpool_fp32_dyn_2_fix.ms + in_model: maxpool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: maxpool_fp32_fix_input_4d.ms + in_model: maxpool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: maxpool_fp32_dyn_2_dyn.ms + gold_in: maxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: maxpool_fp32_dyn_2_fix.ms + gold_in: maxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: maxpool_fp32_fix_input_4d.ms + gold_in: maxpool_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/batchnorm/batchnorm.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/batchnorm/batchnorm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27d32925820b80c023ff116720956e4e677f5707 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/batchnorm/batchnorm.yaml @@ -0,0 +1,133 @@ +op_name: BatchNormalization +genonnx: + - model_name: batchnormalization_fp32_dyn.onnx + node_param: + 
inputs: ['X','scale','B','input_mean','input_var'] + outputs: ['Y'] + attributes: + epsilon: 0.00005 + momentum: 0.9 + training_mode: 0 + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None,3,None,None] + outputs: + - name: Y + data_type: 1 + dims: [None,3,None,None] + initializer: + - name: scale + data_type: 1 + dims: [3] + value: [1,1,1] + - name: B + data_type: 1 + dims: [3] + value: [0,0,0] + - name: input_mean + data_type: 1 + dims: [3] + value: [0,0,0] + - name: input_var + data_type: 1 + dims: [3] + value: [1,1,1] + - model_name: batchnormalization_fp32_fix.onnx + node_param: + inputs: ['X','scale','B','input_mean','input_var'] + outputs: ['Y'] + attributes: + epsilon: 0.00005 + momentum: 0.9 + training_mode: 0 #outputs should be 1 when Training_mode = False + graph_param: + inputs: + - name: X + data_type: 1 + dims: [128, 3, 32, 32] + outputs: + - name: Y + data_type: 1 + dims: [128, 3, 32, 32] + initializer: + - name: scale + data_type: 1 + dims: [3] + value: [1,1,1] + - name: B + data_type: 1 + dims: [3] + value: [0,0,0] + - name: input_mean + data_type: 1 + dims: [3] + value: [0,0,0] + - name: input_var + data_type: 1 + dims: [3] + value: [1,1,1] + +gengold: + - gold_name: batchnormalization_fp32_dyn_gold_1 + in_model: batchnormalization_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[64,3,32,32]] + - gold_name: batchnormalization_fp32_fix_gold_1 + in_model: batchnormalization_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[128, 3, 32, 32]] + +convert: + - out_model: batchnormalization_fp32_dyn_2_dyn.ms + in_model: batchnormalization_fp32_dyn.onnx + input_shapes: None + fp16: off + - out_model: batchnormalization_fp32_dyn_2_fix.ms + in_model: batchnormalization_fp32_dyn.onnx + input_shapes: X:64,3,32,32 + fp16: off + - out_model: batchnormalization_fp32_fix_2_fix.ms + in_model: batchnormalization_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: batchnormalization_fp16_dyn_2_dyn.ms + in_model: batchnormalization_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: batchnormalization_fp16_dyn_2_fix.ms + in_model: batchnormalization_fp32_dyn.onnx + input_shapes: X:64,3,32,32 + fp16: on + - out_model: batchnormalization_fp16_fix_2_fix.ms + in_model: batchnormalization_fp32_fix.onnx + input_shapes: None + fp16: on + +run: + - in_model: batchnormalization_fp32_dyn_2_dyn.ms + gold_in: batchnormalization_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:64,3,32,32 + - in_model: batchnormalization_fp32_dyn_2_fix.ms + gold_in: batchnormalization_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: batchnormalization_fp32_fix_2_fix.ms + gold_in: batchnormalization_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: batchnormalization_fp16_dyn_2_dyn.ms + gold_in: batchnormalization_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:64,3,32,32 + - in_model: batchnormalization_fp16_dyn_2_fix.ms + gold_in: batchnormalization_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: batchnormalization_fp16_fix_2_fix.ms + gold_in: batchnormalization_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/div/div.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/div/div.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0b89a20ad1e9f28f553f41ebd6c1f547b345ca10 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/div/div.yaml @@ -0,0 +1,159 @@ +op_name: Div +genonnx: + - model_name: div_fp32_dyn.onnx + node_param: + inputs: ["X", 
"Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: div_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: div_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: div_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: div_fp32_dyn_gold_1 + in_model: div_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: div_fp32_fix_gold_1 + in_model: div_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: div_fp32_fix_input_3d + in_model: div_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: div_fp32_fix_input_4d + in_model: div_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: div_fp32_dyn_2_dyn.ms + in_model: div_fp32_dyn.onnx + input_shapes: None + - out_model: div_fp32_dyn_2_fix.ms + in_model: div_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: div_fp32_fix_2_fix.ms + in_model: div_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: div_fp16_dyn_2_dyn.ms + in_model: div_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: div_fp16_dyn_2_fix.ms + in_model: div_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: div_fp16_fix_2_fix.ms + in_model: div_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: div_fp32_fix_input_3d.ms + in_model: div_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: div_fp32_fix_input_4d.ms + in_model: div_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: div_fp32_dyn_2_dyn.ms + gold_in: div_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: div_fp32_dyn_2_fix.ms + gold_in: div_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: div_fp32_fix_2_fix.ms + gold_in: div_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: div_fp16_dyn_2_dyn.ms + gold_in: div_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: div_fp16_dyn_2_fix.ms + gold_in: div_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: div_fp16_fix_2_fix.ms + gold_in: div_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: div_fp32_fix_input_3d.ms + gold_in: div_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: div_fp32_fix_input_4d.ms + gold_in: div_fp32_fix_input_4d + dtypes: 1 + input_shapes: None + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/eltwise/eltwise.yaml 
b/mindspore-src/source/mindspore/lite/test/st/ops/op/eltwise/eltwise.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53b352d1b0118ed09c93e1e72c2509236a385f80 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/eltwise/eltwise.yaml @@ -0,0 +1,158 @@ +op_name: Sum +genonnx: + - model_name: sum_fp32_dyn.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: sum_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: sum_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: sum_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: sum_fp32_dyn_gold_1 + in_model: sum_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: sum_fp32_fix_gold_1 + in_model: sum_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: sum_fp32_fix_input_3d + in_model: sum_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: sum_fp32_fix_input_4d + in_model: sum_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: sum_fp32_dyn_2_dyn.ms + in_model: sum_fp32_dyn.onnx + input_shapes: None + - out_model: sum_fp32_dyn_2_fix.ms + in_model: sum_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: sum_fp32_fix_2_fix.ms + in_model: sum_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: sum_fp16_dyn_2_dyn.ms + in_model: sum_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: sum_fp16_dyn_2_fix.ms + in_model: sum_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: sum_fp16_fix_2_fix.ms + in_model: sum_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: sum_fp32_fix_input_3d.ms + in_model: sum_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: sum_fp32_fix_input_4d.ms + in_model: sum_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: sum_fp32_dyn_2_dyn.ms + gold_in: sum_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: sum_fp32_dyn_2_fix.ms + gold_in: sum_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sum_fp32_fix_2_fix.ms + gold_in: sum_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sum_fp16_dyn_2_dyn.ms + gold_in: sum_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: sum_fp16_dyn_2_fix.ms + gold_in: sum_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sum_fp16_fix_2_fix.ms + gold_in: 
sum_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sum_fp32_fix_input_3d.ms + gold_in: sum_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: sum_fp32_fix_input_4d.ms + gold_in: sum_fp32_fix_input_4d + dtypes: 1 + input_shapes: None diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/equal/equal.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/equal/equal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..029aef3d094674b4586b2ed018081b3de9d98321 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/equal/equal.yaml @@ -0,0 +1,159 @@ +op_name: Equal +genonnx: + - model_name: equal_fp32_dyn.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 9 + dims: [None, None] + initializer: None + - model_name: equal_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 9 + dims: [256, 256] + initializer: None + - model_name: equal_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 9 + dims: [256, 256, 2] + initializer: None + - model_name: equal_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 9 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: equal_fp32_dyn_gold_1 + in_model: equal_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: equal_fp32_fix_gold_1 + in_model: equal_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: equal_fp32_fix_input_3d + in_model: equal_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: equal_fp32_fix_input_4d + in_model: equal_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: equal_fp32_dyn_2_dyn.ms + in_model: equal_fp32_dyn.onnx + input_shapes: None + - out_model: equal_fp32_dyn_2_fix.ms + in_model: equal_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: equal_fp32_fix_2_fix.ms + in_model: equal_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: equal_fp16_dyn_2_dyn.ms + in_model: equal_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: equal_fp16_dyn_2_fix.ms + in_model: equal_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: equal_fp16_fix_2_fix.ms + in_model: equal_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: equal_fp32_fix_input_3d.ms + in_model: equal_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: equal_fp32_fix_input_4d.ms + in_model: equal_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: equal_fp32_dyn_2_dyn.ms + gold_in: equal_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: equal_fp32_dyn_2_fix.ms + gold_in: 
equal_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: equal_fp32_fix_2_fix.ms + gold_in: equal_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: equal_fp16_dyn_2_dyn.ms + gold_in: equal_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: equal_fp16_dyn_2_fix.ms + gold_in: equal_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: equal_fp16_fix_2_fix.ms + gold_in: equal_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: equal_fp32_fix_input_3d.ms + gold_in: equal_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: equal_fp32_fix_input_4d.ms + gold_in: equal_fp32_fix_input_4d + dtypes: 1 + input_shapes: None + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/erf/erf.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/erf/erf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a033bb26740f5c842e8dc9f880d865e355575fec --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/erf/erf.yaml @@ -0,0 +1,123 @@ +op_name: Erf +genonnx: + - model_name: erf_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None] + initializer: None + - model_name: erf_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Y + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: erf_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: erf_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: erf_fp32_dyn_gold_1 + in_model: erf_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: erf_fp32_fix_gold_1 + in_model: erf_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: erf_fp32_fix_input_3d + in_model: erf_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: erf_fp32_fix_input_4d + in_model: erf_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] + +convert: + - out_model: erf_fp32_dyn_2_dyn.ms + in_model: erf_fp32_dyn.onnx + input_shapes: None + - out_model: erf_fp32_dyn_2_fix.ms + in_model: erf_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: erf_fp32_fix_2_fix.ms + in_model: erf_fp32_fix.onnx + input_shapes: None + - out_model: erf_fp16_dyn_2_dyn.ms + in_model: erf_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: erf_fp32_fix_input_3d.ms + in_model: erf_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: erf_fp32_fix_input_4d.ms + in_model: erf_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: erf_fp32_dyn_2_dyn.ms + gold_in: erf_fp32_dyn_gold_1 + dtypes: 1 + - in_model: erf_fp32_dyn_2_fix.ms + gold_in: erf_fp32_dyn_gold_1 + dtypes: 1 + - in_model: erf_fp32_fix_2_fix.ms + gold_in: erf_fp32_fix_gold_1 + dtypes: 1 + - in_model: erf_fp16_dyn_2_dyn.ms + gold_in: erf_fp32_dyn_gold_1 + dtypes: 1 + - 
in_model: erf_fp32_fix_input_3d.ms + gold_in: erf_fp32_fix_input_3d + dtypes: 1 + - in_model: erf_fp32_fix_input_4d.ms + gold_in: erf_fp32_fix_input_4d + dtypes: 1 diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/log/log.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/log/log.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27f48f5f1c00423d8905f5410954be24a0f41b1f --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/log/log.yaml @@ -0,0 +1,124 @@ +op_name: Log +genonnx: + - model_name: log_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None] + initializer: None + - model_name: log_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Y + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: log_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: log_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: log_fp32_dyn_gold_1 + in_model: log_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: log_fp32_fix_gold_1 + in_model: log_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: log_fp32_fix_input_3d + in_model: log_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: log_fp32_fix_input_4d + in_model: log_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] + +convert: + - out_model: log_fp32_dyn_2_dyn.ms + in_model: log_fp32_dyn.onnx + input_shapes: None + - out_model: log_fp32_dyn_2_fix.ms + in_model: log_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: log_fp32_fix_2_fix.ms + in_model: log_fp32_fix.onnx + input_shapes: None + - out_model: log_fp16_dyn_2_dyn.ms + in_model: log_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: log_fp32_fix_input_3d.ms + in_model: log_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: log_fp32_fix_input_4d.ms + in_model: log_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: log_fp32_dyn_2_dyn.ms + gold_in: log_fp32_dyn_gold_1 + dtypes: 1 + - in_model: log_fp32_dyn_2_fix.ms + gold_in: log_fp32_dyn_gold_1 + dtypes: 1 + - in_model: log_fp32_fix_2_fix.ms + gold_in: log_fp32_fix_gold_1 + dtypes: 1 + - in_model: log_fp16_dyn_2_dyn.ms + gold_in: log_fp32_dyn_gold_1 + dtypes: 1 + - in_model: log_fp32_fix_input_3d.ms + gold_in: log_fp32_fix_input_3d + dtypes: 1 + - in_model: log_fp32_fix_input_4d.ms + gold_in: log_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/logicalnot/logicalnot.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/logicalnot/logicalnot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..652de4ec7ca18f57fa685b6c64427c937ca63131 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/logicalnot/logicalnot.yaml @@ 
-0,0 +1,149 @@ +op_name: Not +genonnx: + - model_name: not_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 9 + dims: [None, None] + outputs: + - name: Z + data_type: 9 + dims: [None, None] + initializer: None + - model_name: not_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 9 + dims: [256, 256] + outputs: + - name: Z + data_type: 9 + dims: [256, 256] + initializer: None + - model_name: not_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 9 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 9 + dims: [256, 256, 2] + initializer: None + - model_name: not_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 9 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 9 + dims: [256, 256, 2, 2] + initializer: None +
+gengold: + - gold_name: not_fp32_dyn_gold_1 + in_model: not_fp32_dyn.onnx + input_dtypes: [9] + input_shapes: [[128, 128]] + - gold_name: not_fp32_fix_gold_1 + in_model: not_fp32_fix.onnx + input_dtypes: [9] + input_shapes: [[256, 256]] + - gold_name: not_fp32_fix_input_3d + in_model: not_fp32_fix_input_3d.onnx + input_dtypes: [9] + input_shapes: [[256, 256, 2]] + - gold_name: not_fp32_fix_input_4d + in_model: not_fp32_fix_input_4d.onnx + input_dtypes: [9] + input_shapes: [[256, 256, 2, 2]] +
+convert: + - out_model: not_fp32_dyn_2_dyn.ms + in_model: not_fp32_dyn.onnx + input_shapes: None + - out_model: not_fp32_dyn_2_fix.ms + in_model: not_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: not_fp32_fix_2_fix.ms + in_model: not_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: not_fp16_dyn_2_dyn.ms + in_model: not_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: not_fp16_dyn_2_fix.ms + in_model: not_fp32_dyn.onnx + input_shapes: X:128,128 + fp16: on + - out_model: not_fp16_fix_2_fix.ms + in_model: not_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: not_fp32_fix_input_3d.ms + in_model: not_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: not_fp32_fix_input_4d.ms + in_model: not_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on +
+run: + - in_model: not_fp32_dyn_2_dyn.ms + gold_in: not_fp32_dyn_gold_1 + dtypes: 9 + input_shapes: X:128,128 + - in_model: not_fp32_dyn_2_fix.ms + gold_in: not_fp32_dyn_gold_1 + dtypes: 9 + input_shapes: None + - in_model: not_fp32_fix_2_fix.ms + gold_in: not_fp32_fix_gold_1 + dtypes: 9 + input_shapes: None + - in_model: not_fp16_dyn_2_dyn.ms + gold_in: not_fp32_dyn_gold_1 + dtypes: 9 + input_shapes: X:128,128 + - in_model: not_fp16_dyn_2_fix.ms + gold_in: not_fp32_dyn_gold_1 + dtypes: 9 + input_shapes: None + - in_model: not_fp16_fix_2_fix.ms + gold_in: not_fp32_fix_gold_1 + dtypes: 9 + input_shapes: None + - in_model: not_fp32_fix_input_3d.ms + gold_in: not_fp32_fix_input_3d + dtypes: 9 + input_shapes: None + - in_model: not_fp32_fix_input_4d.ms + gold_in: not_fp32_fix_input_4d + dtypes: 9 + input_shapes: None + + +
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/logsoftmax/logsoftmax.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/logsoftmax/logsoftmax.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71257ee1f705b3cae652ee737b5374b0d5ef6332 ---
/dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/logsoftmax/logsoftmax.yaml @@ -0,0 +1,125 @@ +op_name: LogSoftmax +genonnx: + - model_name: logsoftmax_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: + axis: 0 + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: logsoftmax_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: logsoftmax_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 4] + outputs: + - name: Z + data_type: 1 + dims: [256, 256,4] + initializer: None + - model_name: logsoftmax_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 4] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 4] + initializer: None + +gengold: + - gold_name: logsoftmax_fp32_dyn_gold_1 + in_model: logsoftmax_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: logsoftmax_fp32_fix_gold_1 + in_model: logsoftmax_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: logsoftmax_fp32_fix_input_3d + in_model: logsoftmax_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 4]] + - gold_name: logsoftmax_fp32_fix_input_4d + in_model: logsoftmax_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 4]] + +convert: + - out_model: logsoftmax_fp32_dyn_2_dyn.ms + in_model: logsoftmax_fp32_dyn.onnx + input_shapes: None + fp16: off + - out_model: logsoftmax_fp32_dyn_2_fix.ms + in_model: logsoftmax_fp32_dyn.onnx + input_shapes: X:128,128 + fp16: off + - out_model: logsoftmax_fp32_fix_2_fix.ms + in_model: logsoftmax_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: logsoftmax_fp32_fix_input_3d.ms + in_model: logsoftmax_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: logsoftmax_fp32_fix_input_4d.ms + in_model: logsoftmax_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: logsoftmax_fp32_dyn_2_dyn.ms + gold_in: logsoftmax_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128 + - in_model: logsoftmax_fp32_dyn_2_fix.ms + gold_in: logsoftmax_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: logsoftmax_fp32_fix_2_fix.ms + gold_in: logsoftmax_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: logsoftmax_fp32_fix_input_3d.ms + gold_in: logsoftmax_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: logsoftmax_fp32_fix_input_4d.ms + gold_in: logsoftmax_fp32_fix_input_4d + dtypes: 1 + input_shapes: None diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/maximum/maximum.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/maximum/maximum.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45cb6298920bf0773db67175eda7cac41e0d68a9 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/maximum/maximum.yaml @@ -0,0 +1,161 @@ +op_name: Max +genonnx: + - model_name: max_fp32_dyn.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: 
Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: max_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: max_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: max_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: max_fp32_dyn_gold_1 + in_model: max_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: max_fp32_fix_gold_1 + in_model: max_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: max_fp32_fix_input_3d + in_model: max_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: max_fp32_fix_input_4d + in_model: max_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: max_fp32_dyn_2_dyn.ms + in_model: max_fp32_dyn.onnx + input_shapes: None + - out_model: max_fp32_dyn_2_fix.ms + in_model: max_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: max_fp32_fix_2_fix.ms + in_model: max_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: max_fp16_dyn_2_dyn.ms + in_model: max_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: max_fp16_dyn_2_fix.ms + in_model: max_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: max_fp16_fix_2_fix.ms + in_model: max_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: max_fp32_fix_input_3d.ms + in_model: max_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: max_fp32_fix_input_4d.ms + in_model: max_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: max_fp32_dyn_2_dyn.ms + gold_in: max_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: max_fp32_dyn_2_fix.ms + gold_in: max_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: max_fp32_fix_2_fix.ms + gold_in: max_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: max_fp16_dyn_2_dyn.ms + gold_in: max_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: max_fp16_dyn_2_fix.ms + gold_in: max_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: max_fp16_fix_2_fix.ms + gold_in: max_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: max_fp32_fix_input_3d.ms + gold_in: max_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: max_fp32_fix_input_4d.ms + gold_in: max_fp32_fix_input_4d + dtypes: 1 + input_shapes: None + + + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/globalmaxpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/globalmaxpool.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..ebda7a0e46db72136cbbdf9bd782ca130cd90ed0 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/globalmaxpool.yaml @@ -0,0 +1,70 @@ +op_name: GlobalMaxPool +genonnx: + - model_name: globalmaxpool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: globalmaxpool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 1, 1] + initializer: None + +gengold: + - gold_name: globalmaxpool_fp32_dyn_gold_1 + in_model: globalmaxpool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: globalmaxpool_fp32_fix_input_4d + in_model: globalmaxpool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] + +convert: + - out_model: globalmaxpool_fp32_dyn_2_dyn.ms + in_model: globalmaxpool_fp32_dyn.onnx + input_shapes: None + - out_model: globalmaxpool_fp32_dyn_2_fix.ms + in_model: globalmaxpool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: globalmaxpool_fp32_fix_input_4d.ms + in_model: globalmaxpool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: globalmaxpool_fp32_dyn_2_dyn.ms + gold_in: globalmaxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalmaxpool_fp32_dyn_2_fix.ms + gold_in: globalmaxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: globalmaxpool_fp32_fix_input_4d.ms + gold_in: globalmaxpool_fp32_fix_input_4d + dtypes: 1 + + + + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/maxpool.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/maxpool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3015efda83b3acfef10165a2e55ace708be14729 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/maxpoolfusion/maxpool.yaml @@ -0,0 +1,71 @@ +op_name: MaxPool +genonnx: + - model_name: maxpool_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: maxpool_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + kernel_shape: [3,3] + strides: [2,2] + + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 224, 224] + outputs: + - name: Y + data_type: 1 + dims: [1, 3, 111, 111] + initializer: None + +gengold: + - gold_name: maxpool_fp32_dyn_gold_1 + in_model: maxpool_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[1, 6, 128, 128]] + - gold_name: maxpool_fp32_fix_input_4d + in_model: maxpool_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 224, 224]] + +convert: + - out_model: maxpool_fp32_dyn_2_dyn.ms + in_model: maxpool_fp32_dyn.onnx + input_shapes: None + - out_model: maxpool_fp32_dyn_2_fix.ms + in_model: maxpool_fp32_dyn.onnx + input_shapes: X:1, 6, 128, 128 + - out_model: maxpool_fp32_fix_input_4d.ms + in_model: maxpool_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: maxpool_fp32_dyn_2_dyn.ms + gold_in: maxpool_fp32_dyn_gold_1 + dtypes: 1 + - 
in_model: maxpool_fp32_dyn_2_fix.ms + gold_in: maxpool_fp32_dyn_gold_1 + dtypes: 1 + - in_model: maxpool_fp32_fix_input_4d.ms + gold_in: maxpool_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/minimum/minimum.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/minimum/minimum.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7064678d83dd31550cd1c99495bdf9b9fc1bd070 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/minimum/minimum.yaml @@ -0,0 +1,162 @@ +op_name: Min +genonnx: + - model_name: min_fp32_dyn.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: Y + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: min_fp32_fix.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: Y + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: min_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: min_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "Y"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: min_fp32_dyn_gold_1 + in_model: min_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: min_fp32_fix_gold_1 + in_model: min_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: min_fp32_fix_input_3d + in_model: min_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: min_fp32_fix_input_4d + in_model: min_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] + +convert: + - out_model: min_fp32_dyn_2_dyn.ms + in_model: min_fp32_dyn.onnx + input_shapes: None + - out_model: min_fp32_dyn_2_fix.ms + in_model: min_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + - out_model: min_fp32_fix_2_fix.ms + in_model: min_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: min_fp16_dyn_2_dyn.ms + in_model: min_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: min_fp16_dyn_2_fix.ms + in_model: min_fp32_dyn.onnx + input_shapes: X:128,128;Y:128,128 + fp16: on + - out_model: min_fp16_fix_2_fix.ms + in_model: min_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: min_fp32_fix_input_3d.ms + in_model: min_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: min_fp32_fix_input_4d.ms + in_model: min_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: min_fp32_dyn_2_dyn.ms + gold_in: min_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: min_fp32_dyn_2_fix.ms + gold_in: min_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: min_fp32_fix_2_fix.ms + gold_in: min_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None 
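+ # Assumption: the fp16 runs below are checked against the fp32 golds, so the runner must compare half-precision outputs with a relaxed tolerance rather than bit-exact equality.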
+ - in_model: min_fp16_dyn_2_dyn.ms + gold_in: min_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;Y:128,128 + - in_model: min_fp16_dyn_2_fix.ms + gold_in: min_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: min_fp16_fix_2_fix.ms + gold_in: min_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: min_fp32_fix_input_3d.ms + gold_in: min_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: min_fp32_fix_input_4d.ms + gold_in: min_fp32_fix_input_4d + dtypes: 1 + input_shapes: None + + + +
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/powfusion/powfusion.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/powfusion/powfusion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95b415c56442f7dff892b57893ef89aa3b4c6a34 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/powfusion/powfusion.yaml @@ -0,0 +1,159 @@ +op_name: Pow +genonnx: + - model_name: pow_fp32_dyn.onnx + node_param: + inputs: ["X", "exponent"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + - name: exponent + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: pow_fp32_fix.onnx + node_param: + inputs: ["X", "exponent"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + - name: exponent + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: pow_fp32_fix_input_3d.onnx + node_param: + inputs: ["X", "exponent"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + - name: exponent + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: pow_fp32_fix_input_4d.onnx + node_param: + inputs: ["X", "exponent"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + - name: exponent + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None +
+gengold: + - gold_name: pow_fp32_dyn_gold_1 + in_model: pow_fp32_dyn.onnx + input_dtypes: [1, 1] + input_shapes: [[128, 128], [128, 128]] + - gold_name: pow_fp32_fix_gold_1 + in_model: pow_fp32_fix.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256], [256, 256]] + - gold_name: pow_fp32_fix_input_3d + in_model: pow_fp32_fix_input_3d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2], [256, 256, 2]] + - gold_name: pow_fp32_fix_input_4d + in_model: pow_fp32_fix_input_4d.onnx + input_dtypes: [1, 1] + input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]] +
+convert: + - out_model: pow_fp32_dyn_2_dyn.ms + in_model: pow_fp32_dyn.onnx + input_shapes: None + - out_model: pow_fp32_dyn_2_fix.ms + in_model: pow_fp32_dyn.onnx + input_shapes: X:128,128;exponent:128,128 + - out_model: pow_fp32_fix_2_fix.ms + in_model: pow_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: pow_fp16_dyn_2_dyn.ms + in_model: pow_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: pow_fp16_dyn_2_fix.ms + in_model: pow_fp32_dyn.onnx + input_shapes: X:128,128;exponent:128,128 + fp16: on + - out_model: pow_fp16_fix_2_fix.ms + in_model: pow_fp32_fix.onnx + input_shapes: None + fp16: on + - out_model: pow_fp32_fix_input_3d.ms + in_model: pow_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: pow_fp32_fix_input_4d.ms + in_model: pow_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on +
+run: + - in_model: pow_fp32_dyn_2_dyn.ms + gold_in: pow_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;exponent:128,128 + - in_model: pow_fp32_dyn_2_fix.ms + gold_in: pow_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: pow_fp32_fix_2_fix.ms + gold_in: pow_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: pow_fp16_dyn_2_dyn.ms + gold_in: pow_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128;exponent:128,128 + - in_model: pow_fp16_dyn_2_fix.ms + gold_in: pow_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: pow_fp16_fix_2_fix.ms + gold_in: pow_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: pow_fp32_fix_input_3d.ms + gold_in: pow_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: pow_fp32_fix_input_4d.ms + gold_in: pow_fp32_fix_input_4d + dtypes: 1 + input_shapes: None +
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/reciprocal/reciprocal.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/reciprocal/reciprocal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7547f09c1a7f5ba56aae88c46350f44377a3352d --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/reciprocal/reciprocal.yaml @@ -0,0 +1,124 @@ +op_name: Reciprocal +genonnx: + - model_name: reciprocal_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None] + initializer: None + - model_name: reciprocal_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Y + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: reciprocal_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: reciprocal_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None +
+gengold: + - gold_name: reciprocal_fp32_dyn_gold_1 + in_model: reciprocal_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: reciprocal_fp32_fix_gold_1 + in_model: reciprocal_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: reciprocal_fp32_fix_input_3d + in_model: reciprocal_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: reciprocal_fp32_fix_input_4d + in_model: reciprocal_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] +
+convert: + - out_model: reciprocal_fp32_dyn_2_dyn.ms + in_model: reciprocal_fp32_dyn.onnx + input_shapes: None + - out_model: reciprocal_fp32_dyn_2_fix.ms + in_model: reciprocal_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: reciprocal_fp32_fix_2_fix.ms + in_model: reciprocal_fp32_fix.onnx + input_shapes: None + - out_model: reciprocal_fp16_dyn_2_dyn.ms + in_model: reciprocal_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: reciprocal_fp32_fix_input_3d.ms + in_model:
reciprocal_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: reciprocal_fp32_fix_input_4d.ms + in_model: reciprocal_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: reciprocal_fp32_dyn_2_dyn.ms + gold_in: reciprocal_fp32_dyn_gold_1 + dtypes: 1 + - in_model: reciprocal_fp32_dyn_2_fix.ms + gold_in: reciprocal_fp32_dyn_gold_1 + dtypes: 1 + - in_model: reciprocal_fp32_fix_2_fix.ms + gold_in: reciprocal_fp32_fix_gold_1 + dtypes: 1 + - in_model: reciprocal_fp16_dyn_2_dyn.ms + gold_in: reciprocal_fp32_dyn_gold_1 + dtypes: 1 + - in_model: reciprocal_fp32_fix_input_3d.ms + gold_in: reciprocal_fp32_fix_input_3d + dtypes: 1 + - in_model: reciprocal_fp32_fix_input_4d.ms + gold_in: reciprocal_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/round/round.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/round/round.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de6d063decd707b7aab99db006a207e0be468af7 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/round/round.yaml @@ -0,0 +1,124 @@ +op_name: Round +genonnx: + - model_name: round_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None] + initializer: None + - model_name: round_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Y + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: round_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: round_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Y + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: round_fp32_dyn_gold_1 + in_model: round_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: round_fp32_fix_gold_1 + in_model: round_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: round_fp32_fix_input_3d + in_model: round_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: round_fp32_fix_input_4d + in_model: round_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] + +convert: + - out_model: round_fp32_dyn_2_dyn.ms + in_model: round_fp32_dyn.onnx + input_shapes: None + - out_model: round_fp32_dyn_2_fix.ms + in_model: round_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: round_fp32_fix_2_fix.ms + in_model: round_fp32_fix.onnx + input_shapes: None + - out_model: round_fp16_dyn_2_dyn.ms + in_model: round_fp32_dyn.onnx + input_shapes: None + fp16: on + - out_model: round_fp32_fix_input_3d.ms + in_model: round_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on + - out_model: round_fp32_fix_input_4d.ms + in_model: round_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: round_fp32_dyn_2_dyn.ms + gold_in: round_fp32_dyn_gold_1 + dtypes: 1 + - in_model: round_fp32_dyn_2_fix.ms + gold_in: round_fp32_dyn_gold_1 + dtypes: 1 + - in_model: round_fp32_fix_2_fix.ms 
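+ # ONNX Round rounds half to even; the fp32 runs here should match the gold exactly, while the fp16 run below can flip ties near *.5 (assumed to be absorbed by the runner's tolerance).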
+ gold_in: round_fp32_fix_gold_1 + dtypes: 1 + - in_model: round_fp16_dyn_2_dyn.ms + gold_in: round_fp32_dyn_gold_1 + dtypes: 1 + - in_model: round_fp32_fix_input_3d.ms + gold_in: round_fp32_fix_input_3d + dtypes: 1 + - in_model: round_fp32_fix_input_4d.ms + gold_in: round_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/sin/sin.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/sin/sin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e6853e7b41790453b158a8d4befe60d47a70e34 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/sin/sin.yaml @@ -0,0 +1,119 @@ +op_name: Sin +genonnx: + - model_name: sin_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: sin_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: sin_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: sin_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: sin_fp32_dyn_gold_1 + in_model: sin_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: sin_fp32_fix_gold_1 + in_model: sin_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: sin_fp32_fix_input_3d + in_model: sin_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: sin_fp32_fix_input_4d + in_model: sin_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] + +convert: + - out_model: sin_fp32_dyn_2_dyn.ms + in_model: sin_fp32_dyn.onnx + input_shapes: None + - out_model: sin_fp32_dyn_2_fix.ms + in_model: sin_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: sin_fp32_fix_2_fix.ms + in_model: sin_fp32_fix.onnx + input_shapes: None + - out_model: sin_fp32_fix_input_3d.ms + in_model: sin_fp32_fix_input_3d.onnx + input_shapes: None + - out_model: sin_fp32_fix_input_4d.ms + in_model: sin_fp32_fix_input_4d.onnx + input_shapes: None + +run: + - in_model: sin_fp32_dyn_2_dyn.ms + gold_in: sin_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128 + - in_model: sin_fp32_dyn_2_fix.ms + gold_in: sin_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sin_fp32_fix_2_fix.ms + gold_in: sin_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sin_fp32_fix_input_3d.ms + gold_in: sin_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: sin_fp32_fix_input_4d.ms + gold_in: sin_fp32_fix_input_4d + dtypes: 1 + input_shapes: None \ No newline at end of file diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/size/size.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/size/size.yaml new file mode 100644 index 0000000000000000000000000000000000000000..748df13fd404b6bc364e0c90365c5b551eaf7314 --- /dev/null +++ 
b/mindspore-src/source/mindspore/lite/test/st/ops/op/size/size.yaml @@ -0,0 +1,61 @@ +op_name: Size +genonnx: + - model_name: size_fp32_dyn.onnx + node_param: + inputs: ["data"] + outputs: ["size"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: size + data_type: 7 + dims: [] + initializer: None + - model_name: size_fp32_fix_input_3d.onnx + node_param: + inputs: ["data"] + outputs: ["size"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [32, 1, 9] + outputs: + - name: size + data_type: 7 + dims: [] + initializer: None +
+gengold: + - gold_name: size_fp32_dyn_gold_1 + in_model: size_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[2, 3, 4, 5]] + - gold_name: size_fp32_fix_input_3d + in_model: size_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[32, 1, 9]] +
+convert: + - out_model: size_fp32_dyn_2_dyn.ms + in_model: size_fp32_dyn.onnx + input_shapes: None + - out_model: size_fp32_fix_input_3d.ms + in_model: size_fp32_fix_input_3d.onnx + input_shapes: None + fp16: on +
+run: + - in_model: size_fp32_dyn_2_dyn.ms + gold_in: size_fp32_dyn_gold_1 + dtypes: 1 + - in_model: size_fp32_fix_input_3d.ms + gold_in: size_fp32_fix_input_3d + dtypes: 1 + +
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/slicefusion/slicefusion.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/slicefusion/slicefusion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d8ab7a3911d16be934f7325fc7ddfa74d61546d7 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/slicefusion/slicefusion.yaml @@ -0,0 +1,180 @@ +op_name: Slice +genonnx: + - model_name: slice_fp32_dyn.onnx + node_param: + inputs: ["data", "starts", "ends", "axes", "steps"] + outputs: ["output"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [None, None] + outputs: + - name: output + data_type: 1 + dims: [None, None] + initializer: + - name: starts + data_type: 6 + dims: [2] + value: [1, 0] + - name: ends + data_type: 6 + dims: [2] + value: [2, 3] + - name: axes + data_type: 6 + dims: [2] + value: [0, 1] + - name: steps + data_type: 6 + dims: [2] + value: [1, 2] + - model_name: slice_fp32_fix.onnx + node_param: + inputs: ["data", "starts", "ends"] + outputs: ["output"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [2, 4] + outputs: + - name: output + data_type: 1 + dims: [1, 3] + initializer: + - name: starts + data_type: 6 + dims: [2] + value: [0, 1] + - name: ends + data_type: 6 + dims: [2] + value: [-1, 1000] + - model_name: slice_fp32_dyn_input_3d.onnx + node_param: + inputs: ["data", "starts", "ends", "axes", "steps"] + outputs: ["output"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [None, None, None] + outputs: + - name: output + data_type: 1 + dims: [None, None, None] + initializer: + - name: starts + data_type: 6 + dims: [3] + value: [1, 0, 0] + - name: ends + data_type: 6 + dims: [3] + value: [2, 3, 4] + - name: axes + data_type: 6 + dims: [3] + value: [0, 1, 2] + - name: steps + data_type: 6 + dims: [3] + value: [1, 2, 1] + - model_name: slice_fp32_dyn_input_4d.onnx + node_param: + inputs: ["data", "starts", "ends", "axes", "steps"] + outputs: ["output"] + attributes: None + graph_param: + inputs: + - name: data + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: output + data_type: 1 + dims: [None, None, None, None] + initializer: + - name:
starts + data_type: 6 + dims: [4] + value: [1, 0, 0, 0] + - name: ends + data_type: 6 + dims: [4] + value: [2, 3, 5, 7] + - name: axes + data_type: 6 + dims: [4] + value: [0, 1, 2, 3] + - name: steps + data_type: 6 + dims: [4] + value: [1, 2, 1, 1] + +gengold: + - gold_name: slice_fp32_dyn_gold_1 + in_model: slice_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[2, 4]] + - gold_name: slice_fp32_fix_gold_1 + in_model: slice_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[2, 4]] + - gold_name: slice_fp32_dyn_input_3d + in_model: slice_fp32_dyn_input_3d.onnx + input_dtypes: [1] + input_shapes: [[2, 4, 6]] + - gold_name: slice_fp32_dyn_input_4d + in_model: slice_fp32_dyn_input_4d.onnx + input_dtypes: [1] + input_shapes: [[2, 4, 6, 8]] + +convert: + - out_model: slice_fp32_dyn_2_dyn.ms + in_model: slice_fp32_dyn.onnx + input_shapes: None + fp16: off + - out_model: slice_fp32_dyn_2_fix.ms + in_model: slice_fp32_dyn.onnx + input_shapes: data:2,4 + fp16: off + - out_model: slice_fp32_fix_2_fix.ms + in_model: slice_fp32_fix.onnx + input_shapes: None + fp16: off + - out_model: slice_fp32_dyn_input_3d.ms + in_model: slice_fp32_dyn_input_3d.onnx + input_shapes: None + fp16: on + - out_model: slice_fp32_dyn_input_4d.ms + in_model: slice_fp32_dyn_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: slice_fp32_dyn_2_dyn.ms + gold_in: slice_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: data:2,4 + - in_model: slice_fp32_dyn_2_fix.ms + gold_in: slice_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: slice_fp32_fix_2_fix.ms + gold_in: slice_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: slice_fp32_dyn_input_3d.ms + gold_in: slice_fp32_dyn_input_3d + dtypes: 1 + input_shapes: None + - in_model: slice_fp32_dyn_input_4d.ms + gold_in: slice_fp32_dyn_input_4d + dtypes: 1 + input_shapes: None diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/spacetodepth/spacetodepth.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/spacetodepth/spacetodepth.yaml new file mode 100644 index 0000000000000000000000000000000000000000..59afb1544b3426ef83707d73c10849451119dce7 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/spacetodepth/spacetodepth.yaml @@ -0,0 +1,62 @@ +op_name: SpaceToDepth +genonnx: + - model_name: spacetodepth_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + blocksize: 2 + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None, None, None] + outputs: + - name: Y + data_type: 1 + dims: [None, None, None, None] + initializer: None + - model_name: spacetodepth_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Y"] + attributes: + blocksize: 2 + graph_param: + inputs: + - name: X + data_type: 1 + dims: [1, 3, 2, 2] + outputs: + - name: Y + data_type: 1 + dims: [1, 12, 1, 1] + initializer: None + +gengold: + - gold_name: spacetodepth_fp32_dyn_gold_1 + in_model: spacetodepth_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[8, 8, 8, 8]] + - gold_name: spacetodepth_fp32_fix_input_4d + in_model: spacetodepth_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[1, 3, 2, 2]] + +convert: + - out_model: spacetodepth_fp32_dyn_2_dyn.ms + in_model: spacetodepth_fp32_dyn.onnx + input_shapes: None + - out_model: spacetodepth_fp32_fix_input_4d.ms + in_model: spacetodepth_fp32_fix_input_4d.onnx + input_shapes: None + fp16: on + +run: + - in_model: spacetodepth_fp32_dyn_2_dyn.ms + gold_in: spacetodepth_fp32_dyn_gold_1 + dtypes: 1 + - in_model: 
spacetodepth_fp32_fix_input_4d.ms + gold_in: spacetodepth_fp32_fix_input_4d + dtypes: 1 + diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/sqrt/sqrt.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/sqrt/sqrt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f2354840f79da9c77b64f2f2f11552aa272ad7f --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/sqrt/sqrt.yaml @@ -0,0 +1,119 @@ +op_name: Sqrt +genonnx: + - model_name: sqrt_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [None, None] + outputs: + - name: Z + data_type: 1 + dims: [None, None] + initializer: None + - model_name: sqrt_fp32_fix.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256] + outputs: + - name: Z + data_type: 1 + dims: [256, 256] + initializer: None + - model_name: sqrt_fp32_fix_input_3d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2] + initializer: None + - model_name: sqrt_fp32_fix_input_4d.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None + graph_param: + inputs: + - name: X + data_type: 1 + dims: [256, 256, 2, 2] + outputs: + - name: Z + data_type: 1 + dims: [256, 256, 2, 2] + initializer: None + +gengold: + - gold_name: sqrt_fp32_dyn_gold_1 + in_model: sqrt_fp32_dyn.onnx + input_dtypes: [1] + input_shapes: [[128, 128]] + - gold_name: sqrt_fp32_fix_gold_1 + in_model: sqrt_fp32_fix.onnx + input_dtypes: [1] + input_shapes: [[256, 256]] + - gold_name: sqrt_fp32_fix_input_3d + in_model: sqrt_fp32_fix_input_3d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2]] + - gold_name: sqrt_fp32_fix_input_4d + in_model: sqrt_fp32_fix_input_4d.onnx + input_dtypes: [1] + input_shapes: [[256, 256, 2, 2]] + +convert: + - out_model: sqrt_fp32_dyn_2_dyn.ms + in_model: sqrt_fp32_dyn.onnx + input_shapes: None + - out_model: sqrt_fp32_dyn_2_fix.ms + in_model: sqrt_fp32_dyn.onnx + input_shapes: X:128,128 + - out_model: sqrt_fp32_fix_2_fix.ms + in_model: sqrt_fp32_fix.onnx + input_shapes: None + - out_model: sqrt_fp32_fix_input_3d.ms + in_model: sqrt_fp32_fix_input_3d.onnx + input_shapes: None + - out_model: sqrt_fp32_fix_input_4d.ms + in_model: sqrt_fp32_fix_input_4d.onnx + input_shapes: None + +run: + - in_model: sqrt_fp32_dyn_2_dyn.ms + gold_in: sqrt_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: X:128,128 + - in_model: sqrt_fp32_dyn_2_fix.ms + gold_in: sqrt_fp32_dyn_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sqrt_fp32_fix_2_fix.ms + gold_in: sqrt_fp32_fix_gold_1 + dtypes: 1 + input_shapes: None + - in_model: sqrt_fp32_fix_input_3d.ms + gold_in: sqrt_fp32_fix_input_3d + dtypes: 1 + input_shapes: None + - in_model: sqrt_fp32_fix_input_4d.ms + gold_in: sqrt_fp32_fix_input_4d + dtypes: 1 + input_shapes: None diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/squeeze/squeeze.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/squeeze/squeeze.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c70f6f6089e38493db5a62fb4c4be7f1f14dba33 --- /dev/null +++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/squeeze/squeeze.yaml @@ -0,0 +1,124 @@ +op_name: Squeeze +genonnx: + - model_name: squeeze_fp32_dyn.onnx + node_param: + inputs: ["X"] + outputs: ["Z"] + attributes: None 
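+ # With attributes: None the Squeeze node carries no axes, so it removes every size-1 dimension of X at run time (assumes the generator maps None to an omitted attribute).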
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: squeeze_fp32_fix.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 1]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256]
+      initializer: None
+  - model_name: squeeze_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 1, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 4]
+      initializer: None
+  - model_name: squeeze_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 1, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 4]
+      initializer: None
+
+gengold:
+  - gold_name: squeeze_fp32_dyn_gold_1
+    in_model: squeeze_fp32_dyn.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 128]]
+  - gold_name: squeeze_fp32_fix_gold_1
+    in_model: squeeze_fp32_fix.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 1]]
+  - gold_name: squeeze_fp32_fix_input_3d
+    in_model: squeeze_fp32_fix_input_3d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 1, 4]]
+  - gold_name: squeeze_fp32_fix_input_4d
+    in_model: squeeze_fp32_fix_input_4d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 1, 4]]
+
+convert:
+  - out_model: squeeze_fp32_dyn_2_dyn.ms
+    in_model: squeeze_fp32_dyn.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: squeeze_fp32_dyn_2_fix.ms
+    in_model: squeeze_fp32_dyn.onnx
+    input_shapes: X:128,128
+    fp16: off
+  - out_model: squeeze_fp32_fix_2_fix.ms
+    in_model: squeeze_fp32_fix.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: squeeze_fp32_fix_input_3d.ms
+    in_model: squeeze_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: squeeze_fp32_fix_input_4d.ms
+    in_model: squeeze_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: squeeze_fp32_dyn_2_dyn.ms
+    gold_in: squeeze_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128
+  - in_model: squeeze_fp32_dyn_2_fix.ms
+    gold_in: squeeze_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: squeeze_fp32_fix_2_fix.ms
+    gold_in: squeeze_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: squeeze_fp32_fix_input_3d.ms
+    gold_in: squeeze_fp32_fix_input_3d
+    dtypes: 1
+    input_shapes: None
+  - in_model: squeeze_fp32_fix_input_4d.ms
+    gold_in: squeeze_fp32_fix_input_4d
+    dtypes: 1
+    input_shapes: None
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/subfusion/sub.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/subfusion/sub.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0a7558426049abef0d8124c6dd394ec40a5dc9b4
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/subfusion/sub.yaml
@@ -0,0 +1,159 @@
+op_name: Sub
+genonnx:
+  - model_name: sub_fp32_dyn.onnx
+    node_param:
+      inputs: ["X", "Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+        - name: Y
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: sub_fp32_fix.onnx
+    node_param:
+      inputs: ["X", "Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256]
+        - name: Y
+          data_type: 1
+          dims: [256, 256]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256]
+      initializer: None
+  - model_name: sub_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X", "Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2]
+        - name: Y
+          data_type: 1
+          dims: [256, 256, 2]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 2]
+      initializer: None
+  - model_name: sub_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X", "Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2, 2]
+        - name: Y
+          data_type: 1
+          dims: [256, 256, 2, 2]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 2, 2]
+      initializer: None
+
+gengold:
+  - gold_name: sub_fp32_dyn_gold_1
+    in_model: sub_fp32_dyn.onnx
+    input_dtypes: [1, 1]
+    input_shapes: [[128, 128], [128, 128]]
+  - gold_name: sub_fp32_fix_gold_1
+    in_model: sub_fp32_fix.onnx
+    input_dtypes: [1, 1]
+    input_shapes: [[256, 256], [256, 256]]
+  - gold_name: sub_fp32_fix_input_3d
+    in_model: sub_fp32_fix_input_3d.onnx
+    input_dtypes: [1, 1]
+    input_shapes: [[256, 256, 2], [256, 256, 2]]
+  - gold_name: sub_fp32_fix_input_4d
+    in_model: sub_fp32_fix_input_4d.onnx
+    input_dtypes: [1, 1]
+    input_shapes: [[256, 256, 2, 2], [256, 256, 2, 2]]
+
+convert:
+  - out_model: sub_fp32_dyn_2_dyn.ms
+    in_model: sub_fp32_dyn.onnx
+    input_shapes: None
+  - out_model: sub_fp32_dyn_2_fix.ms
+    in_model: sub_fp32_dyn.onnx
+    input_shapes: X:128,128;Y:128,128
+  - out_model: sub_fp32_fix_2_fix.ms
+    in_model: sub_fp32_fix.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: sub_fp16_dyn_2_dyn.ms
+    in_model: sub_fp32_dyn.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: sub_fp16_dyn_2_fix.ms
+    in_model: sub_fp32_dyn.onnx
+    input_shapes: X:128,128;Y:128,128
+    fp16: on
+  - out_model: sub_fp16_fix_2_fix.ms
+    in_model: sub_fp32_fix.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: sub_fp32_fix_input_3d.ms
+    in_model: sub_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: sub_fp32_fix_input_4d.ms
+    in_model: sub_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: sub_fp32_dyn_2_dyn.ms
+    gold_in: sub_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128;Y:128,128
+  - in_model: sub_fp32_dyn_2_fix.ms
+    gold_in: sub_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: sub_fp32_fix_2_fix.ms
+    gold_in: sub_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: sub_fp16_dyn_2_dyn.ms
+    gold_in: sub_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128;Y:128,128
+  - in_model: sub_fp16_dyn_2_fix.ms
+    gold_in: sub_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: sub_fp16_fix_2_fix.ms
+    gold_in: sub_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: sub_fp32_fix_input_3d.ms
+    gold_in: sub_fp32_fix_input_3d
+    dtypes: 1
+    input_shapes: None
+  - in_model: sub_fp32_fix_input_4d.ms
+    gold_in: sub_fp32_fix_input_4d
+    dtypes: 1
+    input_shapes: None
+
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/transpose/transpose.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/transpose/transpose.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..998b803a8e1becfede72369a489d983af7f89029
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/transpose/transpose.yaml
@@ -0,0 +1,129 @@
+op_name: Transpose
+genonnx:
+  - model_name: transpose_fp32_dyn.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Y"]
+      attributes:
+        perm: [1,0]
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Y
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: transpose_fp32_fix.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Y"]
+      attributes:
+        perm: [1,0]
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [128, 256]
+      outputs:
+        - name: Y
+          data_type: 1
+          dims: [256, 128]
+      initializer: None
+  - model_name: transpose_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Y"]
+      attributes:
+        perm: [2,1,0]
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2]
+      outputs:
+        - name: Y
+          data_type: 1
+          dims: [2, 256, 256]
+      initializer: None
+
+
+  - model_name: transpose_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Y"]
+      attributes:
+        perm: [2,3,0,1]
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2, 2]
+      outputs:
+        - name: Y
+          data_type: 1
+          dims: [2, 2, 256, 256]
+      initializer: None
+
+gengold:
+  - gold_name: transpose_fp32_dyn_gold_1
+    in_model: transpose_fp32_dyn.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 64]]
+  - gold_name: transpose_fp32_fix_gold_1
+    in_model: transpose_fp32_fix.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 256]]
+  - gold_name: transpose_fp32_fix_input_3d
+    in_model: transpose_fp32_fix_input_3d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 2]]
+  - gold_name: transpose_fp32_fix_input_4d
+    in_model: transpose_fp32_fix_input_4d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 2, 2]]
+
+convert:
+  - out_model: transpose_fp32_dyn_2_dyn.ms
+    in_model: transpose_fp32_dyn.onnx
+    input_shapes: None
+  - out_model: transpose_fp32_dyn_2_fix.ms
+    in_model: transpose_fp32_dyn.onnx
+    input_shapes: X:128,64
+  - out_model: transpose_fp32_fix_2_fix.ms
+    in_model: transpose_fp32_fix.onnx
+    input_shapes: None
+  - out_model: transpose_fp16_dyn_2_dyn.ms
+    in_model: transpose_fp32_dyn.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: transpose_fp32_fix_input_3d.ms
+    in_model: transpose_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: transpose_fp32_fix_input_4d.ms
+    in_model: transpose_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: transpose_fp32_dyn_2_dyn.ms
+    gold_in: transpose_fp32_dyn_gold_1
+    dtypes: 1
+  - in_model: transpose_fp32_dyn_2_fix.ms
+    gold_in: transpose_fp32_dyn_gold_1
+    dtypes: 1
+  - in_model: transpose_fp32_fix_2_fix.ms
+    gold_in: transpose_fp32_fix_gold_1
+    dtypes: 1
+  - in_model: transpose_fp16_dyn_2_dyn.ms
+    gold_in: transpose_fp32_dyn_gold_1
+    dtypes: 1
+  - in_model: transpose_fp32_fix_input_3d.ms
+    gold_in: transpose_fp32_fix_input_3d
+    dtypes: 1
+  - in_model: transpose_fp32_fix_input_4d.ms
+    gold_in: transpose_fp32_fix_input_4d
+    dtypes: 1
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/tril/tril.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/tril/tril.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6ef991f843f6e504de228fe09adf01687cd6ea7
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/tril/tril.yaml
@@ -0,0 +1,128 @@
+op_name: Trilu
+genonnx:
+  - model_name: tril_fp32_dyn.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 0
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: tril_fp32_fix.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 0
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256]
+      initializer: None
+  - model_name: tril_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 0
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 4]
+      initializer: None
+  - model_name: tril_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 0
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 2, 4]
+      initializer: None
+
+gengold:
+  - gold_name: tril_fp32_dyn_gold_1
+    in_model: tril_fp32_dyn.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 128]]
+  - gold_name: tril_fp32_fix_gold_1
+    in_model: tril_fp32_fix.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256]]
+  - gold_name: tril_fp32_fix_input_3d
+    in_model: tril_fp32_fix_input_3d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 4]]
+  - gold_name: tril_fp32_fix_input_4d
+    in_model: tril_fp32_fix_input_4d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 2, 4]]
+
+convert:
+  - out_model: tril_fp32_dyn_2_dyn.ms
+    in_model: tril_fp32_dyn.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: tril_fp32_dyn_2_fix.ms
+    in_model: tril_fp32_dyn.onnx
+    input_shapes: X:128,128
+    fp16: off
+  - out_model: tril_fp32_fix_2_fix.ms
+    in_model: tril_fp32_fix.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: tril_fp32_fix_input_3d.ms
+    in_model: tril_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: tril_fp32_fix_input_4d.ms
+    in_model: tril_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: tril_fp32_dyn_2_dyn.ms
+    gold_in: tril_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128
+  - in_model: tril_fp32_dyn_2_fix.ms
+    gold_in: tril_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: tril_fp32_fix_2_fix.ms
+    gold_in: tril_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: tril_fp32_fix_input_3d.ms
+    gold_in: tril_fp32_fix_input_3d
+    dtypes: 1
+    input_shapes: None
+  - in_model: tril_fp32_fix_input_4d.ms
+    gold_in: tril_fp32_fix_input_4d
+    dtypes: 1
+    input_shapes: None
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/triu/triu.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/triu/triu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2b4ed2de520e1996217aa3df9b87b27cbbdc34be
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/triu/triu.yaml
@@ -0,0 +1,129 @@
+op_name: Trilu
+genonnx:
+  - model_name: triu_fp32_dyn.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 1
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: triu_fp32_fix.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 1
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256]
+      initializer: None
+  - model_name: triu_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 1
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 4]
+      initializer: None
+  - model_name: triu_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X"]
+      outputs: ["Z"]
+      attributes:
+        upper: 1
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 2, 4]
+      initializer: None
+
+gengold:
+  - gold_name: triu_fp32_dyn_gold_1
+    in_model: triu_fp32_dyn.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 128]]
+  - gold_name: triu_fp32_fix_gold_1
+    in_model: triu_fp32_fix.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256]]
+  - gold_name: triu_fp32_fix_input_3d
+    in_model: triu_fp32_fix_input_3d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 4]]
+  - gold_name: triu_fp32_fix_input_4d
+    in_model: triu_fp32_fix_input_4d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 2, 4]]
+
+convert:
+  - out_model: triu_fp32_dyn_2_dyn.ms
+    in_model: triu_fp32_dyn.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: triu_fp32_dyn_2_fix.ms
+    in_model: triu_fp32_dyn.onnx
+    input_shapes: X:128,128
+    fp16: off
+  - out_model: triu_fp32_fix_2_fix.ms
+    in_model: triu_fp32_fix.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: triu_fp32_fix_input_3d.ms
+    in_model: triu_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: triu_fp32_fix_input_4d.ms
+    in_model: triu_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: triu_fp32_dyn_2_dyn.ms
+    gold_in: triu_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128
+  - in_model: triu_fp32_dyn_2_fix.ms
+    gold_in: triu_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: triu_fp32_fix_2_fix.ms
+    gold_in: triu_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: triu_fp32_fix_input_3d.ms
+    gold_in: triu_fp32_fix_input_3d
+    dtypes: 1
+    input_shapes: None
+  - in_model: triu_fp32_fix_input_4d.ms
+    gold_in: triu_fp32_fix_input_4d
+    dtypes: 1
+    input_shapes: None
+
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/unsqueeze/unsqueeze.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/unsqueeze/unsqueeze.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df563389a3911f34a347007ed7117dec5d50dc2a
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/unsqueeze/unsqueeze.yaml
@@ -0,0 +1,142 @@
+op_name: Unsqueeze
+genonnx:
+  - model_name: unsqueeze_fp32_dyn.onnx
+    node_param:
+      inputs: ["X","axes"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None, None]
+      initializer:
+        - name: axes
+          data_type: 7
+          dims: [1]
+          value: [0]
+
+  - model_name: unsqueeze_fp32_fix.onnx
+    node_param:
+      inputs: ["X","axes"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 1, 256]
+      initializer:
+        - name: axes
+          data_type: 7
+          dims: [1]
+          value: [1]
+  - model_name: unsqueeze_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["X","axes"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 1, 4]
+      initializer:
+        - name: axes
+          data_type: 7
+          dims: [1]
+          value: [2]
+  - model_name: unsqueeze_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["X","axes"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: X
+          data_type: 1
+          dims: [256, 256, 2, 4]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [256, 256, 2, 1, 4]
+      initializer:
+        - name: axes
+          data_type: 7
+          dims: [1]
+          value: [3]
+
+gengold:
+  - gold_name: unsqueeze_fp32_dyn_gold_1
+    in_model: unsqueeze_fp32_dyn.onnx
+    input_dtypes: [1]
+    input_shapes: [[128, 128]]
+  - gold_name: unsqueeze_fp32_fix_gold_1
+    in_model: unsqueeze_fp32_fix.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256]]
+  - gold_name: unsqueeze_fp32_fix_input_3d
+    in_model: unsqueeze_fp32_fix_input_3d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 4]]
+  - gold_name: unsqueeze_fp32_fix_input_4d
+    in_model: unsqueeze_fp32_fix_input_4d.onnx
+    input_dtypes: [1]
+    input_shapes: [[256, 256, 2, 4]]
+
+convert:
+  - out_model: unsqueeze_fp32_dyn_2_dyn.ms
+    in_model: unsqueeze_fp32_dyn.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: unsqueeze_fp32_dyn_2_fix.ms
+    in_model: unsqueeze_fp32_dyn.onnx
+    input_shapes: X:128,128
+    fp16: off
+  - out_model: unsqueeze_fp32_fix_2_fix.ms
+    in_model: unsqueeze_fp32_fix.onnx
+    input_shapes: None
+    fp16: off
+  - out_model: unsqueeze_fp32_fix_input_3d.ms
+    in_model: unsqueeze_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: unsqueeze_fp32_fix_input_4d.ms
+    in_model: unsqueeze_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: unsqueeze_fp32_dyn_2_dyn.ms
+    gold_in: unsqueeze_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: X:128,128
+  - in_model: unsqueeze_fp32_dyn_2_fix.ms
+    gold_in: unsqueeze_fp32_dyn_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: unsqueeze_fp32_fix_2_fix.ms
+    gold_in: unsqueeze_fp32_fix_gold_1
+    dtypes: 1
+    input_shapes: None
+  - in_model: unsqueeze_fp32_fix_input_3d.ms
+    gold_in: unsqueeze_fp32_fix_input_3d
+    dtypes: 1
+    input_shapes: None
+  - in_model: unsqueeze_fp32_fix_input_4d.ms
+    gold_in: unsqueeze_fp32_fix_input_4d
+    dtypes: 1
+    input_shapes: None
+
diff --git a/mindspore-src/source/mindspore/lite/test/st/ops/op/where/where.yaml b/mindspore-src/source/mindspore/lite/test/st/ops/op/where/where.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..597c1582243d84df3ac1fde8e770adf53ef92680
--- /dev/null
+++ b/mindspore-src/source/mindspore/lite/test/st/ops/op/where/where.yaml
@@ -0,0 +1,143 @@
+op_name: Where
+genonnx:
+  - model_name: where_fp32_dyn.onnx
+    node_param:
+      inputs: ["condition","X","Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: condition
+          data_type: 9
+          dims: [None, None]
+        - name: X
+          data_type: 1
+          dims: [None, None]
+        - name: Y
+          data_type: 1
+          dims: [None, None]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [None, None]
+      initializer: None
+  - model_name: where_fp32_fix.onnx
+    node_param:
+      inputs: ["condition","X","Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: condition
+          data_type: 9
+          dims: [2, 2]
+        - name: X
+          data_type: 1
+          dims: [2, 2]
+        - name: Y
+          data_type: 1
+          dims: [2, 2]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [2, 2]
+      initializer: None
+  - model_name: where_fp32_fix_input_3d.onnx
+    node_param:
+      inputs: ["condition","X","Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: condition
+          data_type: 9
+          dims: [2, 3, 3]
+        - name: X
+          data_type: 1
+          dims: [2, 3, 3]
+        - name: Y
+          data_type: 1
+          dims: [2, 3, 3]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [2, 3, 3]
+      initializer: None
+  - model_name: where_fp32_fix_input_4d.onnx
+    node_param:
+      inputs: ["condition","X","Y"]
+      outputs: ["Z"]
+      attributes: None
+    graph_param:
+      inputs:
+        - name: condition
+          data_type: 9
+          dims: [2, 3, 8, 1]
+        - name: X
+          data_type: 1
+          dims: [2, 3, 8, 1]
+        - name: Y
+          data_type: 1
+          dims: [2, 3, 8, 1]
+      outputs:
+        - name: Z
+          data_type: 1
+          dims: [2, 3, 8, 1]
+      initializer: None
+
+gengold:
+  - gold_name: where_fp32_dyn_gold_1
+    in_model: where_fp32_dyn.onnx
+    input_dtypes: [9, 1, 1]
+    input_shapes: [[2, 2],[2, 2],[2, 2]]
+  - gold_name: where_fp32_fix_gold_1
+    in_model: where_fp32_fix.onnx
+    input_dtypes: [9, 1, 1]
+    input_shapes: [[2, 2],[2, 2],[2, 2]]
+  - gold_name: where_fp32_fix_input_3d
+    in_model: where_fp32_fix_input_3d.onnx
+    input_dtypes: [9, 1, 1]
+    input_shapes: [[2, 3, 3],[2, 3, 3],[2, 3, 3]]
+  - gold_name: where_fp32_fix_input_4d
+    in_model: where_fp32_fix_input_4d.onnx
+    input_dtypes: [9, 1, 1]
+    input_shapes: [[2, 3, 8, 1],[2, 3, 8, 1],[2, 3, 8, 1]]
+
+convert:
+  - out_model: where_fp32_dyn_2_dyn.ms
+    in_model: where_fp32_dyn.onnx
+    input_shapes: None
+  - out_model: where_fp32_dyn_2_fix.ms
+    in_model: where_fp32_dyn.onnx
+    input_shapes: condition:2,2;X:2,2;Y:2,2
+  - out_model: where_fp32_fix_2_fix.ms
+    in_model: where_fp32_fix.onnx
+    input_shapes: None
+  - out_model: where_fp32_fix_input_3d.ms
+    in_model: where_fp32_fix_input_3d.onnx
+    input_shapes: None
+    fp16: on
+  - out_model: where_fp32_fix_input_4d.ms
+    in_model: where_fp32_fix_input_4d.onnx
+    input_shapes: None
+    fp16: on
+
+run:
+  - in_model: where_fp32_dyn_2_dyn.ms
+    gold_in: where_fp32_dyn_gold_1
+    dtypes: 1
+  - in_model: where_fp32_dyn_2_fix.ms
+    gold_in: where_fp32_dyn_gold_1
+    dtypes: 1
+  - in_model: where_fp32_fix_2_fix.ms
+    gold_in: where_fp32_fix_gold_1
+    dtypes: 1
+  - in_model: where_fp32_fix_input_3d.ms
+    gold_in: where_fp32_fix_input_3d
+    dtypes: 1
+  - in_model: where_fp32_fix_input_4d.ms
+    gold_in: where_fp32_fix_input_4d
+    dtypes: 1
+
+
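Editor's note on the YAML schema used by every file in this patch: each config drives the same four-stage op test pipeline. genonnx generates an ONNX model, gengold runs that model to produce golden outputs, convert produces a .ms model with the MindSpore Lite converter (optionally with fp16 on), and run executes the .ms model and compares its outputs against the gold. The sketch below is only a hypothetical illustration of how one genonnx entry could be realized with the public onnx.helper API; the harness script that actually consumes these files is not part of this diff, so the function name build_model and the dict-access pattern are assumptions taken from the YAML keys. The data_type codes match ONNX TensorProto (1 = FLOAT, 7 = INT64, 9 = BOOL), and None dims denote dynamic dimensions. Note that an unquoted None in YAML is parsed by PyYAML as the string "None", which the sketch accounts for.

import onnx
from onnx import helper

def build_model(op_type, entry):
    """Hypothetical builder: turn one parsed 'genonnx' entry into an ONNX model."""
    g = entry["graph_param"]

    def value_info(spec):
        # 'None' dims in the YAML become symbolic (dynamic) dimensions.
        dims = [d if d not in (None, "None") else "dyn" for d in spec["dims"]]
        return helper.make_tensor_value_info(spec["name"], spec["data_type"], dims)

    # 'attributes: None' means no node attributes; otherwise the mapping is
    # passed through, e.g. perm=[1, 0] for Transpose or upper=0 for Trilu.
    attrs = entry["node_param"].get("attributes")
    attrs = {} if attrs in (None, "None") else attrs
    node = helper.make_node(op_type,
                            inputs=entry["node_param"]["inputs"],
                            outputs=entry["node_param"]["outputs"],
                            **attrs)

    # Optional constant inputs, e.g. the INT64 'axes' tensor of Unsqueeze.
    inits = g.get("initializer")
    inits = [] if inits in (None, "None") else [
        helper.make_tensor(i["name"], i["data_type"], i["dims"], i["value"])
        for i in inits
    ]

    graph = helper.make_graph([node], "test_graph",
                              inputs=[value_info(i) for i in g["inputs"]],
                              outputs=[value_info(o) for o in g["outputs"]],
                              initializer=inits)
    model = helper.make_model(graph)
    onnx.checker.check_model(model)
    return model

For example, calling build_model("Sqrt", entry) with the first sqrt entry above would yield a float32 model whose input X and output Z are both [dyn, dyn], which the convert stage later pins down via input_shapes: X:128,128.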