diff --git a/frameworks/BUILD.gn b/frameworks/BUILD.gn index 66c57fbfd30604bb1ed3e831c285544fd60953b8..58580387f413bc23debf8c1c02d13fa5589dcd07 100644 --- a/frameworks/BUILD.gn +++ b/frameworks/BUILD.gn @@ -46,26 +46,35 @@ nnrt_sources = [ ] ops_sources = [ + "native/ops/abs_builder.cpp", "native/ops/add_builder.cpp", "native/ops/argmax_builder.cpp", "native/ops/avgpool_builder.cpp", "native/ops/batch_to_space_nd_builder.cpp", "native/ops/batchnorm_builder.cpp", "native/ops/bias_add_builder.cpp", + "native/ops/broadcast_to_builder.cpp", "native/ops/cast_builder.cpp", "native/ops/concat_builder.cpp", + "native/ops/constant_of_shape_builder.cpp", "native/ops/conv2d_builder.cpp", "native/ops/conv2d_transpose_builder.cpp", + "native/ops/depth_to_space_builder.cpp", "native/ops/depthwise_conv2d_native_builder.cpp", "native/ops/div_builder.cpp", "native/ops/eltwise_builder.cpp", + "native/ops/erf_builder.cpp", + "native/ops/exp_builder.cpp", "native/ops/expandims_builder.cpp", "native/ops/fill_builder.cpp", + "native/ops/flatten_builder.cpp", "native/ops/fullconnection_builder.cpp", "native/ops/gather_builder.cpp", "native/ops/gelu_builder.cpp", "native/ops/hswish_builder.cpp", + "native/ops/instance_norm_builder.cpp", "native/ops/layernorm_builder.cpp", + "native/ops/less_builder.cpp", "native/ops/lessequal_builder.cpp", "native/ops/matmul_builder.cpp", "native/ops/maximum_builder.cpp", @@ -78,6 +87,8 @@ ops_sources = [ "native/ops/pow_builder.cpp", "native/ops/prelu_builder.cpp", "native/ops/quant_dtype_cast_builder.cpp", + "native/ops/range_builder.cpp", + "native/ops/real_div_builder.cpp", "native/ops/reduceall_builder.cpp", "native/ops/reducemean_builder.cpp", "native/ops/reduceprod_builder.cpp", @@ -87,6 +98,7 @@ ops_sources = [ "native/ops/resize_bilinear_builder.cpp", "native/ops/rsqrt_builder.cpp", "native/ops/scale_builder.cpp", + "native/ops/select_builder.cpp", "native/ops/shape_builder.cpp", "native/ops/sigmoid_builder.cpp", "native/ops/slice_builder.cpp", 
@@ -94,6 +106,7 @@ ops_sources = [ "native/ops/space_to_batch_nd_builder.cpp", "native/ops/split_builder.cpp", "native/ops/sqrt_builder.cpp", + "native/ops/square_builder.cpp", "native/ops/squared_difference_builder.cpp", "native/ops/squeeze_builder.cpp", "native/ops/stack_builder.cpp", @@ -104,6 +117,7 @@ ops_sources = [ "native/ops/top_k_builder.cpp", "native/ops/transpose_builder.cpp", "native/ops/unsqueeze_builder.cpp", + "native/ops/unstack_builder.cpp", ] ohos_shared_library("libneural_network_runtime") { diff --git a/frameworks/native/ops/abs_builder.cpp b/frameworks/native/ops/abs_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..182e1b50dabe953b75dd0330acc6410c5025b40f --- /dev/null +++ b/frameworks/native/ops/abs_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "abs_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Abs"; + +AbsBuilder::AbsBuilder() {} + +AbsBuilder::~AbsBuilder() {} + +OH_NN_ReturnCode AbsBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Abs] Build failed, the abs operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Abs] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Abs] Build failed, the abs expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr AbsBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Abs] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Abs_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(AbsBuilder, OH_NN_OPS_ABS); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/abs_builder.h b/frameworks/native/ops/abs_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..59f59773713935a2dc3199ab56d19d9f6fefa683 --- /dev/null +++ b/frameworks/native/ops/abs_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_ABS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ABS_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class AbsBuilder : public OpsBuilder { +public: + AbsBuilder(); + ~AbsBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ABS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/broadcast_to_builder.cpp b/frameworks/native/ops/broadcast_to_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..09351976212e61e5c6b596aa0cb8a516377893b6 --- /dev/null +++ b/frameworks/native/ops/broadcast_to_builder.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "broadcast_to_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "BroadcastTo"; + +BroadcastToBuilder::BroadcastToBuilder() {} + +BroadcastToBuilder::~BroadcastToBuilder() {} + +OH_NN_ReturnCode BroadcastToBuilder::SetShape(std::shared_ptr tensor) +{ + // Set Shape + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[BroadcastTo] The shape should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + m_shape.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[BroadcastTo] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_shape.emplace_back(*(static_cast(buffer))); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode BroadcastToBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[BroadcastTo] Build failed, the broadcastTo operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[BroadcastTo] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_BROADCAST_TO_SHAPE: + returnCode = SetShape(tensor); + break; + default: + LOGE("[BroadcastTo] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[BroadcastTo] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr BroadcastToBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[BroadcastTo] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_BroadcastTo_CreatePrimitive(m_shape); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(BroadcastToBuilder, OH_NN_OPS_BROADCAST_TO); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/broadcast_to_builder.h b/frameworks/native/ops/broadcast_to_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..dd5fcee2e3ba91c5c288f354d67385b90c14b491 --- /dev/null +++ b/frameworks/native/ops/broadcast_to_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_BROADCAST_TO_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_BROADCAST_TO_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class BroadcastToBuilder : public OpsBuilder { +public: + BroadcastToBuilder(); + ~BroadcastToBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetShape(std::shared_ptr tensor); + +private: + std::vector m_shape; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_BROADCAST_TO_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/constant_of_shape_builder.cpp b/frameworks/native/ops/constant_of_shape_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..7ae707ceb02363630fca5525f82e5ef22247281f --- /dev/null +++ b/frameworks/native/ops/constant_of_shape_builder.cpp @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "constant_of_shape_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "ConstantOfShape"; + +ConstantOfShapeBuilder::ConstantOfShapeBuilder() {} + +ConstantOfShapeBuilder::~ConstantOfShapeBuilder() {} + +OH_NN_ReturnCode ConstantOfShapeBuilder::SetDataType(std::shared_ptr tensor) +{ + // Set DataType + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[ConstantOfShape] The data_type should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[ConstantOfShape] The data_type should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ConstantOfShape] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_data_type = *static_cast(buffer); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ConstantOfShapeBuilder::SetValue(std::shared_ptr tensor) +{ + // Set Value + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[ConstantOfShape] The value should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + m_value.clear(); + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[ConstantOfShape] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_value.emplace_back(*(static_cast(buffer))); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ConstantOfShapeBuilder::Build(const std::vector& paramsIndex, + const 
std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[ConstantOfShape] Build failed, the constantOfShape operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[ConstantOfShape] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE: + returnCode = SetDataType(tensor); + break; + case OH_NN_CONSTANT_OF_SHAPE_VALUE: + returnCode = SetValue(tensor); + break; + default: + LOGE("[ConstantOfShape] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[ConstantOfShape] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ConstantOfShapeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[ConstantOfShape] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ConstantOfShape_CreatePrimitive(m_data_type, m_value); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ConstantOfShapeBuilder, OH_NN_OPS_CONSTANT_OF_SHAPE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/constant_of_shape_builder.h b/frameworks/native/ops/constant_of_shape_builder.h new file mode 100755 index 
0000000000000000000000000000000000000000..42442f8364f66723eee55e167b375601036ddb63 --- /dev/null +++ b/frameworks/native/ops/constant_of_shape_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_CONSTANT_OF_SHAPE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_CONSTANT_OF_SHAPE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ConstantOfShapeBuilder : public OpsBuilder { +public: + ConstantOfShapeBuilder(); + ~ConstantOfShapeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetDataType(std::shared_ptr tensor); + OH_NN_ReturnCode SetValue(std::shared_ptr tensor); + +private: + int64_t m_data_type {0}; + std::vector m_value; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_CONSTANT_OF_SHAPE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/depth_to_space_builder.cpp b/frameworks/native/ops/depth_to_space_builder.cpp new file mode 100755 index 
0000000000000000000000000000000000000000..5d1ba65afc9d67ae27024b6395cbdbe726fb63ed --- /dev/null +++ b/frameworks/native/ops/depth_to_space_builder.cpp @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "depth_to_space_builder.h" + +#include "frameworks/native/transform.h" +#include "frameworks/native/validation.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "DepthToSpace"; + +DepthToSpaceBuilder::DepthToSpaceBuilder() {} + +DepthToSpaceBuilder::~DepthToSpaceBuilder() {} + +OH_NN_ReturnCode DepthToSpaceBuilder::SetBlockSize(std::shared_ptr tensor) +{ + // Set BlockSize + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[DepthToSpace] The block_size should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[DepthToSpace] The block_size should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthToSpace] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_block_size = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthToSpaceBuilder::SetFormat(std::shared_ptr tensor) +{ + // Set Format + if (tensor->GetDataType() != OH_NN_INT8) { + LOGE("[DepthToSpace] The format 
should be type OH_NN_INT8."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[DepthToSpace] The format should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[DepthToSpace] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int8_t* formatData = static_cast(buffer); + + if (!OHOS::NeuralNetworkRuntime::Validation::ValidateTensorFormat(static_cast(*formatData))) { + LOGE("[DepthToSpace] SetFormat failed. Format type is invalid."); + return OH_NN_INVALID_PARAMETER; + } + + auto pFormat = (OH_NN_Format)(*formatData); + m_format = NNToMS::TransformFormat(pFormat); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode DepthToSpaceBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[DepthToSpace] Build failed, the depthToSpace operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[DepthToSpace] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE: + returnCode = SetBlockSize(tensor); + break; + case OH_NN_DEPTH_TO_SPACE_FORMAT: + returnCode = SetFormat(tensor); + break; + default: + LOGE("[DepthToSpace] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[DepthToSpace] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr DepthToSpaceBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[DepthToSpace] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_DepthToSpace_CreatePrimitive(m_block_size, m_format); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(DepthToSpaceBuilder, OH_NN_OPS_DEPTH_TO_SPACE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/depth_to_space_builder.h b/frameworks/native/ops/depth_to_space_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..4f5652283bc03b72a55dc256954925b8b3d703aa --- /dev/null +++ b/frameworks/native/ops/depth_to_space_builder.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_DEPTH_TO_SPACE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_DEPTH_TO_SPACE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class DepthToSpaceBuilder : public OpsBuilder { +public: + DepthToSpaceBuilder(); + ~DepthToSpaceBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBlockSize(std::shared_ptr tensor); + OH_NN_ReturnCode SetFormat(std::shared_ptr tensor); + +private: + int64_t m_block_size {0}; + mindspore::lite::Format m_format {mindspore::lite::FORMAT_NCHW}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_DEPTH_TO_SPACE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/erf_builder.cpp b/frameworks/native/ops/erf_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..19424e78f9ee5340bf43a1fd7d88e591abac3b09 --- /dev/null +++ b/frameworks/native/ops/erf_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "erf_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Erf"; + +ErfBuilder::ErfBuilder() {} + +ErfBuilder::~ErfBuilder() {} + +OH_NN_ReturnCode ErfBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Erf] Build failed, the erf operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Erf] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Erf] Build failed, the erf expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ErfBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Erf] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Erf_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ErfBuilder, OH_NN_OPS_ERF); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/erf_builder.h b/frameworks/native/ops/erf_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..c4f688b93eaf38aebe19a42adfea07d5c50b2252 --- /dev/null +++ b/frameworks/native/ops/erf_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef NEURAL_NETWORK_RUNTIME_ERF_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_ERF_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ErfBuilder : public OpsBuilder { +public: + ErfBuilder(); + ~ErfBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_ERF_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/exp_builder.cpp b/frameworks/native/ops/exp_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..8755ff12275ec8d5727a1b2be1ba154951d7ab04 --- /dev/null +++ b/frameworks/native/ops/exp_builder.cpp @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "exp_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Exp"; + +ExpBuilder::ExpBuilder() {} + +ExpBuilder::~ExpBuilder() {} + +OH_NN_ReturnCode ExpBuilder::SetBase(std::shared_ptr tensor) +{ + // Set Base + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Exp] The base should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Exp] The base should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Exp] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_base = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ExpBuilder::SetScale(std::shared_ptr tensor) +{ + // Set Scale + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Exp] The scale should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Exp] The scale should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Exp] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_scale = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode ExpBuilder::SetShift(std::shared_ptr tensor) +{ + // Set Shift + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[Exp] The shift should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Exp] The shift should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Exp] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_shift = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode 
ExpBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Exp] Build failed, the exp operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Exp] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_EXP_BASE: + returnCode = SetBase(tensor); + break; + case OH_NN_EXP_SCALE: + returnCode = SetScale(tensor); + break; + case OH_NN_EXP_SHIFT: + returnCode = SetShift(tensor); + break; + default: + LOGE("[Exp] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Exp] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr ExpBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Exp] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_ExpFusion_CreatePrimitive(m_base, m_scale, m_shift); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(ExpBuilder, OH_NN_OPS_EXP); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/ops/exp_builder.h b/frameworks/native/ops/exp_builder.h new file mode 100755 index 
0000000000000000000000000000000000000000..70857fe51d2301f02741e20f63f9ced199cd60e1 --- /dev/null +++ b/frameworks/native/ops/exp_builder.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_EXP_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_EXP_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class ExpBuilder : public OpsBuilder { +public: + ExpBuilder(); + ~ExpBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetBase(std::shared_ptr tensor); + OH_NN_ReturnCode SetScale(std::shared_ptr tensor); + OH_NN_ReturnCode SetShift(std::shared_ptr tensor); + +private: + float m_base {-1.0f}; + float m_scale {0.1f}; + float m_shift {0.0f}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_EXP_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/flatten_builder.cpp b/frameworks/native/ops/flatten_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..555fde4f77a0fe1090d2bee37f60e394358f6303 --- 
/dev/null +++ b/frameworks/native/ops/flatten_builder.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "flatten_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Flatten"; + +FlattenBuilder::FlattenBuilder() {} + +FlattenBuilder::~FlattenBuilder() {} + +OH_NN_ReturnCode FlattenBuilder::SetAxis(std::shared_ptr tensor) +{ + // Set Axis + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Flatten] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Flatten] The axis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Flatten] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode FlattenBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Flatten] Build failed, the flatten operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Flatten] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_FLATTEN_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[Flatten] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Flatten] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr FlattenBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Flatten] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Flatten_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(FlattenBuilder, OH_NN_OPS_FLATTEN); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/flatten_builder.h b/frameworks/native/ops/flatten_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..63a612e8e68da48553367c5580a7423ff7339a82 --- /dev/null +++ b/frameworks/native/ops/flatten_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_FLATTEN_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_FLATTEN_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class FlattenBuilder : public OpsBuilder { +public: + FlattenBuilder(); + ~FlattenBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis {1}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_FLATTEN_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/instance_norm_builder.cpp b/frameworks/native/ops/instance_norm_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..ba1dbf21474ac5d3b7033e8026dd01f7443d77b1 --- /dev/null +++ b/frameworks/native/ops/instance_norm_builder.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "instance_norm_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "InstanceNorm"; + +InstanceNormBuilder::InstanceNormBuilder() {} + +InstanceNormBuilder::~InstanceNormBuilder() {} + +OH_NN_ReturnCode InstanceNormBuilder::SetEpsilon(std::shared_ptr tensor) +{ + // Set Epsilon + if (tensor->GetDataType() != OH_NN_FLOAT32) { + LOGE("[InstanceNorm] The epsilon should be type OH_NN_FLOAT32."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[InstanceNorm] The epsilon should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[InstanceNorm] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_epsilon = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode InstanceNormBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[InstanceNorm] Build failed, the InstanceNorm operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[InstanceNorm] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_INSTANCE_NORM_EPSILON: + returnCode = SetEpsilon(tensor); + break; + default: + LOGE("[InstanceNorm] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[InstanceNorm] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr InstanceNormBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[InstanceNorm] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_InstanceNorm_CreatePrimitive(m_epsilon); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(InstanceNormBuilder, OH_NN_OPS_INSTANCE_NORM); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/instance_norm_builder.h b/frameworks/native/ops/instance_norm_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..e9d22567f2c3220d12ecc47cd3bf95441f48d535 --- /dev/null +++ b/frameworks/native/ops/instance_norm_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_INSTANCE_NORM_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_INSTANCE_NORM_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class InstanceNormBuilder : public OpsBuilder { +public: + InstanceNormBuilder(); + ~InstanceNormBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetEpsilon(std::shared_ptr tensor); + +private: + float m_epsilon {0.0f}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_INSTANCE_NORM_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/less_builder.cpp b/frameworks/native/ops/less_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..9da2189851d453ac202476f79ffef759238998f7 --- /dev/null +++ b/frameworks/native/ops/less_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "less_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Less"; + +LessBuilder::LessBuilder() {} + +LessBuilder::~LessBuilder() {} + +OH_NN_ReturnCode LessBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Less] Build failed, the less operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Less] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Less] Build failed, the less expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr LessBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Less] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Less_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(LessBuilder, OH_NN_OPS_LESS); +} // namespace Ops 
+} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/less_builder.h b/frameworks/native/ops/less_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..df94bc47cb0b08161a7ba5af81185da0503ddd70 --- /dev/null +++ b/frameworks/native/ops/less_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_LESS_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_LESS_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class LessBuilder : public OpsBuilder { +public: + LessBuilder(); + ~LessBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_LESS_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/range_builder.cpp b/frameworks/native/ops/range_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..e05c96da1082c114a9b3d76fe66731871b716540 --- /dev/null +++ b/frameworks/native/ops/range_builder.cpp @@ -0,0 +1,188 @@ +/* + * 
Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "range_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 0; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Range"; + +RangeBuilder::RangeBuilder() {} + +RangeBuilder::~RangeBuilder() {} + +OH_NN_ReturnCode RangeBuilder::SetDType(std::shared_ptr tensor) +{ + // Set DType + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Range] The d_type should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Range] The d_type should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Range] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_d_type = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode RangeBuilder::SetStart(std::shared_ptr tensor) +{ + // Set Start + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Range] The start should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Range] The start should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Range] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_start = 
*(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode RangeBuilder::SetLimit(std::shared_ptr tensor) +{ + // Set Limit + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Range] The limit should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Range] The limit should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Range] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_limit = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode RangeBuilder::SetDelta(std::shared_ptr tensor) +{ + // Set Delta + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Range] The delta should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Range] The delta should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Range] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_delta = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode RangeBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Range] Build failed, the Range operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Range] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_RANGE_DTYPE: + returnCode = SetDType(tensor); + break; + case OH_NN_RANGE_START: + returnCode = SetStart(tensor); + break; + case OH_NN_RANGE_LIMIT: + returnCode = SetLimit(tensor); + break; + case OH_NN_RANGE_DELTA: + returnCode = SetDelta(tensor); + break; + default: + LOGE("[Range] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Range] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RangeBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Range] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Range_CreatePrimitive(m_d_type, m_start, m_limit, m_delta); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(RangeBuilder, OH_NN_OPS_RANGE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/range_builder.h b/frameworks/native/ops/range_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..e10e859f0698ceb39e31a07c123cc45d0dbef433 --- /dev/null +++ b/frameworks/native/ops/range_builder.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_RANGE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_RANGE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RangeBuilder : public OpsBuilder { +public: + RangeBuilder(); + ~RangeBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetDType(std::shared_ptr tensor); + OH_NN_ReturnCode SetStart(std::shared_ptr tensor); + OH_NN_ReturnCode SetLimit(std::shared_ptr tensor); + OH_NN_ReturnCode SetDelta(std::shared_ptr tensor); + +private: + int64_t m_d_type {0.0f}; + int64_t m_start {0.0f}; + int64_t m_limit {0.0f}; + int64_t m_delta {1.0f}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_RANGE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/real_div_builder.cpp b/frameworks/native/ops/real_div_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..e59584408999e02f2a77a64d74311eabf51c7866 --- /dev/null +++ b/frameworks/native/ops/real_div_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei 
Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "real_div_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 2; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "RealDiv"; + +RealDivBuilder::RealDivBuilder() {} + +RealDivBuilder::~RealDivBuilder() {} + +OH_NN_ReturnCode RealDivBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[RealDiv] Build failed, the realDiv operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[RealDiv] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[RealDiv] Build failed, the realDiv expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr RealDivBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[RealDiv] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_RealDiv_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(RealDivBuilder, OH_NN_OPS_REAL_DIV); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/real_div_builder.h b/frameworks/native/ops/real_div_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..6c607253d8138d976fbebf0298be366775e3bf97 --- /dev/null +++ b/frameworks/native/ops/real_div_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class RealDivBuilder : public OpsBuilder { +public: + RealDivBuilder(); + ~RealDivBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_REAL_DIV_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/select_builder.cpp b/frameworks/native/ops/select_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..955c267aff5c7ac9cff5cb3676ec091e5ac4a3a4 --- /dev/null +++ b/frameworks/native/ops/select_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "select_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 3; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Select"; + +SelectBuilder::SelectBuilder() {} + +SelectBuilder::~SelectBuilder() {} + +OH_NN_ReturnCode SelectBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Select] Build failed, the select operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Select] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Select] Build failed, the select expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SelectBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Select] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Select_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SelectBuilder, OH_NN_OPS_SELECT); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/select_builder.h b/frameworks/native/ops/select_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..7af58774b677347ef0009d466ad6a9fea25fbaae --- /dev/null +++ b/frameworks/native/ops/select_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SELECT_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SELECT_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SelectBuilder : public OpsBuilder { +public: + SelectBuilder(); + ~SelectBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SELECT_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/square_builder.cpp b/frameworks/native/ops/square_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..808c45c198e370df0c7d646de20ba141496ce99a --- /dev/null +++ b/frameworks/native/ops/square_builder.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "square_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Square"; + +SquareBuilder::SquareBuilder() {} + +SquareBuilder::~SquareBuilder() {} + +OH_NN_ReturnCode SquareBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Square] Build failed, the square operation has been build. cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Square] Build failed, passed invalid input or output index."); + return ret; + } + + if (!paramsIndex.empty()) { + LOGW("[Square] Build failed, the square expects no parameters, but receive %zu", paramsIndex.size()); + return OH_NN_INVALID_PARAMETER; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr SquareBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Square] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Square_CreatePrimitive(); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(SquareBuilder, 
OH_NN_OPS_SQUARE); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/square_builder.h b/frameworks/native/ops/square_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..5f1665bff52d7ef997829ce555cd48d55f1eed4a --- /dev/null +++ b/frameworks/native/ops/square_builder.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_SQUARE_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_SQUARE_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class SquareBuilder : public OpsBuilder { +public: + SquareBuilder(); + ~SquareBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_SQUARE_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/ops/unstack_builder.cpp b/frameworks/native/ops/unstack_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..b1d337c4ffc334792d2c57ad1d21d6ef03a8c0a8 --- /dev/null +++ 
b/frameworks/native/ops/unstack_builder.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "unstack_builder.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +static const int INPUT_NUM = 1; +static const int OUTPUT_NUM = 1; +static const std::string OP_NAME = "Unstack"; + +UnstackBuilder::UnstackBuilder() {} + +UnstackBuilder::~UnstackBuilder() {} + +OH_NN_ReturnCode UnstackBuilder::SetAxis(std::shared_ptr tensor) +{ + // Set Axis + if (tensor->GetDataType() != OH_NN_INT64) { + LOGE("[Unstack] The axis should be type OH_NN_INT64."); + return OH_NN_INVALID_PARAMETER; + } + + if (tensor->GetElementCount() != 1) { + LOGE("[Unstack] The axis should be scalar."); + return OH_NN_INVALID_PARAMETER; + } + + void* buffer = tensor->GetBuffer(); + if (buffer == nullptr) { + LOGE("[Unstack] Tensor buffer is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + m_axis = *(static_cast(buffer)); + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode UnstackBuilder::Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) +{ + if (m_isBuild) { + LOGE("[Unstack] Build failed, the Unstack operation has been build. 
cannot build again."); + return OH_NN_OPERATION_FORBIDDEN; + } + + auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM); + if (ret != OH_NN_SUCCESS) { + LOGE("[Unstack] Build failed, passed invalid input or output index."); + return ret; + } + + m_inputsIndex = inputsIndex; + m_outputsIndex = outputsIndex; + + OH_NN_ReturnCode returnCode; + for (int i : paramsIndex) { + std::shared_ptr tensor = allTensors[i]; + tensor->IdentifyOpParameter(); + switch (tensor->GetType()) { + case OH_NN_UNSTACK_AXIS: + returnCode = SetAxis(tensor); + break; + default: + LOGE("[Unstack] Build failed, param invalid, type=%d", tensor->GetType()); + return OH_NN_INVALID_PARAMETER; + } + + if (returnCode != OH_NN_SUCCESS) { + LOGE("[Unstack] Build failed, passed invalid param."); + return returnCode; + } + } + + m_name = OP_NAME; + m_isBuild = true; + return OH_NN_SUCCESS; +} + +LiteGraphPrimitvePtr UnstackBuilder::GetPrimitive() +{ + if (!m_isBuild) { + LOGE("[Unstack] GetPrimitive failed, cannot get primitive before call build."); + return {nullptr, DestroyLiteGraphPrimitive}; + } + + void* primitive = mindspore::lite::MindIR_Unstack_CreatePrimitive(m_axis); + LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ; + return graphPrimitivePtr; +} + +REGISTER_OPS(UnstackBuilder, OH_NN_OPS_UNSTACK); +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS diff --git a/frameworks/native/ops/unstack_builder.h b/frameworks/native/ops/unstack_builder.h new file mode 100755 index 0000000000000000000000000000000000000000..90453761c2c25e9ee1f8bbe01a28283c57ae49b4 --- /dev/null +++ b/frameworks/native/ops/unstack_builder.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NEURAL_NETWORK_RUNTIME_UNSTACK_BUILDER_H +#define NEURAL_NETWORK_RUNTIME_UNSTACK_BUILDER_H + +#include "mindir.h" + +#include "frameworks/native/ops_builder.h" +#include "frameworks/native/ops_registry.h" + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace Ops { +class UnstackBuilder : public OpsBuilder { +public: + UnstackBuilder(); + ~UnstackBuilder() override; + OH_NN_ReturnCode Build(const std::vector& paramsIndex, + const std::vector& inputsIndex, + const std::vector& outputsIndex, + const std::vector>& allTensors) override; + + LiteGraphPrimitvePtr GetPrimitive() override; + +private: + OH_NN_ReturnCode SetAxis(std::shared_ptr tensor); + +private: + int64_t m_axis {0}; +}; +} // namespace Ops +} // namespace NeuralNetworkRuntime +} // namespace OHOS + +#endif // NEURAL_NETWORK_RUNTIME_UNSTACK_BUILDER_H \ No newline at end of file diff --git a/frameworks/native/validation.cpp b/frameworks/native/validation.cpp index 69204c6b4d0c93a94ca3517e22f531903899e2f4..6f31c5def2b3c74d1b9d0bb15426b603843e085d 100644 --- a/frameworks/native/validation.cpp +++ b/frameworks/native/validation.cpp @@ -60,7 +60,7 @@ bool ValidateFuseType(OH_NN_FuseType fuseType) bool ValidateTensorType(OH_NN_TensorType nnTensorType) { - if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_UNSQUEEZE_AXIS)) { + if ((nnTensorType >= OH_NN_TENSOR) && (nnTensorType <= OH_NN_EXP_SHIFT)) { return true; } return false; diff --git a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h 
b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h index 46a47eac22e8c52baf10dbe4d010b4aac0124be8..16e78df4cb31a30cc51ee1b4e1b8dad94870d2a2 100644 --- a/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h +++ b/interfaces/kits/c/neural_network_runtime/neural_network_runtime_type.h @@ -1554,6 +1554,210 @@ typedef enum { * * output: n-dimensional tensor, with the same data type and shape as the input tensor. */ OH_NN_OPS_GELU = 56, + + /** + * Unstacks tensor in specified axis. + * Inputs: + * * input: n-dimensional tensor. A tensor to be unstacked and + * the rank of the tensor must be greater than 0. + * + * Parameters: + * * axis: dimension along which to unstack. Default: 0. Negative values wrap around. The range is [-R, R). + * + * Outputs: + * * output: A tuple of tensors, the shape of each object is the same. + */ + OH_NN_OPS_UNSTACK = 57, + + /** + * Returns absolute value of a tensor element-wise. + * + * Inputs: + * * input: n-dimensional tensor. + * + * Outputs: + * * output: A tensor, has the same shape as the input. + */ + OH_NN_OPS_ABS = 58, + + /** + * Computes the Gauss error function of input element-wise. + * + * Inputs: + * * input: n-dimensional tensor. Its dimensions should be less than 8. + * The data type must be float16 or float32. + * + * Outputs: + * * output: A tensor, has the same shape and dtype as the input. + */ + OH_NN_OPS_ERF = 59, + + /** + * Returns exponential of a tensor element-wise. + * + * Inputs: + * * input: n-dimensional tensor. + * + * Parameters: + * * base: The base of the exponential function. + * * scale: The scale factor applied to the input. + * * shift: The shift added to the scaled input. + * + * Outputs: + * * output: A tensor, has the same shape and dtype as the input. + */ + OH_NN_OPS_EXP = 60, + + /** + * Divides the first input tensor by the second input tensor in floating-point type element-wise. + * + * Inputs: + * * input1: n-dimensional tensor.
The first input is a number or a bool or a tensor + * whose data type input is number or bool. + * * input2: n-dimensional tensor. The second input is a number or a bool when the first input is + * a tensor or a tensor whose data type is number or bool. + * + * Outputs: + * * output: A tensor, the shape is the same as the one after broadcasting, + * * and the data type is the one with higher precision or higher digits among the two inputs. + */ + OH_NN_OPS_REAL_DIV = 61, + + /** + * Computes the boolean value of input1 < input2 element-wise. + * + * Inputs: + * * input1: n-dimensional tensor. + * The first input is a number or a bool or a tensor whose data type is number or bool. + * * input2: n-dimensional tensor. The second input is a number or a bool + * when the first input is a tensor or a tensor whose data type is number or bool. + * + * Outputs: + * * output: A tensor, the shape is the same as the one after broadcasting, and the data type is bool. + */ + OH_NN_OPS_LESS = 62, + + /** + * Returns the selected elements, either from input1 or input2, depending on the condition. + * + * Inputs: + * * input_cond: n-dimensional tensor. The condition tensor, decides which element is chosen. + * * input1: n-dimensional tensor. + * * input2: n-dimensional tensor. + * + * Outputs: + * * output: A tensor, has the same shape as the input_cond. + */ + OH_NN_OPS_SELECT = 63, + + /** + * Returns square of a tensor element-wise. + * + * Inputs: + * * input: n-dimensional tensor. The input tensor whose dtype is number. + * + * Outputs: + * * output: A tensor, has the same shape and dtype as the input. + */ + OH_NN_OPS_SQUARE = 64, + + /** + * Flatten a tensor without changing its batch size on the 0-th axis. + * + * Inputs: + * * input: n-dimensional tensor. Tensor of shape (N, ···) to be flattened. + * + * Parameters: + * * axis: The dimension used to flatten data. 
 + * + * Outputs: + * * output: A 2-dimensional tensor, whose shape is (N, X), + * where X is the product of the remaining dimensions. + */ + OH_NN_OPS_FLATTEN = 65, + + /** + * Rearranges blocks of depth data into spatial dimensions. + * This is the reverse operation of SpaceToDepth. + * The depth of output tensor is input_depth/(block_size*block_size). + * The output tensor's height dimension is height*block_size. + * The output tensor's width dimension is width*block_size. + * The input tensor's depth must be divisible by block_size*block_size. The data format is "NCHW". + * + * Inputs: + * * input: The target tensor. It must be a 4-D tensor with shape (N, Cin, Hin, Win). + * + * Parameters: + * * block_size: The block size used to divide depth data. It must be >= 2. + * * format: The format is the format of the input tensor. + * + * Outputs: + * * output: A tensor with a format shape. + */ + OH_NN_OPS_DEPTH_TO_SPACE = 66, + + /** + * Returns a sequence that starts at the beginning, advances by a step, and does not exceed the end (excluding the end). + * The data types of the three inputs must be the same. + * The data type of the tensor returned by the function is the same as the input data type. + * + * Parameters: + * * d_type: The data type of the output tensor. + * * start: The start value of the output tensor. + * * limit: The end value of the output tensor excluding the value itself. + * * delta: Difference between two adjacent elements. + * + * Outputs: + * * output: 1D Tensor. + */ + OH_NN_OPS_RANGE = 67, + + /** + * Normalize each feature map of each sample. + * + * Inputs: + * * input: A four-dimensional tensor(B, C, H, W). + * + * Parameters: + * * epsilon: A small value to ensure that the variance is not 0. + * + * Outputs: + * * output: A tensor, has the same shape as the input. + */ + OH_NN_OPS_INSTANCE_NORM = 68, + + /** + * Generates a tensor with the given data type and value, with the same shape as the input tensor.
 + * + * Inputs: + * * input: n-dimensional tensor. + * + * Parameters: + * * data_type: The data_type of the output tensor. + * * value: Specifies the value of the constant tensor. + * + * Outputs: + * * output: A tensor, has the same shape as the input. + */ + OH_NN_OPS_CONSTANT_OF_SHAPE = 69, + + /** + * Broadcasts input tensor to a given shape. + * Input shape can be broadcast to target shape if, for each dimension pair, + * they are either equal, or the input's dimension is one, or the target dimension is -1. + * In case of -1 in target shape, it will be replaced by the input shape's value in that dimension. + * When input shape is broadcast to target shape, it starts with trailing dimensions. + * + * Inputs: + * * input: n-dimensional tensor. + * + * Parameters: + * * shape: The target shape to broadcast. Can be fully specified, + * * or have -1 in one position where it will be substituted by the input tensor's shape in that position. + * + * Outputs: + * * output: A tensor, with the given shape and the same data types as input. + */ + OH_NN_OPS_BROADCAST_TO = 70, } OH_NN_OperationType; /** @@ -1760,6 +1964,44 @@ typedef enum { /** This enumerated value is used when the tensor is used as the Axis parameter of the Unsqueeze operator. */ OH_NN_UNSQUEEZE_AXIS = 77, + + /** This enumerated value is used when the tensor is used as the Axis parameter of the Unstack operator. */ + OH_NN_UNSTACK_AXIS = 78, + + /** This enumerated value is used when the tensor is used as the Axis parameter of the Flatten operator. */ + OH_NN_FLATTEN_AXIS = 79, + + /** This enumerated value is used when the tensor is used as the Block_Size parameter of the DepthToSpace operator. */ + OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE = 80, + /** This enumerated value is used when the tensor is used as the Format parameter of the DepthToSpace operator. */ + OH_NN_DEPTH_TO_SPACE_FORMAT = 81, + + /** This enumerated value is used when the tensor is used as the DType parameter of the Range operator.
*/ + OH_NN_RANGE_DTYPE = 82, + /** This enumerated value is used when the tensor is used as the Start parameter of the Range operator. */ + OH_NN_RANGE_START = 83, + /** This enumerated value is used when the tensor is used as the Limit parameter of the Range operator. */ + OH_NN_RANGE_LIMIT = 84, + /** This enumerated value is used when the tensor is used as the Delta parameter of the Range operator. */ + OH_NN_RANGE_DELTA = 85, + + /** This enumerated value is used when the tensor is used as the DataType parameter of the ConstantOfShape operator. */ + OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE = 86, + /** This enumerated value is used when the tensor is used as the Value parameter of the ConstantOfShape operator. */ + OH_NN_CONSTANT_OF_SHAPE_VALUE = 87, + + /** This enumerated value is used when the tensor is used as the Shape parameter of the BroadcastTo operator. */ + OH_NN_BROADCAST_TO_SHAPE = 88, + + /** This enumerated value is used when the tensor is used as the Epsilon parameter of the InstanceNorm operator. */ + OH_NN_INSTANCE_NORM_EPSILON = 89, + + /** This enumerated value is used when the tensor is used as the Base parameter of the Exp operator. */ + OH_NN_EXP_BASE = 90, + /** This enumerated value is used when the tensor is used as the Scale parameter of the Exp operator. */ + OH_NN_EXP_SCALE = 91, + /** This enumerated value is used when the tensor is used as the Shift parameter of the Exp operator. 
*/ + OH_NN_EXP_SHIFT = 92, } OH_NN_TensorType; /** diff --git a/test/unittest/ops/BUILD.gn b/test/unittest/ops/BUILD.gn index 7f65e523bb170a3e57814a0bcc9c41305d04d1e6..d9da104cd6d9a85ebea96f67173f64f1dd16e3da 100644 --- a/test/unittest/ops/BUILD.gn +++ b/test/unittest/ops/BUILD.gn @@ -29,32 +29,40 @@ config("module_private_config") { ohos_unittest("OpsUnittest") { module_out_path = module_output_path - sources = [ "./add_test.cpp" ] + sources = [ "./abs_test.cpp" ] + sources += [ "./add_test.cpp" ] sources += [ "./argmax_test.cpp" ] sources += [ "./avgpool_pad_test.cpp" ] sources += [ "./avgpool_padmod_test.cpp" ] sources += [ "./batch_to_space_nd_test.cpp" ] sources += [ "./batchnorm_builder_test.cpp" ] sources += [ "./biasadd_test.cpp" ] + sources += [ "./broadcast_to_test.cpp" ] sources += [ "./cast_test.cpp" ] sources += [ "./concat_three_inputs_test.cpp" ] sources += [ "./concat_two_inputs_test.cpp" ] + sources += [ "./constant_of_shape_test.cpp" ] sources += [ "./conv2d_pad_test.cpp" ] sources += [ "./conv2d_padmode_test.cpp" ] sources += [ "./conv2d_tranpose_padmode_test.cpp" ] sources += [ "./conv2d_transpose_pad_test.cpp" ] + sources += [ "./depth_to_space_test.cpp" ] sources += [ "./depthwise_conv2d_native_pad_test.cpp" ] sources += [ "./depthwise_conv2d_native_padmode_test.cpp" ] sources += [ "./div_test.cpp" ] sources += [ "./eltwise_test.cpp" ] + sources += [ "./exp_test.cpp" ] sources += [ "./expandims_test.cpp" ] sources += [ "./fullconnection_test.cpp" ] sources += [ "./fullconnection_with_axis_test.cpp" ] sources += [ "./fill_builder_test.cpp" ] + sources += [ "./flatten_test.cpp" ] sources += [ "./gather_builder_test.cpp" ] sources += [ "./gelu_builder_test.cpp" ] sources += [ "./hswish_builder_test.cpp" ] + sources += [ "./instance_norm_test.cpp" ] sources += [ "./layernorm_builder_test.cpp" ] + sources += [ "./less_test.cpp" ] sources += [ "./lessequal_builder_test.cpp" ] sources += [ "./maximum_builder_test.cpp" ] sources += [ 
"./maxpool_pad_test.cpp" ] @@ -66,6 +74,8 @@ ohos_unittest("OpsUnittest") { sources += [ "./pow_builder_test.cpp" ] sources += [ "./prelu_builder_test.cpp" ] sources += [ "./quant_dtype_cast_builder_test.cpp" ] + sources += [ "./range_test.cpp" ] + sources += [ "./real_div_test.cpp" ] sources += [ "./reduce_all_builder_test.cpp" ] sources += [ "./reduce_mean_builder_test.cpp" ] sources += [ "./reduce_prod_builder_test.cpp" ] @@ -82,6 +92,7 @@ ohos_unittest("OpsUnittest") { sources += [ "./spacetobatchnd_builder_test.cpp" ] sources += [ "./split_builder_test.cpp" ] sources += [ "./sqrt_builder_test.cpp" ] + sources += [ "./square_test.cpp" ] sources += [ "./squared_difference_builder_test.cpp" ] sources += [ "./squeeze_builder_test.cpp" ] sources += [ "./stack_builder_test.cpp" ] @@ -92,6 +103,9 @@ ohos_unittest("OpsUnittest") { sources += [ "./topk_builder_test.cpp" ] sources += [ "./transpose_builder_test.cpp" ] sources += [ "./unsqueeze_builder_test.cpp" ] + sources += [ "./unstack_test.cpp" ] + sources += [ "./select_test.cpp" ] + sources += [ "./erf_test.cpp" ] sources += [ "./ops_test.cpp" ] sources += [ "../common/base_test.cpp" ] diff --git a/test/unittest/ops/abs_test.cpp b/test/unittest/ops/abs_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..707b9cedc06bb7d2c494c290c2a955b9c19d526b --- /dev/null +++ b/test/unittest/ops/abs_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/abs_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class AbsBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + AbsBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void AbsBuilderTest::SetUp() {} + +void AbsBuilderTest::TearDown() {} + +/** + * @tc.name: abs_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: abs_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: abs_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: abs_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: abs_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: abs_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: abs_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: abs_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(AbsBuilderTest, abs_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/broadcast_to_test.cpp b/test/unittest/ops/broadcast_to_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7c45e98199f1b505e18c441304b8d9640ded23a5 --- /dev/null +++ b/test/unittest/ops/broadcast_to_test.cpp @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/broadcast_to_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class BroadcastToBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + BroadcastToBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2}; + std::vector m_inputDim {1, 3}; + std::vector m_outputDim {2, 3}; + std::vector m_paramDim {2}; +}; + +void BroadcastToBuilderTest::SetUp() {} + +void BroadcastToBuilderTest::TearDown() {} + +void BroadcastToBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr shapeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* shapeValue = new (std::nothrow) int64_t[2] {2, 3}; + int32_t shapeSize = 2; + EXPECT_NE(nullptr, shapeValue); + shapeTensor->SetBuffer(shapeValue, sizeof(int64_t) * shapeSize); + m_allTensors.emplace_back(shapeTensor); +} + +/** + * @tc.name: broadcast_to_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: broadcast_to_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: broadcast_to_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: broadcast_to_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: broadcast_to_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: broadcast_to_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: broadcast_to_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid shape's dataType. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_007, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    std::shared_ptr<NNTensor> shapeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim,
+        nullptr, OH_NN_BROADCAST_TO_SHAPE);
+    float* shapeValue = new (std::nothrow) float[2] {2.0f, 3.0f};
+    int32_t shapeSize = 2;
+    // new (std::nothrow) may return nullptr; check before use, as the sibling Save* helpers do.
+    EXPECT_NE(nullptr, shapeValue);
+    shapeTensor->SetBuffer(shapeValue, sizeof(float) * shapeSize);
+    m_allTensors.emplace_back(shapeTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    shapeTensor->SetBuffer(nullptr, 0);
+}
+
+/**
+ * @tc.name: broadcast_to_build_008
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid param.
+ * @tc.type: FUNC
+ */
+HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_008, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: broadcast_to_build_009
+ * @tc.desc: Verify that the build function returns a failed message without set buffer for shape.
+ * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_build_009, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr shapeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_BROADCAST_TO_SHAPE); + m_allTensors.emplace_back(shapeTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: broadcast_to_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_BROADCAST_TO_SHAPE); + + std::vector shapeValue = {2, 3}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnShape = mindspore::lite::MindIR_BroadcastTo_GetShape(primitive.get()); + auto returnShapeSize = returnShape.size(); + for (size_t i = 0; i < returnShapeSize; ++i) { + EXPECT_EQ(returnShape[i], shapeValue[i]); + } +} + +/** + * @tc.name: broadcast_to_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(BroadcastToBuilderTest, broadcast_to_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/constant_of_shape_test.cpp b/test/unittest/ops/constant_of_shape_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..64a692df0b2b7fd401ae265a81dce6c17e3949d7 --- /dev/null +++ b/test/unittest/ops/constant_of_shape_test.cpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/constant_of_shape_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ConstantOfShapeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveDataType(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveValue(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ConstantOfShapeBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3}; + std::vector m_inputDim {3}; + std::vector m_outputDim {3}; + std::vector m_dataTypeDim {}; + std::vector m_valueDim {1}; +}; + +void ConstantOfShapeBuilderTest::SetUp() {} + +void ConstantOfShapeBuilderTest::TearDown() {} + +void ConstantOfShapeBuilderTest::SaveDataType(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr dataTypeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* dataTypeValue = new (std::nothrow) int64_t [1]{0}; + dataTypeTensor->SetBuffer(dataTypeValue, sizeof(int64_t)); + m_allTensors.emplace_back(dataTypeTensor); +} + +void ConstantOfShapeBuilderTest::SaveValue(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr valueTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* valueValue = new (std::nothrow) float [1]{1.0f}; + int32_t valueSize = 1; + EXPECT_NE(nullptr, valueValue); + valueTensor->SetBuffer(valueValue, sizeof(float) * valueSize); + m_allTensors.emplace_back(valueTensor); +} + +/** + * @tc.name: constant_of_shape_build_001 + * @tc.desc: Verify that the build 
function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: constant_of_shape_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: constant_of_shape_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_006, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: constant_of_shape_build_007
+ * @tc.desc: Verify that the build function returns a failed message with invalid dataType's dataType.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_007, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    std::shared_ptr<NNTensor> dataTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_dataTypeDim,
+        nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE);
+    float* dataTypeValue = new (std::nothrow) float [1]{0.0f};
+    EXPECT_NE(nullptr, dataTypeValue);
+    // Fix: pass the heap buffer itself. The previous code passed &dataTypeValue (the address of
+    // the local pointer), so the tensor pointed at a stack slot and the allocation leaked.
+    dataTypeTensor->SetBuffer(dataTypeValue, sizeof(float));
+    m_allTensors.emplace_back(dataTypeTensor);
+    SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    dataTypeTensor->SetBuffer(nullptr, 0);
+}
+
+/**
+ * @tc.name: constant_of_shape_build_008
+ * @tc.desc: Verify that the build function returns a failed message with invalid value's dataType.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_008, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE);
+    std::shared_ptr<NNTensor> valueTensor = TransToNNTensor(OH_NN_INT64, m_valueDim,
+        nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE);
+    int64_t* valueValue = new (std::nothrow) int64_t[1] {1};
+    int32_t valueSize = 1;
+    EXPECT_NE(nullptr, valueValue);
+    // Fix: the buffer holds int64_t elements, so size it with sizeof(int64_t); the previous
+    // sizeof(float) declared a 4-byte buffer for an 8-byte value.
+    valueTensor->SetBuffer(valueValue, sizeof(int64_t) * valueSize);
+    m_allTensors.emplace_back(valueTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    valueTensor->SetBuffer(nullptr, 0);
+}
+
+/**
+ * @tc.name: constant_of_shape_build_009
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid dataType param.
+ * @tc.type: FUNC
+ */
+HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_009, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+    SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: constant_of_shape_build_010
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid value param.
+ * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_010, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for dataType. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_011, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr dataTypeTensor = TransToNNTensor(OH_NN_INT64, m_dataTypeDim, + nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + m_allTensors.emplace_back(dataTypeTensor); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_build_012 + * @tc.desc: Verify that the build function returns a failed message without set buffer for value. 
+ * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_build_012, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + std::shared_ptr valueTensor = TransToNNTensor(OH_NN_FLOAT32, m_valueDim, + nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + m_allTensors.emplace_back(valueTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: constant_of_shape_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDataType(OH_NN_INT64, m_dataTypeDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_DATA_TYPE); + SaveValue(OH_NN_FLOAT32, m_valueDim, nullptr, OH_NN_CONSTANT_OF_SHAPE_VALUE); + + int64_t dataTypeValue = 0; + std::vector valueValue = {1}; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnDataTypeValue = mindspore::lite::MindIR_ConstantOfShape_GetDataType(primitive.get()); + EXPECT_EQ(returnDataTypeValue, dataTypeValue); + auto returnValue = mindspore::lite::MindIR_ConstantOfShape_GetValue(primitive.get()); + auto returnValueSize = returnValue.size(); + for (size_t i = 0; i < returnValueSize; ++i) { + EXPECT_EQ(returnValue[i], valueValue[i]); + } +} + +/** + * @tc.name: constant_of_shape_getprimitive_002 + * @tc.desc: Verify that the 
getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(ConstantOfShapeBuilderTest, constant_of_shape_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/depth_to_space_test.cpp b/test/unittest/ops/depth_to_space_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5003c6ddae7df20a4088afbfbbefa9900056a1fc --- /dev/null +++ b/test/unittest/ops/depth_to_space_test.cpp @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/depth_to_space_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class DepthToSpaceBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveFormat(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + DepthToSpaceBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3}; + std::vector m_inputDim {1, 12, 1, 1}; + std::vector m_outputDim {1, 3, 2, 2}; + std::vector m_paramDim {}; +}; + +void DepthToSpaceBuilderTest::SetUp() {} + +void DepthToSpaceBuilderTest::TearDown() {} + +void DepthToSpaceBuilderTest::SaveBlockSize(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr blockSizeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* blockSizeValue = new (std::nothrow) int64_t [1]{2}; + EXPECT_NE(nullptr, blockSizeValue); + blockSizeTensor->SetBuffer(blockSizeValue, sizeof(int64_t)); + m_allTensors.emplace_back(blockSizeTensor); +} + +void DepthToSpaceBuilderTest::SaveFormat(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr formatTensor = TransToNNTensor(dataType, dim, quantParam, type); + int8_t* formatValue = new (std::nothrow) int8_t(1); + EXPECT_NE(nullptr, formatValue); + formatTensor->SetBuffer(formatValue, sizeof(int8_t)); + m_allTensors.emplace_back(formatTensor); +} + +/** + * @tc.name: depth_to_space_build_001 + * @tc.desc: Verify that the build function returns a 
successful message. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: depth_to_space_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: depth_to_space_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid blockSize's dataType. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_007, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + float* blockSizeValue = new (std::nothrow) float [1]{2.0f}; + EXPECT_NE(nullptr, blockSizeValue); + blockSizeTensor->SetBuffer(blockSizeValue, sizeof(float)); + m_allTensors.emplace_back(blockSizeTensor); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + blockSizeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: depth_to_space_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid format's dataType. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_008, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE);
+    std::shared_ptr<NNTensor> formatTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim,
+        nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT);
+    int64_t* formatValue = new (std::nothrow) int64_t(1);
+    // new (std::nothrow) may return nullptr; check before use, as SaveBlockSize/SaveFormat do.
+    EXPECT_NE(nullptr, formatValue);
+    formatTensor->SetBuffer(formatValue, sizeof(int64_t));
+    m_allTensors.emplace_back(formatTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    formatTensor->SetBuffer(nullptr, 0);
+}
+
+/**
+ * @tc.name: depth_to_space_build_009
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid blockSize param.
+ * @tc.type: FUNC
+ */
+HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_009, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr);
+    SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE);
+    SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: depth_to_space_build_010
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid format param.
+ * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_010, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_011 + * @tc.desc: Verify that the build function returns a failed message without set buffer for blockSize. + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_011, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr blockSizeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + m_allTensors.emplace_back(blockSizeTensor); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_build_012 + * @tc.desc: Verify that the build function returns a failed message without set buffer for format. 
+ * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_build_012, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + std::shared_ptr formatTensor = TransToNNTensor(OH_NN_INT8, m_paramDim, + nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + m_allTensors.emplace_back(formatTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: depth_to_space_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveBlockSize(OH_NN_INT64, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_BLOCK_SIZE); + SaveFormat(OH_NN_INT8, m_paramDim, nullptr, OH_NN_DEPTH_TO_SPACE_FORMAT); + + int64_t blockSizeValue = 2; + mindspore::lite::Format formatValue = mindspore::lite::FORMAT_NCHW; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnBlockSizeValue = mindspore::lite::MindIR_DepthToSpace_GetBlockSize(primitive.get()); + EXPECT_EQ(returnBlockSizeValue, blockSizeValue); + mindspore::lite::Format returnFormatValue = mindspore::lite::MindIR_DepthToSpace_GetFormat(primitive.get()); + EXPECT_EQ(returnFormatValue, formatValue); +} + +/** + * @tc.name: depth_to_space_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(DepthToSpaceBuilderTest, depth_to_space_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/erf_test.cpp b/test/unittest/ops/erf_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..47e14d28113076745e26d59250535266295fe1c4 --- /dev/null +++ b/test/unittest/ops/erf_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/erf_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ErfBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + ErfBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void ErfBuilderTest::SetUp() {} + +void ErfBuilderTest::TearDown() {} + +/** + * @tc.name: erf_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: erf_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: erf_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: erf_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: erf_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: erf_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: erf_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: erf_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(ErfBuilderTest, erf_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/exp_test.cpp b/test/unittest/ops/exp_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fc95dfac34f72267c8007c8dec5d123584228547 --- /dev/null +++ b/test/unittest/ops/exp_test.cpp @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/exp_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class ExpBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveBase(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveShift(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + ExpBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2, 3, 4}; + std::vector m_dim {1, 2, 2, 1}; + std::vector m_paramDim {}; +}; + +void ExpBuilderTest::SetUp() {} + +void ExpBuilderTest::TearDown() {} + +void ExpBuilderTest::SaveBase(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr baseTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* baseValue = new (std::nothrow) float [1]{-1.0f}; + EXPECT_NE(nullptr, baseValue); + baseTensor->SetBuffer(baseValue, sizeof(float)); + m_allTensors.emplace_back(baseTensor); +} + +void ExpBuilderTest::SaveScale(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr scaleTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* scaleValue = new (std::nothrow) float [1]{1.0f}; + EXPECT_NE(nullptr, scaleValue); + scaleTensor->SetBuffer(scaleValue, sizeof(float)); + m_allTensors.emplace_back(scaleTensor); +} + +void ExpBuilderTest::SaveShift(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, 
OH_NN_TensorType type) +{ + std::shared_ptr shiftTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* shiftValue = new (std::nothrow) float [1]{0.0f}; + EXPECT_NE(nullptr, shiftValue); + shiftTensor->SetBuffer(shiftValue, sizeof(float)); + m_allTensors.emplace_back(shiftTensor); +} + +/** + * @tc.name: exp_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: exp_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: exp_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3, 4, 5}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3, 4, 5}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid base's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_007, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr baseTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_EXP_BASE); + int64_t* baseValue = new (std::nothrow) int64_t [1]{-1}; + EXPECT_NE(nullptr, baseValue); + baseTensor->SetBuffer(baseValue, sizeof(int64_t)); + m_allTensors.emplace_back(baseTensor); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + baseTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: exp_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid scale's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_008, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_EXP_SCALE); + int64_t* scaleValue = new (std::nothrow) int64_t [1]{1}; + EXPECT_NE(nullptr, scaleValue); + scaleTensor->SetBuffer(scaleValue, sizeof(int64_t)); + m_allTensors.emplace_back(scaleTensor); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + scaleTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: exp_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid shift's dataType. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_009, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_EXP_SHIFT); + int64_t* shiftValue = new (std::nothrow) int64_t [1]{0}; + EXPECT_NE(nullptr, shiftValue); + shiftTensor->SetBuffer(shiftValue, sizeof(int64_t)); + m_allTensors.emplace_back(shiftTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + shiftTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: exp_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid base param. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_010, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_011 + * @tc.desc: Verify that the build function returns a failed message with passing invalid scale param. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_011, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_012 + * @tc.desc: Verify that the build function returns a failed message with passing invalid shift param. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_012, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_013 + * @tc.desc: Verify that the build function returns a failed message without set buffer for base. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_013, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + std::shared_ptr baseTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_EXP_BASE); + m_allTensors.emplace_back(baseTensor); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_014 + * @tc.desc: Verify that the build function returns a failed message without set buffer for scale. 
+ * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_014, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + std::shared_ptr scaleTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_EXP_SCALE); + m_allTensors.emplace_back(scaleTensor); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for shift. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_build_015, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + std::shared_ptr shiftTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_EXP_SHIFT); + m_allTensors.emplace_back(shiftTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: exp_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveBase(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_BASE); + SaveScale(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SCALE); + SaveShift(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_EXP_SHIFT); + + float baseValue = -1.0f; + float scaleValue = 1.0f; + float shiftValue = 
0.0f; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnBaseValue = mindspore::lite::MindIR_ExpFusion_GetBase(primitive.get()); + EXPECT_EQ(returnBaseValue, baseValue); + auto returnScaleValue = mindspore::lite::MindIR_ExpFusion_GetScale(primitive.get()); + EXPECT_EQ(returnScaleValue, scaleValue); + auto returnShiftValue = mindspore::lite::MindIR_ExpFusion_GetShift(primitive.get()); + EXPECT_EQ(returnShiftValue, shiftValue); +} + +/** + * @tc.name: exp_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(ExpBuilderTest, exp_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/flatten_test.cpp b/test/unittest/ops/flatten_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..086e4f2221e71f0727c2f7c5487df4518ce6f88e --- /dev/null +++ b/test/unittest/ops/flatten_test.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/flatten_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class FlattenBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + FlattenBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2}; + std::vector m_inputDim {1, 2, 2, 1}; + std::vector m_outputDim {4}; + std::vector m_paramDim {}; +}; + +void FlattenBuilderTest::SetUp() {} + +void FlattenBuilderTest::TearDown() {} + +void FlattenBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr axisTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t *axisValue = new (std::nothrow) int64_t[1]{0}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(int64_t)); + m_allTensors.emplace_back(axisTensor); +} + +/** + * @tc.name: flatten_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: flatten_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_002, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: flatten_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_003, TestSize.Level0) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_004, TestSize.Level0) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_005, TestSize.Level0) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_006, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_007, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_FLATTEN_AXIS); + float *axisValue = new (std::nothrow) float[1]{1.0f}; + EXPECT_NE(nullptr, axisValue); + axisTensor->SetBuffer(axisValue, sizeof(float)); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + axisTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: flatten_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid param. 
+ * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_008, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_build_009, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_FLATTEN_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: flatten_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_getprimitive_001, TestSize.Level0) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_FLATTEN_AXIS); + + int64_t axisValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Flatten_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, axisValue); +} + +/** + * @tc.name: flatten_getprimitive_002 
+ * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(FlattenBuilderTest, flatten_getprimitive_002, TestSize.Level0) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/instance_norm_test.cpp b/test/unittest/ops/instance_norm_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2dbc7a0729b359524e710be517b26d71c2faf4c4 --- /dev/null +++ b/test/unittest/ops/instance_norm_test.cpp @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/instance_norm_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class InstanceNormBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + InstanceNormBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_params {2}; + std::vector m_inputDim {2, 2, 2}; + std::vector m_outputDim {2, 2, 2}; + std::vector m_paramDim {}; +}; + +void InstanceNormBuilderTest::SetUp() {} + +void InstanceNormBuilderTest::TearDown() {} + +void InstanceNormBuilderTest::SaveParamsTensor(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr epsilonTensor = TransToNNTensor(dataType, dim, quantParam, type); + float* epsilonValue = new (std::nothrow) float [1]{0.0f}; + EXPECT_NE(nullptr, epsilonValue); + epsilonTensor->SetBuffer(epsilonValue, sizeof(float)); + m_allTensors.emplace_back(epsilonTensor); +} + +/** + * @tc.name: instance_norm_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: instance_norm_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: instance_norm_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid epsilon's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_007, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_INSTANCE_NORM_EPSILON); + int64_t* epsilonValue = new (std::nothrow) int64_t [1]{0}; + EXPECT_NE(nullptr, epsilonValue); + epsilonTensor->SetBuffer(epsilonValue, sizeof(int64_t)); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + epsilonTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: instance_norm_build_008 + * @tc.desc: Verify that the build function returns a failed message with passing invalid epsilon param. + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_008, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for epsilon. 
+ * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_build_009, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr epsilonTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_INSTANCE_NORM_EPSILON); + m_allTensors.emplace_back(epsilonTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: instance_norm_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_inputDim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveParamsTensor(OH_NN_FLOAT32, m_paramDim, nullptr, OH_NN_INSTANCE_NORM_EPSILON); + + float epsilonValue = 0.0f; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnEpsilonValue = mindspore::lite::MindIR_InstanceNorm_GetEpsilon(primitive.get()); + EXPECT_EQ(returnEpsilonValue, epsilonValue); +} + +/** + * @tc.name: instance_norm_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(InstanceNormBuilderTest, instance_norm_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/less_test.cpp b/test/unittest/ops/less_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ebb3b81bbb88fbe39698488295a72d8b0503d5e --- /dev/null +++ b/test/unittest/ops/less_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/less_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class LessBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + LessBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void LessBuilderTest::SetUp() {} + +void LessBuilderTest::TearDown() {} + +/** + * @tc.name: less_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: less_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: less_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: less_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: less_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: less_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: less_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: less_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(LessBuilderTest, less_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/range_test.cpp b/test/unittest/ops/range_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..643557c6e3a79af2a32a856a88a13b806ec63cb8 --- /dev/null +++ b/test/unittest/ops/range_test.cpp @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/range_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RangeBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + void SaveDType(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveStart(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveLimit(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + void SaveDelta(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type); + +protected: + RangeBuilder m_builder; + std::vector m_outputs {0}; + std::vector m_params {1, 2, 3, 4}; + std::vector m_outputDim {3}; + std::vector m_paramDim {}; +}; + +void RangeBuilderTest::SetUp() {} + +void RangeBuilderTest::TearDown() {} + +void RangeBuilderTest::SaveDType(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr dTypeTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* dTypeValue = new (std::nothrow) int64_t [1]{0}; + EXPECT_NE(nullptr, dTypeValue); + dTypeTensor->SetBuffer(dTypeValue, sizeof(int64_t)); + m_allTensors.emplace_back(dTypeTensor); +} + +void RangeBuilderTest::SaveStart(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr startTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* startValue = new (std::nothrow) int64_t [1]{0}; + EXPECT_NE(nullptr, startValue); + startTensor->SetBuffer(startValue, sizeof(int64_t)); + m_allTensors.emplace_back(startTensor); +} + 
+void RangeBuilderTest::SaveLimit(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr limitTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* limitValue = new (std::nothrow) int64_t [1]{3}; + EXPECT_NE(nullptr, limitValue); + limitTensor->SetBuffer(limitValue, sizeof(int64_t)); + m_allTensors.emplace_back(limitTensor); +} + +void RangeBuilderTest::SaveDelta(OH_NN_DataType dataType, + const std::vector &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type) +{ + std::shared_ptr deltaTensor = TransToNNTensor(dataType, dim, quantParam, type); + int64_t* deltaValue = new (std::nothrow) int64_t [1]{1}; + EXPECT_NE(nullptr, deltaValue); + deltaTensor->SetBuffer(deltaValue, sizeof(int64_t)); + m_allTensors.emplace_back(deltaTensor); +} + +/** + * @tc.name: range_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_001, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: range_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_002, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: range_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_003, TestSize.Level2) +{ + m_outputs = {0, 1}; + m_params = {2, 3, 4, 5}; + + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_004 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_004, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_005 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_006 + * @tc.desc: Verify that the build function returns a failed message with invalid dType's dataType. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_006, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + std::shared_ptr dTypeTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_RANGE_DTYPE); + float* dTypeValue = new (std::nothrow) float [1]{0.0f}; + EXPECT_NE(nullptr, dTypeValue); + dTypeTensor->SetBuffer(dTypeValue, sizeof(float)); + m_allTensors.emplace_back(dTypeTensor); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + dTypeTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: range_build_007 + * @tc.desc: Verify that the build function returns a failed message with invalid start's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_007, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + std::shared_ptr startTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_RANGE_START); + float* startValue = new (std::nothrow) float [1]{0.0f}; + EXPECT_NE(nullptr, startValue); + startTensor->SetBuffer(startValue, sizeof(float)); + m_allTensors.emplace_back(startTensor); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + startTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: range_build_008 + * @tc.desc: Verify that the build function returns a failed message with invalid limit's dataType. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_008, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + std::shared_ptr limitTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_RANGE_LIMIT); + float* limitValue = new (std::nothrow) float [1]{3.0f}; + EXPECT_NE(nullptr, limitValue); + limitTensor->SetBuffer(limitValue, sizeof(float)); + m_allTensors.emplace_back(limitTensor); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + limitTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: range_build_009 + * @tc.desc: Verify that the build function returns a failed message with invalid delta's dataType. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_009, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + std::shared_ptr deltaTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim, + nullptr, OH_NN_RANGE_DELTA); + float* deltaValue = new (std::nothrow) float [1]{1.0f}; + EXPECT_NE(nullptr, deltaValue); + deltaTensor->SetBuffer(deltaValue, sizeof(float)); + m_allTensors.emplace_back(deltaTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); + deltaTensor->SetBuffer(nullptr, 0); +} + +/** + * @tc.name: range_build_010 + * @tc.desc: Verify that the build function returns a failed message with passing invalid dType param. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_010, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_011 + * @tc.desc: Verify that the build function returns a failed message with passing invalid start param. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_011, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_012 + * @tc.desc: Verify that the build function returns a failed message with passing invalid limit param. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_012, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_013 + * @tc.desc: Verify that the build function returns a failed message with passing invalid delta param. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_013, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_014 + * @tc.desc: Verify that the build function returns a failed message without set buffer for dType. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_014, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + std::shared_ptr dTypeTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_RANGE_DTYPE); + m_allTensors.emplace_back(dTypeTensor); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_015 + * @tc.desc: Verify that the build function returns a failed message without set buffer for start. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_015, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + std::shared_ptr startTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_RANGE_START); + m_allTensors.emplace_back(startTensor); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_016 + * @tc.desc: Verify that the build function returns a failed message without set buffer for limit. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_016, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + std::shared_ptr limitTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_RANGE_LIMIT); + m_allTensors.emplace_back(limitTensor); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_build_017 + * @tc.desc: Verify that the build function returns a failed message without set buffer for delta. 
+ * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_build_017, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + std::shared_ptr deltaTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_RANGE_DELTA); + m_allTensors.emplace_back(deltaTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: range_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_getprimitive_001, TestSize.Level2) +{ + SaveOutputTensor(m_outputs, OH_NN_INT32, m_outputDim, nullptr); + SaveDType(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DTYPE); + SaveStart(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_START); + SaveLimit(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_LIMIT); + SaveDelta(OH_NN_INT64, m_paramDim, nullptr, OH_NN_RANGE_DELTA); + + int64_t dTypeValue = 0; + int64_t startValue = 0; + int64_t limitValue = 3; + int64_t deltaValue = 1; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnDTypeValue = mindspore::lite::MindIR_Range_GetDType(primitive.get()); + EXPECT_EQ(returnDTypeValue, dTypeValue); + auto returnStartValue = mindspore::lite::MindIR_Range_GetStart(primitive.get()); + EXPECT_EQ(returnStartValue, startValue); + auto returnLimitValue = mindspore::lite::MindIR_Range_GetLimit(primitive.get()); + EXPECT_EQ(returnLimitValue, limitValue); + auto returnDeltaValue = 
mindspore::lite::MindIR_Range_GetDelta(primitive.get()); + EXPECT_EQ(returnDeltaValue, deltaValue); +} + +/** + * @tc.name: range_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(RangeBuilderTest, range_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/real_div_test.cpp b/test/unittest/ops/real_div_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..92487c5f61fca823d198d7d797d615f146014c91 --- /dev/null +++ b/test/unittest/ops/real_div_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "frameworks/native/ops/real_div_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class RealDivBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + RealDivBuilder m_builder; + std::vector m_inputs {0, 1}; + std::vector m_outputs {2}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void RealDivBuilderTest::SetUp() {} + +void RealDivBuilderTest::TearDown() {} + +/** + * @tc.name: real_div_build_001 + * @tc.desc: Verify that the build function returns a successful message. + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: real_div_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: real_div_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalid input. 
+ * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2}; + m_outputs = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: real_div_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_004, TestSize.Level2) +{ + m_outputs = {2, 3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: real_div_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: real_div_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: real_div_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: real_div_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(RealDivBuilderTest, real_div_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/select_test.cpp b/test/unittest/ops/select_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..72038f28a39a562aae6865f049e55121480cbe73 --- /dev/null +++ b/test/unittest/ops/select_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "frameworks/native/ops/select_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class SelectBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    SelectBuilder m_builder;
+    std::vector<uint32_t> m_inputs {0, 1, 2};
+    std::vector<uint32_t> m_outputs {3};
+    std::vector<int32_t> m_dim {1, 2, 2, 1};
+};
+
+void SelectBuilderTest::SetUp() {}
+
+void SelectBuilderTest::TearDown() {}
+
+/**
+ * @tc.name: select_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(SelectBuilderTest, select_build_001, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: select_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: select_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_build_003, TestSize.Level2) +{ + m_inputs = {0, 1, 2, 3}; + m_outputs = {4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: select_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_build_004, TestSize.Level2) +{ + m_outputs = {3, 4}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: select_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: select_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: select_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: select_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(SelectBuilderTest, select_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/square_test.cpp b/test/unittest/ops/square_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5743ee5be1822822a0c3eb9651bcc53530d833aa --- /dev/null +++ b/test/unittest/ops/square_test.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "frameworks/native/ops/square_builder.h" + +#include "ops_test.h" + +using namespace testing; +using namespace testing::ext; +using namespace OHOS::NeuralNetworkRuntime::Ops; + +namespace OHOS { +namespace NeuralNetworkRuntime { +namespace UnitTest { +class SquareBuilderTest : public OpsTest { +public: + void SetUp() override; + void TearDown() override; + +protected: + SquareBuilder m_builder; + std::vector m_inputs {0}; + std::vector m_outputs {1}; + std::vector m_dim {1, 2, 2, 1}; +}; + +void SquareBuilderTest::SetUp() {} + +void SquareBuilderTest::TearDown() {} + +/** + * @tc.name: square_build_001 + * @tc.desc: Verify that the build function returns a successful message. 
+ * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_SUCCESS, ret); +} + +/** + * @tc.name: square_build_002 + * @tc.desc: Verify that the build function returns a failed message with true m_isBuild. + * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: square_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: square_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. 
+ * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: square_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. + * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_005, TestSize.Level2) +{ + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputs, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: square_build_006 + * @tc.desc: Verify that the build function returns a failed message without output tensor. + * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_build_006, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + + OH_NN_ReturnCode ret = m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputs, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: square_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_paramsIndex, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); +} + +/** + * @tc.name: square_getprimitive_002 + * @tc.desc: Verify that the getPrimitive function returns a failed message without build. 
+ * @tc.type: FUNC + */ +HWTEST_F(SquareBuilderTest, square_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file diff --git a/test/unittest/ops/unstack_test.cpp b/test/unittest/ops/unstack_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f5e6706eb03562800bdb3d212be58c36b73751ae --- /dev/null +++ b/test/unittest/ops/unstack_test.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "frameworks/native/ops/unstack_builder.h"
+
+#include "ops_test.h"
+
+using namespace testing;
+using namespace testing::ext;
+using namespace OHOS::NeuralNetworkRuntime::Ops;
+
+namespace OHOS {
+namespace NeuralNetworkRuntime {
+namespace UnitTest {
+class UnstackBuilderTest : public OpsTest {
+public:
+    void SetUp() override;
+    void TearDown() override;
+
+protected:
+    void SaveParamsTensor(OH_NN_DataType dataType,
+        const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type);
+
+protected:
+    UnstackBuilder m_builder;
+    std::vector<uint32_t> m_inputs {0};
+    std::vector<uint32_t> m_outputs {1};
+    std::vector<uint32_t> m_params {2};
+    std::vector<int32_t> m_dim {1, 2, 2, 1};
+    std::vector<int32_t> m_paramDim {};
+};
+
+void UnstackBuilderTest::SetUp() {}
+
+void UnstackBuilderTest::TearDown() {}
+
+void UnstackBuilderTest::SaveParamsTensor(OH_NN_DataType dataType,
+    const std::vector<int32_t> &dim, const OH_NN_QuantParam* quantParam, OH_NN_TensorType type)
+{
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(dataType, dim, quantParam, type);
+    int64_t* axisValue = new (std::nothrow) int64_t[1]{0};
+    EXPECT_NE(nullptr, axisValue);
+    axisTensor->SetBuffer(axisValue, sizeof(int64_t));
+    m_allTensors.emplace_back(axisTensor);
+}
+
+/**
+ * @tc.name: unstack_build_001
+ * @tc.desc: Verify that the build function returns a successful message.
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnstackBuilderTest, unstack_build_001, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr);
+    SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_SUCCESS, ret);
+}
+
+/**
+ * @tc.name: unstack_build_002
+ * @tc.desc: Verify that the build function returns a failed message with true m_isBuild.
+ * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_build_002, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); + + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); +} + +/** + * @tc.name: unstack_build_003 + * @tc.desc: Verify that the build function returns a failed message with invalided input. + * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_build_003, TestSize.Level2) +{ + m_inputs = {0, 1}; + m_outputs = {2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unstack_build_004 + * @tc.desc: Verify that the build function returns a failed message with invalided output. + * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_build_004, TestSize.Level2) +{ + m_outputs = {1, 2}; + m_params = {3}; + + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unstack_build_005 + * @tc.desc: Verify that the build function returns a failed message with empty allTensor. 
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnstackBuilderTest, unstack_build_005, TestSize.Level2)
+{
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputs, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unstack_build_006
+ * @tc.desc: Verify that the build function returns a failed message without output tensor.
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnstackBuilderTest, unstack_build_006, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputs, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+}
+
+/**
+ * @tc.name: unstack_build_007
+ * @tc.desc: Verify that the build function returns a failed message with invalid axis's dataType.
+ * @tc.type: FUNC
+ */
+HWTEST_F(UnstackBuilderTest, unstack_build_007, TestSize.Level2)
+{
+    SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr);
+    SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr);
+    std::shared_ptr<NNTensor> axisTensor = TransToNNTensor(OH_NN_FLOAT32, m_paramDim,
+        nullptr, OH_NN_UNSTACK_AXIS);
+    float* axisValue = new (std::nothrow) float[1]{1.0f};
+    axisTensor->SetBuffer(axisValue, sizeof(float));  // one float allocated; sizeof(axisValue) was sizeof a pointer
+    m_allTensors.emplace_back(axisTensor);
+
+    OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors);
+    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
+    axisTensor->SetBuffer(nullptr, 0);
+}
+
+/**
+ * @tc.name: unstack_build_008
+ * @tc.desc: Verify that the build function returns a failed message with passing invalid axis param.
+ * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_build_008, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_MUL_ACTIVATION_TYPE); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unstack_build_009 + * @tc.desc: Verify that the build function returns a failed message without set buffer for axis. + * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_build_009, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + std::shared_ptr axisTensor = TransToNNTensor(OH_NN_INT64, m_paramDim, + nullptr, OH_NN_UNSTACK_AXIS); + m_allTensors.emplace_back(axisTensor); + + OH_NN_ReturnCode ret = m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors); + EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret); +} + +/** + * @tc.name: unstack_getprimitive_001 + * @tc.desc: Verify that the getPrimitive function returns a successful message + * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_getprimitive_001, TestSize.Level2) +{ + SaveInputTensor(m_inputs, OH_NN_INT32, m_dim, nullptr); + SaveOutputTensor(m_outputs, OH_NN_INT32, m_dim, nullptr); + SaveParamsTensor(OH_NN_INT64, m_paramDim, nullptr, OH_NN_UNSTACK_AXIS); + + int64_t axisValue = 0; + EXPECT_EQ(OH_NN_SUCCESS, m_builder.Build(m_params, m_inputsIndex, m_outputsIndex, m_allTensors)); + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_NE(expectPrimitive, primitive); + + auto returnValue = mindspore::lite::MindIR_Unstack_GetAxis(primitive.get()); + EXPECT_EQ(returnValue, axisValue); +} + +/** + * @tc.name: unstack_getprimitive_002 + * @tc.desc: Verify that the 
getPrimitive function returns a failed message without build. + * @tc.type: FUNC + */ +HWTEST_F(UnstackBuilderTest, unstack_getprimitive_002, TestSize.Level2) +{ + LiteGraphPrimitvePtr primitive = m_builder.GetPrimitive(); + LiteGraphPrimitvePtr expectPrimitive(nullptr, DestroyLiteGraphPrimitive); + EXPECT_EQ(expectPrimitive, primitive); +} +} +} +} \ No newline at end of file