diff --git a/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.cc b/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ce2bcfa537f2b3111c645c954eca8478c2edefe2
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.cc
@@ -0,0 +1,202 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/litert/kernel/dsp/ft04/applymomentum.h"
+
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "src/common/utils.h"
+#include "src/litert/kernel/cpu/nnacl_c/nnacl_common.h"
+#include "src/litert/kernel/cpu/nnacl_c/fp32_grad/optimizer.h"
+#include "src/litert/kernel_registry.h"
+
+using mindspore::kernel::KERNEL_ARCH::kDSP;
+using mindspore::lite::KernelRegistrar;
+using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_OK;
+using mindspore::schema::PrimitiveType_ApplyMomentum;
+
+namespace mindspore::kernel {
+int ApplyMomentumDSPKernel::Prepare() { return RET_OK; }
+
+int ApplyMomentumDSPKernel::CheckSpecs() {
+  if (in_tensors_.size() != kApplyMomentumInputTensorSize) {
+    MS_LOG(WARNING) << "Input size mismatch: expected " << kApplyMomentumInputTensorSize << ", got "
+                    << in_tensors_.size();
+    return RET_ERROR;
+  }
+  if (out_tensors_.size() != kApplyMomentumOutputTensorSize) {
+    MS_LOG(WARNING) << "Output size mismatch: expected " << kApplyMomentumOutputTensorSize << ", got "
+                    << out_tensors_.size();
+    return RET_ERROR;
+  }
+
+  auto weight_shape = in_tensors_[kApplyMomentumWeightIdx]->shape();
+  if (weight_shape != in_tensors_[kApplyMomentumAccumulateIdx]->shape() ||
+      weight_shape != in_tensors_[kApplyMomentumGradientIdx]->shape()) {
+    MS_LOG(WARNING) << "Weight, accumulate or gradient tensor shapes mismatch.";
+    return RET_ERROR;
+  }
+
+  auto data_type = in_tensors_[kApplyMomentumWeightIdx]->data_type();
+  if (data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16) {
+    MS_LOG(WARNING) << "Unsupported data type: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  auto check_scalar = [&](const lite::Tensor *tensor) -> bool {
+    if (tensor == nullptr || tensor->ElementsNum() != 1) {
+      return false;
+    }
+    auto tensor_type = tensor->data_type();
+    if (data_type == kNumberTypeFloat32) {
+      return tensor_type == kNumberTypeFloat32;
+    }
+    return tensor_type == kNumberTypeFloat16;
+  };
+
+  if (!check_scalar(in_tensors_[kApplyMomentumLrIdx]) || !check_scalar(in_tensors_[kApplyMomentumMomentumIdx])) {
+    MS_LOG(WARNING) << "Optimizer scalar tensors are invalid.";
+    return RET_ERROR;
+  }
+
+  return RET_OK;
+}
+
+int ApplyMomentumDSPKernel::ApplyMomentumRunFp32() {
+  kernel_name_ = "fp_applymomentum_s";
+  core_mask_ = 0xf;
+  return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_);
+}
+
+int ApplyMomentumDSPKernel::ApplyMomentumRunFp16() {
+  kernel_name_ = "hp_applymomentum_s";
+  core_mask_ = 0xf;
+  return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_);
+}
+
+int ApplyMomentumDSPKernel::Run() {
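+  // Arguments packed into SetKernelArg() below: weight/accumulate/gradient device pointers,
+  // a float parameter buffer holding {lr, momentum}, an int parameter buffer holding
+  // {0, element count}, and the use_nesterov flag.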
+  auto allocator = dsp_runtime_->GetAllocator();
+
+  auto *weight = in_tensors_[kApplyMomentumWeightIdx];
+
+  int64_t elements_num = weight->ElementsNum();
+
+  auto data_type = weight->data_type();
+  auto *param = reinterpret_cast<ApplyMomentumParameter *>(op_parameter_);
+
+  uint64_t weight_device_ptr = allocator->GetDeviceMemPtr(weight->data());
+  uint64_t accumulate_device_ptr = allocator->GetDeviceMemPtr(in_tensors_[kApplyMomentumAccumulateIdx]->data());
+  uint64_t grad_device_ptr = allocator->GetDeviceMemPtr(in_tensors_[kApplyMomentumGradientIdx]->data());
+
+  size_t float_param_bytes = 0;
+  if (data_type == kNumberTypeFloat32) {
+    float_param_bytes = sizeof(float) * kApplyMomentumFloatParamSize;
+  } else if (data_type == kNumberTypeFloat16) {
+    float_param_bytes = sizeof(uint16_t) * kApplyMomentumFloatParamSize;
+  } else {
+    MS_LOG(ERROR) << "Unsupported data type: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  void *float_params_buffer = allocator->Malloc(float_param_bytes);
+
+  auto free_float_buffer = [&]() {
+    if (float_params_buffer != nullptr) {
+      allocator->Free(float_params_buffer);
+      float_params_buffer = nullptr;
+    }
+  };
+
+  const size_t scalar_indices[kApplyMomentumFloatParamSize] = {kApplyMomentumLrIdx, kApplyMomentumMomentumIdx};
+  if (data_type == kNumberTypeFloat32) {
+    float float_params[kApplyMomentumFloatParamSize] = {0.f};
+    for (size_t i = 0; i < kApplyMomentumFloatParamSize; ++i) {
+      const lite::Tensor *tensor = in_tensors_[scalar_indices[i]];
+      if (tensor->data_type() != kNumberTypeFloat32) {
+        free_float_buffer();
+        MS_LOG(ERROR) << "Scalar tensor type mismatch: expected FP32.";
+        return RET_ERROR;
+      }
+      float_params[i] = *(reinterpret_cast<const float *>(tensor->data()));
+    }
+    std::memcpy(float_params_buffer, float_params, float_param_bytes);
+  } else {
+    uint16_t float16_params[kApplyMomentumFloatParamSize] = {0};
+    for (size_t i = 0; i < kApplyMomentumFloatParamSize; ++i) {
+      const lite::Tensor *tensor = in_tensors_[scalar_indices[i]];
+      if (tensor->data_type() != kNumberTypeFloat16) {
+        free_float_buffer();
+        MS_LOG(ERROR) << "Scalar tensor type mismatch: expected FP16.";
+        return RET_ERROR;
+      }
+      float16_params[i] = *(reinterpret_cast<const uint16_t *>(tensor->data()));
+    }
+    std::memcpy(float_params_buffer, float16_params, float_param_bytes);
+  }
+
+  uint64_t float_params_device_ptr = allocator->GetDeviceMemPtr(float_params_buffer);
+
+  void *int_params_buffer = allocator->Malloc(sizeof(int32_t) * kApplyMomentumIntParamSize);
+
+  auto free_all_buffers = [&]() {
+    if (float_params_buffer != nullptr) {
+      allocator->Free(float_params_buffer);
+      float_params_buffer = nullptr;
+    }
+    if (int_params_buffer != nullptr) {
+      allocator->Free(int_params_buffer);
+      int_params_buffer = nullptr;
+    }
+  };
+
+  auto *int_params = reinterpret_cast<int32_t *>(int_params_buffer);
+  int_params[0] = 0;
+  int_params[1] = static_cast<int32_t>(elements_num);
+
+  uint64_t int_params_device_ptr = allocator->GetDeviceMemPtr(int_params_buffer);
+
+  int use_nesterov = param->use_nesterov_ ? 1 : 0;
+  SetKernelArg({weight_device_ptr, accumulate_device_ptr, grad_device_ptr, float_params_device_ptr,
+                int_params_device_ptr, static_cast<uint64_t>(use_nesterov)});
+
+  int ret = RET_ERROR;
+  if (data_type == kNumberTypeFloat32) {
+    ret = ApplyMomentumRunFp32();
+  } else if (data_type == kNumberTypeFloat16) {
+    ret = ApplyMomentumRunFp16();
+  } else {
+    free_all_buffers();
+    MS_LOG(ERROR) << "Unsupported data type: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  free_all_buffers();
+
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << this->name() << " Run failed!";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+REG_KERNEL(kDSP, kNumberTypeFloat32, PrimitiveType_ApplyMomentum, DSPKernelCreator<ApplyMomentumDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeFloat16, PrimitiveType_ApplyMomentum, DSPKernelCreator<ApplyMomentumDSPKernel>)
+}  // namespace mindspore::kernel
diff --git a/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.h b/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.h
new file mode 100644
index 0000000000000000000000000000000000000000..3460cbc61f389ab25a873bcaa157e345c7b9c7bb
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft04/applymomentum.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT04_APPLYMOMENTUM_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT04_APPLYMOMENTUM_H_
+
+#include <string>
+#include "src/litert/kernel/dsp/dsp_kernel.h"
+
+namespace mindspore::kernel {
+constexpr size_t kApplyMomentumInputTensorSize = 5;
+constexpr size_t kApplyMomentumOutputTensorSize = 1;
+constexpr size_t kApplyMomentumFloatParamSize = 2;
+constexpr size_t kApplyMomentumIntParamSize = 2;
+
+constexpr size_t kApplyMomentumWeightIdx = 0;
+constexpr size_t kApplyMomentumAccumulateIdx = 1;
+constexpr size_t kApplyMomentumLrIdx = 2;
+constexpr size_t kApplyMomentumGradientIdx = 3;
+constexpr size_t kApplyMomentumMomentumIdx = 4;
+
+class ApplyMomentumDSPKernel : public DSPKernel {
+ public:
+  using DSPKernel::DSPKernel;
+
+  ~ApplyMomentumDSPKernel() override = default;
+
+  int Prepare() override;
+  int CheckSpecs() override;
+  int Run() override;
+
+ private:
+  int ApplyMomentumRunFp32();
+  int ApplyMomentumRunFp16();
+
+  std::string kernel_name_;
+  uint64_t core_mask_{0};
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT04_APPLYMOMENTUM_H_
diff --git a/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.cc b/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.cc
new file mode 100644
index 0000000000000000000000000000000000000000..bf401232c06b92354bdea92279feb020e867d193
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.cc
@@ -0,0 +1,175 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/litert/kernel/dsp/ft78/applymomentum.h"
+
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "src/common/utils.h"
+#include "src/litert/kernel/cpu/nnacl_c/nnacl_common.h"
+#include "src/litert/kernel/cpu/nnacl_c/fp32_grad/optimizer.h"
+#include "src/litert/kernel_registry.h"
+
+using mindspore::kernel::KERNEL_ARCH::kDSP;
+using mindspore::lite::KernelRegistrar;
+using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_OK;
+using mindspore::schema::PrimitiveType_ApplyMomentum;
+
+namespace mindspore::kernel {
+int ApplyMomentumDSPKernel::Prepare() { return RET_OK; }
+
+int ApplyMomentumDSPKernel::CheckSpecs() {
+  if (in_tensors_.size() != kApplyMomentumInputTensorSize) {
+    MS_LOG(WARNING) << "Input size mismatch: expected " << kApplyMomentumInputTensorSize << ", got "
+                    << in_tensors_.size();
+    return RET_ERROR;
+  }
+  if (out_tensors_.size() != kApplyMomentumOutputTensorSize) {
+    MS_LOG(WARNING) << "Output size mismatch: expected " << kApplyMomentumOutputTensorSize << ", got "
+                    << out_tensors_.size();
+    return RET_ERROR;
+  }
+
+  auto weight_shape = in_tensors_[kApplyMomentumWeightIdx]->shape();
+  if (weight_shape != in_tensors_[kApplyMomentumAccumulateIdx]->shape() ||
+      weight_shape != in_tensors_[kApplyMomentumGradientIdx]->shape()) {
+    MS_LOG(WARNING) << "Weight, accumulate or gradient tensor shapes mismatch.";
+    return RET_ERROR;
+  }
+
+  auto data_type = in_tensors_[kApplyMomentumWeightIdx]->data_type();
+  if (data_type != kNumberTypeFloat32) {
+    MS_LOG(WARNING) << "Unsupported data type: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  auto check_scalar = [&](const lite::Tensor *tensor) -> bool {
+    if (tensor == nullptr || tensor->ElementsNum() != 1) {
+      return false;
+    }
+    auto tensor_type = tensor->data_type();
+    return tensor_type == kNumberTypeFloat32;
+  };
+
+  if (!check_scalar(in_tensors_[kApplyMomentumLrIdx]) || !check_scalar(in_tensors_[kApplyMomentumMomentumIdx])) {
+    MS_LOG(WARNING) << "Optimizer scalar tensors are invalid.";
+    return RET_ERROR;
+  }
+
+  return RET_OK;
+}
+
+int ApplyMomentumDSPKernel::ApplyMomentumRunFp32() {
+  kernel_name_ = "fp_applymomentum_s";
+  core_mask_ = 0xf;
+  return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_);
+}
+
+int ApplyMomentumDSPKernel::Run() {
+  auto allocator = dsp_runtime_->GetAllocator();
+
+  auto *weight = in_tensors_[kApplyMomentumWeightIdx];
+
+  int64_t elements_num = weight->ElementsNum();
+
+  auto *param = reinterpret_cast<ApplyMomentumParameter *>(op_parameter_);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "ApplyMomentum parameter is nullptr.";
+    return RET_ERROR;
+  }
+
+  uint64_t weight_device_ptr = allocator->GetDeviceMemPtr(weight->data());
+  uint64_t accumulate_device_ptr = allocator->GetDeviceMemPtr(in_tensors_[kApplyMomentumAccumulateIdx]->data());
+  uint64_t grad_device_ptr = allocator->GetDeviceMemPtr(in_tensors_[kApplyMomentumGradientIdx]->data());
+
+  size_t float_param_bytes = sizeof(float) * kApplyMomentumFloatParamSize;
+
+  void *float_params_buffer = allocator->Malloc(float_param_bytes);
+  if (float_params_buffer == nullptr) {
+    MS_LOG(ERROR) << "Failed to allocate float parameter buffer.";
+    return RET_ERROR;
+  }
+  auto free_float_buffer = [&]() {
+    if (float_params_buffer != nullptr) {
+      allocator->Free(float_params_buffer);
+      float_params_buffer = nullptr;
+    }
+  };
+
+  const size_t scalar_indices[kApplyMomentumFloatParamSize] = {kApplyMomentumLrIdx, kApplyMomentumMomentumIdx};
+  float float_params[kApplyMomentumFloatParamSize] = {0.f};
+  for (size_t i = 0; i < kApplyMomentumFloatParamSize; ++i) {
+    const lite::Tensor *tensor = in_tensors_[scalar_indices[i]];
+    if (tensor == nullptr || tensor->data() == nullptr) {
+      free_float_buffer();
+      MS_LOG(ERROR) << "Optimizer scalar tensor is invalid.";
+      return RET_ERROR;
+    }
+    if (tensor->data_type() != kNumberTypeFloat32) {
+      free_float_buffer();
+      MS_LOG(ERROR) << "Scalar tensor type mismatch: expected FP32.";
+      return RET_ERROR;
+    }
+    float_params[i] = *(reinterpret_cast<const float *>(tensor->data()));
+  }
+  std::memcpy(float_params_buffer, float_params, float_param_bytes);
+
+  uint64_t float_params_device_ptr = allocator->GetDeviceMemPtr(float_params_buffer);
+
+  void *int_params_buffer = allocator->Malloc(sizeof(int32_t) * kApplyMomentumIntParamSize);
+
+  auto free_all_buffers = [&]() {
+    if (float_params_buffer != nullptr) {
+      allocator->Free(float_params_buffer);
+      float_params_buffer = nullptr;
+    }
+    if (int_params_buffer != nullptr) {
+      allocator->Free(int_params_buffer);
+      int_params_buffer = nullptr;
+    }
+  };
+
+  auto *int_params = reinterpret_cast<int32_t *>(int_params_buffer);
+  int_params[0] = 0;
+  int_params[1] = static_cast<int32_t>(elements_num);
+
+  uint64_t int_params_device_ptr = allocator->GetDeviceMemPtr(int_params_buffer);
+  if (int_params_device_ptr == 0) {
+    free_all_buffers();
+    MS_LOG(ERROR) << "Failed to obtain device pointer for int parameter buffer.";
+    return RET_ERROR;
+  }
+
+  int use_nesterov = param->use_nesterov_ ? 1 : 0;
+  SetKernelArg({weight_device_ptr, accumulate_device_ptr, grad_device_ptr, float_params_device_ptr,
+                int_params_device_ptr, static_cast<uint64_t>(use_nesterov)});
+
+  int ret = ApplyMomentumRunFp32();
+
+  free_all_buffers();
+
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << this->name() << " Run failed!";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+REG_KERNEL(kDSP, kNumberTypeFloat32, PrimitiveType_ApplyMomentum, DSPKernelCreator<ApplyMomentumDSPKernel>)
+}  // namespace mindspore::kernel
diff --git a/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.h b/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.h
new file mode 100644
index 0000000000000000000000000000000000000000..286934564b7f87c0cf2ca156bef4b11dd110e665
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft78/applymomentum.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT78_APPLYMOMENTUM_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT78_APPLYMOMENTUM_H_
+
+#include <string>
+#include "src/litert/kernel/dsp/dsp_kernel.h"
+
+namespace mindspore::kernel {
+constexpr size_t kApplyMomentumInputTensorSize = 5;
+constexpr size_t kApplyMomentumOutputTensorSize = 1;
+constexpr size_t kApplyMomentumFloatParamSize = 2;
+constexpr size_t kApplyMomentumIntParamSize = 2;
+
+constexpr size_t kApplyMomentumWeightIdx = 0;
+constexpr size_t kApplyMomentumAccumulateIdx = 1;
+constexpr size_t kApplyMomentumLrIdx = 2;
+constexpr size_t kApplyMomentumGradientIdx = 3;
+constexpr size_t kApplyMomentumMomentumIdx = 4;
+
+class ApplyMomentumDSPKernel : public DSPKernel {
+ public:
+  using DSPKernel::DSPKernel;
+
+  ~ApplyMomentumDSPKernel() override = default;
+
+  int Prepare() override;
+  int CheckSpecs() override;
+  int Run() override;
+
+ private:
+  int ApplyMomentumRunFp32();
+
+  std::string kernel_name_;
+  uint64_t core_mask_{0};
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_FT78_APPLYMOMENTUM_H_
diff --git a/mindspore-lite/test/ut/src/runtime/kernel/dsp/applymomentum_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/dsp/applymomentum_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..0327331b877096eff535e918013a6f33e4ba1396
--- /dev/null
+++ b/mindspore-lite/test/ut/src/runtime/kernel/dsp/applymomentum_test.cc
@@ -0,0 +1,292 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ut/src/runtime/kernel/dsp/dsp_test.h"
+#include "ut/src/runtime/kernel/opencl/common.h"
+#include "include/api/context.h"
+#include "include/api/data_type.h"
+#include "schema/inner/model_generated.h"
+#include "src/litert/kernel_registry.h"
+#include "src/litert/kernel/dsp/dsp_subgraph.h"
+#ifdef SUPPORT_FT78
+#include "src/litert/kernel/dsp/ft78/applymomentum.h"
+#endif
+#ifdef SUPPORT_FT04
+#include "src/litert/kernel/dsp/ft04/applymomentum.h"
+#endif
+#include "src/litert/kernel/cpu/nnacl_c/fp32_grad/optimizer.h"
+
+namespace mindspore::lite::dsp::test {
+namespace {
+constexpr int kTensorLength = 10000;
+constexpr float kLearningRate = 0.0001f;
+constexpr float kMomentum = 1.0f;
+
+ApplyMomentumParameter *CreateApplyMomentumParameter(bool use_nesterov) {
+  auto *param = opencl::test::CreateParameter<ApplyMomentumParameter>(schema::PrimitiveType_ApplyMomentum);
+  if (param == nullptr) {
+    return nullptr;
+  }
+  param->use_nesterov_ = use_nesterov;
+  param->grad_scale_ = 1.0f;
+  return param;
+}
+
+void ApplyMomentumReference(std::vector<float> *weight, std::vector<float> *accumulate,
+                            const std::vector<float> &gradient, float lr, float momentum, bool use_nesterov) {
+  for (size_t i = 0; i < weight->size(); ++i) {
+    float grad = gradient[i];
+    float accu = (*accumulate)[i];
+    accu = accu * momentum + grad;
+    (*accumulate)[i] = accu;
+    float update = use_nesterov ? (accu * momentum + grad) : accu;
+    (*weight)[i] -= update * lr;
+  }
+}
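+
+// Per-element reference update: accum_new = accum * momentum + grad;
+// weight -= lr * (use_nesterov ? grad + accum_new * momentum : accum_new).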
+
+std::vector<float> BuildWeightData() {
+  std::vector<float> data(kTensorLength);
+  for (int i = 0; i < kTensorLength; ++i) {
+    data[i] = 0.25f + 0.00035f * static_cast<float>(i);
+  }
+  return data;
+}
+
+std::vector<float> BuildAccumulateData() {
+  std::vector<float> data(kTensorLength);
+  for (int i = 0; i < kTensorLength; ++i) {
+    data[i] = 0.01f + 0.0002f * static_cast<float>(i % 97);
+  }
+  return data;
+}
+
+std::vector<float> BuildGradientData() {
+  std::vector<float> data(kTensorLength);
+  for (int i = 0; i < kTensorLength; ++i) {
+    float angle = static_cast<float>(i) * 0.01f;
+    data[i] = 0.02f * std::sin(angle);
+  }
+  return data;
+}
+
+}  // namespace
+
+class TestDSP_ApplyMomentum : public DSPCommonTest {};
+
+TEST_F(TestDSP_ApplyMomentum, ApplyMomentum_Fp32_NesterovFalse) {
+  InitDSPRuntime();
+
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
+  std::vector<lite::Tensor *> tensors_to_delete;
+
+  std::vector<int> param_shape = {kTensorLength};
+  std::vector<int> scalar_shape = {1};
+
+  auto weight_tensor = new lite::Tensor(kNumberTypeFloat32, param_shape, mindspore::NHWC, lite::Category::VAR);
+  weight_tensor->MallocData(allocator_);
+  inputs.push_back(weight_tensor);
+  outputs.push_back(weight_tensor);
+  tensors_to_delete.push_back(weight_tensor);
+
+  auto accumulate_tensor = new lite::Tensor(kNumberTypeFloat32, param_shape, mindspore::NHWC, lite::Category::VAR);
+  accumulate_tensor->MallocData(allocator_);
+  inputs.push_back(accumulate_tensor);
+  tensors_to_delete.push_back(accumulate_tensor);
+
+  auto lr_tensor = new lite::Tensor(kNumberTypeFloat32, scalar_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  lr_tensor->MallocData(allocator_);
+  inputs.push_back(lr_tensor);
+  tensors_to_delete.push_back(lr_tensor);
+
+  auto gradient_tensor =
+    new lite::Tensor(kNumberTypeFloat32, param_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  gradient_tensor->MallocData(allocator_);
+  inputs.push_back(gradient_tensor);
+  tensors_to_delete.push_back(gradient_tensor);
+
+  auto momentum_tensor =
+    new lite::Tensor(kNumberTypeFloat32, scalar_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  momentum_tensor->MallocData(allocator_);
+  inputs.push_back(momentum_tensor);
+  tensors_to_delete.push_back(momentum_tensor);
+
+  auto initial_weight = BuildWeightData();
+  auto initial_accumulate = BuildAccumulateData();
+  auto gradients = BuildGradientData();
+
+  std::copy(initial_weight.begin(), initial_weight.end(), reinterpret_cast<float *>(weight_tensor->MutableData()));
+  std::copy(initial_accumulate.begin(), initial_accumulate.end(),
+            reinterpret_cast<float *>(accumulate_tensor->MutableData()));
+  std::copy(gradients.begin(), gradients.end(), reinterpret_cast<float *>(gradient_tensor->MutableData()));
+
+  reinterpret_cast<float *>(lr_tensor->MutableData())[0] = kLearningRate;
+  reinterpret_cast<float *>(momentum_tensor->MutableData())[0] = kMomentum;
+
+  auto expected_weight = initial_weight;
+  auto expected_accumulate = initial_accumulate;
+  ApplyMomentumReference(&expected_weight, &expected_accumulate, gradients, kLearningRate, kMomentum, false);
+
+  auto ctx = new lite::InnerContext;
+  ASSERT_NE(ctx, nullptr);
+  ASSERT_EQ(lite::RET_OK, ctx->Init());
+
+  auto *param = CreateApplyMomentumParameter(false);
+  ASSERT_NE(param, nullptr);
+
+  kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeFloat32, NHWC, schema::PrimitiveType_ApplyMomentum};
+  auto creator = KernelRegistry::GetInstance()->GetCreator(key);
+  ASSERT_NE(creator, nullptr);
+
+  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), ctx, key);
+  ASSERT_NE(kernel, nullptr);
+
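+  // Run() updates weight_tensor in place on the DSP; the result is checked against the host reference below.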
+  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
+  ASSERT_EQ(lite::RET_OK, kernel->Run());
+
+  auto weight_after = reinterpret_cast<float *>(inputs[kernel::kApplyMomentumWeightIdx]->MutableData());
+  float sum_abs_err = 0.f;
+  for (int i = 0; i < kTensorLength; ++i) {
+    float abs_err = std::fabs(weight_after[i] - expected_weight[i]);
+    sum_abs_err += abs_err;
+  }
+
+  ASSERT_EQ(0, CompareOutputData(weight_after, expected_weight.data(), kTensorLength, 1e-5f));
+
+  UninitDSPRuntime();
+
+  delete ctx;
+  for (auto *tensor : tensors_to_delete) {
+    delete tensor;
+  }
+  delete kernel;
+}
+
+#ifdef SUPPORT_FT04
+TEST_F(TestDSP_ApplyMomentum, ApplyMomentum_Fp16_NesterovTrue) {
+  InitDSPRuntime();
+
+  std::vector<lite::Tensor *> inputs;
+  std::vector<lite::Tensor *> outputs;
+  std::vector<lite::Tensor *> tensors_to_delete;
+
+  std::vector<int> param_shape = {kTensorLength};
+  std::vector<int> scalar_shape = {1};
+
+  auto weight_tensor = new lite::Tensor(kNumberTypeFloat16, param_shape, mindspore::NHWC, lite::Category::VAR);
+  weight_tensor->MallocData(allocator_);
+  inputs.push_back(weight_tensor);
+  outputs.push_back(weight_tensor);
+  tensors_to_delete.push_back(weight_tensor);
+
+  auto accumulate_tensor = new lite::Tensor(kNumberTypeFloat16, param_shape, mindspore::NHWC, lite::Category::VAR);
+  accumulate_tensor->MallocData(allocator_);
+  inputs.push_back(accumulate_tensor);
+  tensors_to_delete.push_back(accumulate_tensor);
+
+  auto lr_tensor = new lite::Tensor(kNumberTypeFloat16, scalar_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  lr_tensor->MallocData(allocator_);
+  inputs.push_back(lr_tensor);
+  tensors_to_delete.push_back(lr_tensor);
+
+  auto gradient_tensor =
+    new lite::Tensor(kNumberTypeFloat16, param_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  gradient_tensor->MallocData(allocator_);
+  inputs.push_back(gradient_tensor);
+  tensors_to_delete.push_back(gradient_tensor);
+
+  auto momentum_tensor =
+    new lite::Tensor(kNumberTypeFloat16, scalar_shape, mindspore::NHWC, lite::Category::CONST_TENSOR);
+  momentum_tensor->MallocData(allocator_);
+  inputs.push_back(momentum_tensor);
+  tensors_to_delete.push_back(momentum_tensor);
+
+  auto initial_weight_fp32 = BuildWeightData();
+  auto initial_accumulate_fp32 = BuildAccumulateData();
+  auto gradients_fp32 = BuildGradientData();
+
+  auto *weight_half = reinterpret_cast<uint16_t *>(weight_tensor->MutableData());
+  auto *accumulate_half = reinterpret_cast<uint16_t *>(accumulate_tensor->MutableData());
+  auto *gradient_half = reinterpret_cast<uint16_t *>(gradient_tensor->MutableData());
+
+  for (int i = 0; i < kTensorLength; ++i) {
+    weight_half[i] = fp32_to_fp16(initial_weight_fp32[i]);
+    accumulate_half[i] = fp32_to_fp16(initial_accumulate_fp32[i]);
+    gradient_half[i] = fp32_to_fp16(gradients_fp32[i]);
+  }
+
+  reinterpret_cast<uint16_t *>(lr_tensor->MutableData())[0] = fp32_to_fp16(kLearningRate);
+  reinterpret_cast<uint16_t *>(momentum_tensor->MutableData())[0] = fp32_to_fp16(kMomentum);
+
+  auto expected_weight = initial_weight_fp32;
+  auto expected_accumulate = initial_accumulate_fp32;
+  ApplyMomentumReference(&expected_weight, &expected_accumulate, gradients_fp32, kLearningRate, kMomentum, true);
+
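+  // Round the FP32 reference through FP16 so the comparison tolerance reflects half-precision storage.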
+  std::vector<float> expected_weight_quantized(kTensorLength);
+  for (int i = 0; i < kTensorLength; ++i) {
+    expected_weight_quantized[i] = fp16_to_fp32(fp32_to_fp16(expected_weight[i]));
+  }
+
+  auto ctx = new lite::InnerContext;
+  ASSERT_NE(ctx, nullptr);
+  ASSERT_EQ(lite::RET_OK, ctx->Init());
+
+  auto *param = CreateApplyMomentumParameter(true);
+  ASSERT_NE(param, nullptr);
+
+  kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeFloat16, NHWC, schema::PrimitiveType_ApplyMomentum};
+  auto creator = KernelRegistry::GetInstance()->GetCreator(key);
+  ASSERT_NE(creator, nullptr);
+
+  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), ctx, key);
+  ASSERT_NE(kernel, nullptr);
+
+  ASSERT_EQ(lite::RET_OK, kernel->Prepare());
+  ASSERT_EQ(lite::RET_OK, kernel->Run());
+
+  auto weight_after_half = reinterpret_cast<uint16_t *>(inputs[kernel::kApplyMomentumWeightIdx]->MutableData());
+  std::vector<float> weight_after_fp32(kTensorLength);
+  for (int i = 0; i < kTensorLength; ++i) {
+    weight_after_fp32[i] = fp16_to_fp32(weight_after_half[i]);
+  }
+
+  float sum_abs_err = 0.f;
+  for (int i = 0; i < kTensorLength; ++i) {
+    float abs_err = std::fabs(weight_after_fp32[i] - expected_weight_quantized[i]);
+    sum_abs_err += abs_err;
+  }
+
+  ASSERT_EQ(0, CompareOutputData(weight_after_fp32.data(), expected_weight_quantized.data(), kTensorLength, 1e-3f));
+
+  UninitDSPRuntime();
+
+  delete ctx;
+  for (auto *tensor : tensors_to_delete) {
+    delete tensor;
+  }
+  delete kernel;
+}
+#endif  // SUPPORT_FT04
+
+}  // namespace mindspore::lite::dsp::test
diff --git a/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h b/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h
index 88419f42d7e853af569ac4d207993293a3f96258..450e0d6c8c5a67f6b0d9675569fd25acd1494ba7 100644
--- a/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h
+++ b/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h
@@ -46,6 +46,98 @@ class DSPCommonTest : public CommonTest {
     dsp_runtime_wrapper_ = nullptr;
   }
 
+  // Local IEEE754 half <-> float converters to avoid any linkage/impl mismatch in tests.
+  float fp16_to_fp32(uint16_t h) {
+    uint32_t sign = (static_cast<uint32_t>(h) & 0x8000u) << 16;
+    uint32_t exp = (static_cast<uint32_t>(h) & 0x7C00u) >> 10;
+    uint32_t mant = static_cast<uint32_t>(h & 0x03FFu);
+    uint32_t f;
+    if (exp == 0) {
+      if (mant == 0) {
+        f = sign;  // zero
+      } else {
+        // subnormal -> normalize
+        exp = 1;
+        while ((mant & 0x0400u) == 0) {
+          mant <<= 1;
+          --exp;
+        }
+        mant &= 0x03FFu;
+        uint32_t fexp = (exp + (127 - 15)) << 23;
+        f = sign | fexp | (mant << 13);
+      }
+    } else if (exp == 0x1Fu) {  // Inf/NaN
+      f = sign | 0x7F800000u | (mant << 13);
+    } else {
+      uint32_t fexp = (exp + (127 - 15)) << 23;
+      f = sign | fexp | (mant << 13);
+    }
+    float out;
+    std::memcpy(&out, &f, sizeof(out));
+    return out;
+  }
+
+  uint16_t fp32_to_fp16(float val) {
+    uint32_t fbits;
+    std::memcpy(&fbits, &val, sizeof(fbits));
+    uint32_t sign = (fbits >> 16) & 0x8000u;
+    uint32_t fexp = (fbits >> 23) & 0xFFu;
+    uint32_t fmant = fbits & 0x007FFFFFu;
+
+    // NaN/Inf handling
+    if (fexp == 0xFFu) {
+      if (fmant != 0) {
+        // NaN: map to a half NaN (payload is not preserved)
+        return static_cast<uint16_t>(sign | 0x7C00u | 0x0001u);
+      }
+      // Inf
+      return static_cast<uint16_t>(sign | 0x7C00u);
+    }
+
+    // Rebias exponent for half
+    int32_t hexp = static_cast<int32_t>(fexp) - 127 + 15;
+
+    if (hexp <= 0) {
+      // Subnormal or underflow to zero in half
+      if (hexp < -10) {
+        return static_cast<uint16_t>(sign);  // Underflow to zero
+      }
+      // Make implicit leading 1 explicit
+      uint32_t mant = fmant | 0x00800000u;
+      // Shift to align to half subnormal mantissa (10 bits)
+      int shift = 1 - hexp;  // shift in [1..11]
+      // Compute mantissa with round-to-nearest-even
+      uint32_t mant_rounded = mant >> (shift + 13);
+      uint32_t round_bit = (mant >> (shift + 12)) & 1u;
+      uint32_t sticky = (mant & ((1u << (shift + 12)) - 1u)) != 0u;
+      mant_rounded += (round_bit & (sticky | (mant_rounded & 1u)));
+      return static_cast<uint16_t>(sign | static_cast<uint16_t>(mant_rounded));
+    }
+
+    if (hexp >= 0x1F) {
+      // Overflow to half inf
+      return static_cast<uint16_t>(sign | 0x7C00u);
+    }
+
+    // Normal case: build exponent and mantissa with round-to-nearest-even
+    uint16_t hexp_field = static_cast<uint16_t>(hexp) << 10;
+    uint32_t mant = fmant;
+    uint32_t mant_rounded = mant >> 13;
+    uint32_t round_bit = (mant >> 12) & 1u;
+    uint32_t sticky = (mant & 0xFFFu) != 0u;
+    mant_rounded += (round_bit & (sticky | (mant_rounded & 1u)));
+    if (mant_rounded == 0x400u) {
+      // Mantissa overflow after rounding; bump exponent, zero mantissa
+      mant_rounded = 0;
+      hexp_field = static_cast<uint16_t>(hexp_field + 0x0400u);
+      if (hexp_field >= 0x7C00u) {
+        // Exponent overflow -> inf
+        return static_cast<uint16_t>(sign | 0x7C00u);
+      }
+    }
+    return static_cast<uint16_t>(sign | hexp_field | static_cast<uint16_t>(mant_rounded));
+  }
+
  protected:
   dsp::DSPRuntimeInnerWrapper *dsp_runtime_wrapper_{nullptr};
   std::shared_ptr<Allocator> allocator_;