diff --git a/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.cc b/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.cc new file mode 100644 index 0000000000000000000000000000000000000000..5a304db28801abd062f675e79fd73e2c20f7f163 --- /dev/null +++ b/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.cc @@ -0,0 +1,171 @@ +/** + * Copyright 2025 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/litert/kernel/dsp/ft04/batchtospacend.h" +#include +#include +#include +#include +#include "src/common/utils.h" +#include "src/litert/kernel/cpu/nnacl_c/batch_to_space_parameter.h" +#include "src/litert/kernel_registry.h" + +using mindspore::kernel::KERNEL_ARCH::kDSP; +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_BatchToSpace; + +namespace mindspore::kernel { +namespace { +constexpr size_t kInputTensorSize = 1; +constexpr size_t kOutputTensorSize = 1; +} // namespace + +int BatchToSpaceNDDSPKernel::Prepare() { return RET_OK; } + +int BatchToSpaceNDDSPKernel::CheckSpecs() { + if (in_tensors_.size() != kInputTensorSize) { + MS_LOG(WARNING) << "BatchToSpaceND expects one input, got: " << in_tensors_.size(); + return RET_ERROR; + } + if (out_tensors_.size() != kOutputTensorSize) { + MS_LOG(WARNING) << "BatchToSpaceND expects one output, got: " << out_tensors_.size(); + return RET_ERROR; + } + if (in_tensors_[0]->shape().size() != DIMENSION_4D) { + MS_LOG(WARNING) << "BatchToSpaceND expects 4D NHWC input, got dims: " << in_tensors_[0]->shape().size(); + return RET_ERROR; + } + if (out_tensors_[0]->shape().size() != DIMENSION_4D) { + MS_LOG(WARNING) << "BatchToSpaceND expects 4D NHWC output, got dims: " << out_tensors_[0]->shape().size(); + return RET_ERROR; + } + auto *param = reinterpret_cast(op_parameter_); + if (param == nullptr) { + MS_LOG(WARNING) << "BatchToSpaceND parameter is null."; + return RET_ERROR; + } + return RET_OK; +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunFp32() { + kernel_name_ = "fp_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunFp16() { + kernel_name_ = "hp_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunInt16() { + kernel_name_ = "i16_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunInt32() { + kernel_name_ = "i32_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunComplex64() { + kernel_name_ = "c64_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int 
BatchToSpaceNDDSPKernel::Run() {
+  int ret = RET_ERROR;
+  auto *param = reinterpret_cast<BatchToSpaceParameter *>(op_parameter_);
+
+  auto allocator = dsp_runtime_->GetAllocator();
+
+  auto *input_tensor = in_tensors_[0];
+  auto *output_tensor = out_tensors_[0];
+
+  const auto &in_shape_vec = input_tensor->shape();
+
+  // Prepare shape/block/crops buffers on device.
+  int32_t in_shape_host[DIMENSION_4D] = {0};
+  for (size_t i = 0; i < DIMENSION_4D; ++i) {
+    in_shape_host[i] = static_cast<int32_t>(in_shape_vec[i]);
+  }
+
+  int32_t block_shape_host[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE] = {param->block_shape_[0], param->block_shape_[1]};
+  int32_t crops_host[COMM_SHAPE_SIZE] = {param->crops_[0], param->crops_[1], param->crops_[2], param->crops_[3]};
+
+  void *shape_buf = allocator->Malloc(sizeof(in_shape_host));
+  void *block_buf = allocator->Malloc(sizeof(block_shape_host));
+  void *crops_buf = allocator->Malloc(sizeof(crops_host));
+
+  std::memcpy(shape_buf, in_shape_host, sizeof(in_shape_host));
+  std::memcpy(block_buf, block_shape_host, sizeof(block_shape_host));
+  std::memcpy(crops_buf, crops_host, sizeof(crops_host));
+
+  uint64_t in_shape_dev = allocator->GetDeviceMemPtr(shape_buf);
+  uint64_t block_dev = allocator->GetDeviceMemPtr(block_buf);
+  uint64_t crops_dev = allocator->GetDeviceMemPtr(crops_buf);
+
+  uint64_t input_dev = allocator->GetDeviceMemPtr(input_tensor->data());
+  uint64_t output_dev = allocator->GetDeviceMemPtr(output_tensor->data());
+
+  auto data_type = input_tensor->data_type();
+  size_t data_size = lite::DataTypeSize(data_type);
+  if (data_size == 0) {
+    allocator->Free(shape_buf);
+    allocator->Free(block_buf);
+    allocator->Free(crops_buf);
+    MS_LOG(ERROR) << "BatchToSpaceND unsupported dtype: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  SetKernelArg({input_dev, output_dev, in_shape_dev, block_dev, crops_dev, static_cast<uint64_t>(data_size)});
+
+  if (data_type == kNumberTypeFloat32) {
+    ret = BatchToSpaceNDRunFp32();
+  } else if (data_type == kNumberTypeFloat16) {
+    ret = BatchToSpaceNDRunFp16();
+  } else if (data_type == kNumberTypeInt16) {
+    ret = BatchToSpaceNDRunInt16();
+  } else if (data_type == kNumberTypeInt32) {
+    ret = BatchToSpaceNDRunInt32();
+  } else if (data_type == kNumberTypeComplex64) {
+    ret = BatchToSpaceNDRunComplex64();
+  }
+
+  allocator->Free(shape_buf);
+  allocator->Free(block_buf);
+  allocator->Free(crops_buf);
+
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << this->name() << " Run failed!";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+REG_KERNEL(kDSP, kNumberTypeFloat32, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeFloat16, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeInt16, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeInt32, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeComplex64, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+
+}  // namespace mindspore::kernel
diff --git a/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.h b/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.h
new file mode 100644
index 0000000000000000000000000000000000000000..bea3f914acad03daf201701d9b0b899ee0980ee3
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft04/batchtospacend.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_ + +#include +#include "src/litert/kernel/dsp/dsp_kernel.h" + +namespace mindspore::kernel { +class BatchToSpaceNDDSPKernel : public DSPKernel { + public: + using DSPKernel::DSPKernel; + + ~BatchToSpaceNDDSPKernel() override = default; + + int Prepare() override; + int CheckSpecs() override; + int Run() override; + + int BatchToSpaceNDRunFp32(); + int BatchToSpaceNDRunFp16(); + int BatchToSpaceNDRunInt16(); + int BatchToSpaceNDRunInt32(); + int BatchToSpaceNDRunComplex64(); + + private: + std::string kernel_name_; + uint64_t core_mask_{0}; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_ diff --git a/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.cc b/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.cc new file mode 100644 index 0000000000000000000000000000000000000000..8d630613a49f283198f2aa6915411153fcee557a --- /dev/null +++ b/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.cc @@ -0,0 +1,188 @@ +/** + * Copyright 2025 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/litert/kernel/dsp/ft78/batchtospacend.h" +#include +#include +#include +#include +#include "src/litert/kernel/cpu/nnacl_c/batch_to_space_parameter.h" +#include "src/litert/kernel_registry.h" + +using mindspore::kernel::KERNEL_ARCH::kDSP; +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_BatchToSpace; + +namespace mindspore::kernel { +namespace { +constexpr size_t kInputTensorSize = 1; +constexpr size_t kOutputTensorSize = 1; +} // namespace + +int BatchToSpaceNDDSPKernel::Prepare() { return RET_OK; } + +int BatchToSpaceNDDSPKernel::CheckSpecs() { + if (in_tensors_.size() != kInputTensorSize) { + MS_LOG(WARNING) << "BatchToSpaceND expects one input, got: " << in_tensors_.size(); + return RET_ERROR; + } + if (out_tensors_.size() != kOutputTensorSize) { + MS_LOG(WARNING) << "BatchToSpaceND expects one output, got: " << out_tensors_.size(); + return RET_ERROR; + } + if (in_tensors_[0]->shape().size() != DIMENSION_4D) { + MS_LOG(WARNING) << "BatchToSpaceND expects 4D NHWC input, got dims: " << in_tensors_[0]->shape().size(); + return RET_ERROR; + } + if (out_tensors_[0]->shape().size() != DIMENSION_4D) { + MS_LOG(WARNING) << "BatchToSpaceND expects 4D NHWC output, got dims: " << out_tensors_[0]->shape().size(); + return RET_ERROR; + } + auto *param = reinterpret_cast(op_parameter_); + if (param == nullptr) { + MS_LOG(WARNING) << "BatchToSpaceND parameter is null."; + return RET_ERROR; + } + return RET_OK; +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunFp32() { + kernel_name_ = "fp_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunFp64() { + kernel_name_ = "dp_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunInt8() { + kernel_name_ = "i8_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunInt16() { + kernel_name_ = "i16_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunInt32() { + kernel_name_ = "i32_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunComplex64() { + kernel_name_ = "c64_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::BatchToSpaceNDRunComplex128() { + kernel_name_ = "c128_batchtospacend_s"; + core_mask_ = 0xf; + return dsp_runtime_->RunKernel(kernel_name_, kernel_args_, core_mask_); +} + +int BatchToSpaceNDDSPKernel::Run() { + int ret = RET_ERROR; + auto *param = reinterpret_cast(op_parameter_); + + auto allocator = dsp_runtime_->GetAllocator(); + + auto *input_tensor = in_tensors_[0]; + auto *output_tensor = out_tensors_[0]; + + const auto &in_shape_vec = input_tensor->shape(); + + // Prepare shape/block/crops buffers on device. 
+  int32_t in_shape_host[DIMENSION_4D] = {0};
+  for (size_t i = 0; i < DIMENSION_4D; ++i) {
+    in_shape_host[i] = static_cast<int32_t>(in_shape_vec[i]);
+  }
+
+  int32_t block_shape_host[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE] = {param->block_shape_[0], param->block_shape_[1]};
+  int32_t crops_host[COMM_SHAPE_SIZE] = {param->crops_[0], param->crops_[1], param->crops_[2], param->crops_[3]};
+
+  void *shape_buf = allocator->Malloc(sizeof(in_shape_host));
+  void *block_buf = allocator->Malloc(sizeof(block_shape_host));
+  void *crops_buf = allocator->Malloc(sizeof(crops_host));
+
+  std::memcpy(shape_buf, in_shape_host, sizeof(in_shape_host));
+  std::memcpy(block_buf, block_shape_host, sizeof(block_shape_host));
+  std::memcpy(crops_buf, crops_host, sizeof(crops_host));
+
+  uint64_t in_shape_dev = allocator->GetDeviceMemPtr(shape_buf);
+  uint64_t block_dev = allocator->GetDeviceMemPtr(block_buf);
+  uint64_t crops_dev = allocator->GetDeviceMemPtr(crops_buf);
+
+  uint64_t input_dev = allocator->GetDeviceMemPtr(input_tensor->data());
+  uint64_t output_dev = allocator->GetDeviceMemPtr(output_tensor->data());
+
+  auto data_type = input_tensor->data_type();
+  size_t data_size = lite::DataTypeSize(data_type);
+  if (data_size == 0) {
+    allocator->Free(shape_buf);
+    allocator->Free(block_buf);
+    allocator->Free(crops_buf);
+    MS_LOG(ERROR) << "BatchToSpaceND unsupported dtype: " << static_cast<int>(data_type);
+    return RET_ERROR;
+  }
+
+  SetKernelArg({input_dev, output_dev, in_shape_dev, block_dev, crops_dev, static_cast<uint64_t>(data_size)});
+
+  if (data_type == kNumberTypeFloat32) {
+    ret = BatchToSpaceNDRunFp32();
+  } else if (data_type == kNumberTypeFloat64) {
+    ret = BatchToSpaceNDRunFp64();
+  } else if (data_type == kNumberTypeInt8) {
+    ret = BatchToSpaceNDRunInt8();
+  } else if (data_type == kNumberTypeInt16) {
+    ret = BatchToSpaceNDRunInt16();
+  } else if (data_type == kNumberTypeInt32) {
+    ret = BatchToSpaceNDRunInt32();
+  } else if (data_type == kNumberTypeComplex64) {
+    ret = BatchToSpaceNDRunComplex64();
+  } else if (data_type == kNumberTypeComplex128) {
+    ret = BatchToSpaceNDRunComplex128();
+  }
+
+  allocator->Free(shape_buf);
+  allocator->Free(block_buf);
+  allocator->Free(crops_buf);
+
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << this->name() << " Run failed!";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+REG_KERNEL(kDSP, kNumberTypeFloat32, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeFloat64, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeInt8, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeInt16, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeInt32, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeComplex64, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+REG_KERNEL(kDSP, kNumberTypeComplex128, PrimitiveType_BatchToSpace, DSPKernelCreator<BatchToSpaceNDDSPKernel>)
+
+}  // namespace mindspore::kernel
diff --git a/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.h b/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.h
new file mode 100644
index 0000000000000000000000000000000000000000..c25d91429f2bf9c092f707481d6d8db374febc00
--- /dev/null
+++ b/mindspore-lite/src/litert/kernel/dsp/ft78/batchtospacend.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_
+
+#include <string>
+#include "src/litert/kernel/dsp/dsp_kernel.h"
+
+namespace mindspore::kernel {
+class BatchToSpaceNDDSPKernel : public DSPKernel {
+ public:
+  using DSPKernel::DSPKernel;
+
+  ~BatchToSpaceNDDSPKernel() override = default;
+
+  int Prepare() override;
+  int CheckSpecs() override;
+  int Run() override;
+
+  int BatchToSpaceNDRunFp32();
+  int BatchToSpaceNDRunFp64();
+  int BatchToSpaceNDRunInt8();
+  int BatchToSpaceNDRunInt16();
+  int BatchToSpaceNDRunInt32();
+  int BatchToSpaceNDRunComplex64();
+  int BatchToSpaceNDRunComplex128();
+
+ private:
+  std::string kernel_name_;
+  uint64_t core_mask_{0};
+};
+}  // namespace mindspore::kernel
+
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_DSP_KERNEL_BATCHTOSPACEND_H_
diff --git a/mindspore-lite/test/ut/src/runtime/kernel/dsp/batchtospacend_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/dsp/batchtospacend_tests.cc
new file mode 100644
index 0000000000000000000000000000000000000000..2209c772f4efa1b7e16d0053b12074505414c4d7
--- /dev/null
+++ b/mindspore-lite/test/ut/src/runtime/kernel/dsp/batchtospacend_tests.cc
@@ -0,0 +1,481 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ut/src/runtime/kernel/dsp/dsp_test.h"
+#include "include/api/context.h"
+#include "include/api/data_type.h"
+#include "schema/inner/model_generated.h"
+#include "src/litert/kernel_registry.h"
+#include "src/litert/kernel/dsp/dsp_subgraph.h"
+#include "src/litert/kernel/cpu/nnacl_c/batch_to_space_parameter.h"
+
+namespace mindspore::lite::dsp::test {
+namespace {
+constexpr int kBlockH = 2;
+constexpr int kBlockW = 2;
+// Test settings: batch=4, H=W=100, C=5, block=2x2, zero crops.
+constexpr int kInBatch = 4;
+constexpr int kInH = 100;
+constexpr int kInW = 100;
+constexpr int kInC = 5;
+constexpr int kTotalInput = kInBatch * kInH * kInW * kInC;      // 200000
+constexpr int kOutBatch = kInBatch / (kBlockH * kBlockW);       // 1
+constexpr int kOutH = kInH * kBlockH;                           // 200
+constexpr int kOutW = kInW * kBlockW;                           // 200
+constexpr int kTotalOutput = kOutBatch * kOutH * kOutW * kInC;  // 200000
+constexpr int kCrops[4] = {0, 0, 0, 0};
+
+// Reference implementation for real and complex types (complex data is stored as interleaved re/im pairs).
+template <typename T>
+void BatchToSpaceNDRef(const T *input, T *output, bool is_complex) {
+  std::fill(output, output + kTotalOutput * (is_complex ?
2 : 1), static_cast(0)); + for (int n = 0; n < kOutBatch; ++n) { + for (int h = 0; h < kOutH; ++h) { + for (int w = 0; w < kOutW; ++w) { + int in_h = h / kBlockH; + int in_w = w / kBlockW; + int offset_h = h % kBlockH; + int offset_w = w % kBlockW; + int in_n = n * kBlockH * kBlockW + offset_h * kBlockW + offset_w; + int in_idx_base = ((in_n * kInH + in_h) * kInW + in_w) * kInC; + int out_idx_base = ((n * kOutH + h) * kOutW + w) * kInC; + if (!is_complex) { + for (int c = 0; c < kInC; ++c) { + output[out_idx_base + c] = input[in_idx_base + c]; + } + } else { + for (int c = 0; c < kInC; ++c) { + int in_pos = (in_idx_base + c) * 2; + int out_pos = (out_idx_base + c) * 2; + output[out_pos] = input[in_pos]; + output[out_pos + 1] = input[in_pos + 1]; + } + } + } + } + } +} + +BatchToSpaceParameter *CreateParam() { + auto *param = reinterpret_cast(malloc(sizeof(BatchToSpaceParameter))); + if (param == nullptr) { + return nullptr; + } + std::memset(param, 0, sizeof(BatchToSpaceParameter)); + param->op_parameter_.type_ = static_cast(schema::PrimitiveType_BatchToSpace); + param->block_shape_[0] = kBlockH; + param->block_shape_[1] = kBlockW; + for (int i = 0; i < 4; ++i) { + param->crops_[i] = kCrops[i]; + } + return param; +} +} // namespace + +class TestDSP_BatchToSpaceND : public DSPCommonTest {}; + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Fp32) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = + new lite::Tensor(kNumberTypeFloat32, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeFloat32, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + for (int i = 0; i < kTotalInput; ++i) { + reinterpret_cast(input->MutableData())[i] = static_cast(i + 1); + } + + std::vector expected(kTotalOutput, 0.f); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), false); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeFloat32, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_data = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_data, expected.data(), kTotalOutput, 1e-5)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Int16) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = new lite::Tensor(kNumberTypeInt16, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeInt16, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + for (int i = 0; i < kTotalInput; ++i) { + reinterpret_cast(input->MutableData())[i] = static_cast(i % 32760); + } + + std::vector expected(kTotalOutput, 0); + 
BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), false); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeInt16, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_data = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_data, expected.data(), kTotalOutput, 1e-9)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Int32) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = new lite::Tensor(kNumberTypeInt32, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeInt32, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + for (int i = 0; i < kTotalInput; ++i) { + reinterpret_cast(input->MutableData())[i] = i + 1; + } + + std::vector expected(kTotalOutput, 0); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), false); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeInt32, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_data = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_data, expected.data(), kTotalOutput, 1e-9)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Complex64) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = + new lite::Tensor(kNumberTypeComplex64, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeComplex64, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + auto *in_cplx = reinterpret_cast(input->MutableData()); + for (int i = 0; i < kTotalInput; ++i) { + in_cplx[2 * i] = 0.5f * static_cast(i + 1); + in_cplx[2 * i + 1] = 0.3f * static_cast(i + 1); + } + + std::vector expected(kTotalOutput * 2, 0.f); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), true); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeComplex64, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + 
ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_cplx = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_cplx, expected.data(), kTotalOutput * 2, 1e-5)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +#ifdef SUPPORT_FT04 +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Fp16) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = + new lite::Tensor(kNumberTypeFloat16, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeFloat16, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + auto *in_half = reinterpret_cast(input->MutableData()); + for (int i = 0; i < kTotalInput; ++i) { + float v = 0.1f * static_cast(i + 1); + in_half[i] = fp32_to_fp16(v); + } + + std::vector expected_half(kTotalOutput, 0); + std::vector expected_fp(kTotalOutput, 0.f); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected_half.data(), false); + // Convert expected half to fp32 for comparison tolerance. + for (int i = 0; i < kTotalOutput; ++i) { + expected_fp[i] = fp16_to_fp32(expected_half[i]); + } + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeFloat16, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_half = reinterpret_cast(output->MutableData()); + std::vector out_fp(kTotalOutput); + for (int i = 0; i < kTotalOutput; ++i) { + out_fp[i] = fp16_to_fp32(out_half[i]); + } + ASSERT_EQ(0, CompareOutputData(out_fp.data(), expected_fp.data(), kTotalOutput, 1e-3f)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} +#endif // SUPPORT_FT04 + +#ifdef SUPPORT_FT78 +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Fp64) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = + new lite::Tensor(kNumberTypeFloat64, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeFloat64, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + for (int i = 0; i < kTotalInput; ++i) { + reinterpret_cast(input->MutableData())[i] = static_cast(i + 1); + } + + std::vector expected(kTotalOutput, 0.0); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), false); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeFloat64, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + 
ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_data = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_data, expected.data(), kTotalOutput, 1e-9)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Int8) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = new lite::Tensor(kNumberTypeInt8, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeInt8, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + for (int i = 0; i < kTotalInput; ++i) { + reinterpret_cast(input->MutableData())[i] = static_cast(i % 127); + } + + std::vector expected(kTotalOutput, 0); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), false); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeInt8, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_data = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_data, expected.data(), kTotalOutput, 0.0f)); + + UninitDSPRuntime(); + delete ctx; + delete kernel; + delete input; + delete output; +} + +TEST_F(TestDSP_BatchToSpaceND, BatchToSpaceND_Complex128) { + InitDSPRuntime(); + + std::vector inputs; + std::vector outputs; + auto *input = + new lite::Tensor(kNumberTypeComplex128, {kInBatch, kInH, kInW, kInC}, mindspore::NHWC, lite::Category::VAR); + input->MallocData(allocator_); + inputs.push_back(input); + + auto *output = + new lite::Tensor(kNumberTypeComplex128, {kOutBatch, kOutH, kOutW, kInC}, mindspore::NHWC, lite::Category::VAR); + output->MallocData(allocator_); + outputs.push_back(output); + + auto *in_cplx = reinterpret_cast(input->MutableData()); + for (int i = 0; i < kTotalInput; ++i) { + in_cplx[2 * i] = 0.5 * static_cast(i + 1); + in_cplx[2 * i + 1] = 0.3 * static_cast(i + 1); + } + + std::vector expected(kTotalOutput * 2, 0.0); + BatchToSpaceNDRef(reinterpret_cast(input->MutableData()), expected.data(), true); + + auto *ctx = new lite::InnerContext; + ASSERT_EQ(lite::RET_OK, ctx->Init()); + auto *param = CreateParam(); + ASSERT_NE(param, nullptr); + + kernel::KernelKey key = {kernel::KERNEL_ARCH::kDSP, kNumberTypeComplex128, NHWC, schema::PrimitiveType_BatchToSpace}; + auto creator = KernelRegistry::GetInstance()->GetCreator(key); + ASSERT_NE(creator, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast(param), ctx, key); + ASSERT_NE(kernel, nullptr); + + EXPECT_EQ(kernel->Prepare(), lite::RET_OK); + EXPECT_EQ(kernel->Run(), lite::RET_OK); + + auto *out_cplx = reinterpret_cast(output->MutableData()); + ASSERT_EQ(0, CompareOutputData(out_cplx, expected.data(), kTotalOutput * 2, 1e-9)); + + UninitDSPRuntime(); 
+ delete ctx; + delete kernel; + delete input; + delete output; +} +#endif // SUPPORT_FT78 +} // namespace mindspore::lite::dsp::test diff --git a/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h b/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h index 88419f42d7e853af569ac4d207993293a3f96258..450e0d6c8c5a67f6b0d9675569fd25acd1494ba7 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h +++ b/mindspore-lite/test/ut/src/runtime/kernel/dsp/dsp_test.h @@ -46,6 +46,98 @@ class DSPCommonTest : public CommonTest { dsp_runtime_wrapper_ = nullptr; } + // Local IEEE754 half <-> float converters to avoid any linkage/impl mismatch in tests. + float fp16_to_fp32(uint16_t h) { + uint32_t sign = (static_cast(h) & 0x8000u) << 16; + uint32_t exp = (static_cast(h) & 0x7C00u) >> 10; + uint32_t mant = static_cast(h & 0x03FFu); + uint32_t f; + if (exp == 0) { + if (mant == 0) { + f = sign; // zero + } else { + // subnormal -> normalize + exp = 1; + while ((mant & 0x0400u) == 0) { + mant <<= 1; + --exp; + } + mant &= 0x03FFu; + uint32_t fexp = (exp + (127 - 15)) << 23; + f = sign | fexp | (mant << 13); + } + } else if (exp == 0x1Fu) { // Inf/NaN + f = sign | 0x7F800000u | (mant << 13); + } else { + uint32_t fexp = (exp + (127 - 15)) << 23; + f = sign | fexp | (mant << 13); + } + float out; + std::memcpy(&out, &f, sizeof(out)); + return out; + } + + uint16_t fp32_to_fp16(float val) { + uint32_t fbits; + std::memcpy(&fbits, &val, sizeof(fbits)); + uint32_t sign = (fbits >> 16) & 0x8000u; + uint32_t fexp = (fbits >> 23) & 0xFFu; + uint32_t fmant = fbits & 0x007FFFFFu; + + // NaN/Inf handling + if (fexp == 0xFFu) { + if (fmant != 0) { + // NaN: keep a quiet NaN in half + return static_cast(sign | 0x7C00u | 0x0001u); + } + // Inf + return static_cast(sign | 0x7C00u); + } + + // Rebias exponent for half + int32_t hexp = static_cast(fexp) - 127 + 15; + + if (hexp <= 0) { + // Subnormal or underflow to zero in half + if (hexp < -10) { + return static_cast(sign); // Underflow to zero + } + // Make implicit leading 1 explicit + uint32_t mant = fmant | 0x00800000u; + // Shift to align to half subnormal mantissa (10 bits) + int shift = 1 - hexp; // shift in [1..10] + // Compute mantissa with round-to-nearest-even + uint32_t mant_rounded = mant >> (shift + 13); + uint32_t round_bit = (mant >> (shift + 12)) & 1u; + uint32_t sticky = (mant & ((1u << (shift + 12)) - 1u)) != 0u; + mant_rounded += (round_bit & (sticky | (mant_rounded & 1u))); + return static_cast(sign | static_cast(mant_rounded)); + } + + if (hexp >= 0x1F) { + // Overflow to half inf + return static_cast(sign | 0x7C00u); + } + + // Normal case: build exponent and mantissa with round-to-nearest-even + uint16_t hexp_field = static_cast(hexp) << 10; + uint32_t mant = fmant; + uint32_t mant_rounded = mant >> 13; + uint32_t round_bit = (mant >> 12) & 1u; + uint32_t sticky = (mant & 0xFFFu) != 0u; + mant_rounded += (round_bit & (sticky | (mant_rounded & 1u))); + if (mant_rounded == 0x400u) { + // Mantissa overflow after rounding; bump exponent, zero mantissa + mant_rounded = 0; + hexp_field = static_cast(hexp_field + 0x0400u); + if (hexp_field >= 0x7C00u) { + // Exponent overflow -> inf + return static_cast(sign | 0x7C00u); + } + } + return static_cast(sign | hexp_field | static_cast(mant_rounded)); + } + protected: dsp::DSPRuntimeInnerWrapper *dsp_runtime_wrapper_{nullptr}; std::shared_ptr allocator_;
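
For reference, the DSP kernels and the unit-test reference implementation above all use the same NHWC batch-to-space index mapping. The snippet below is a minimal standalone sketch of that mapping for zero crops and float data; the function name and shapes are illustrative only and simply mirror the test constants (batch 4, 100x100 spatial, 5 channels, 2x2 block). It is not part of the patch.

    // Sketch only: NHWC batch-to-space with zero crops.
    // Output shape is [N / (bh * bw), H * bh, W * bw, C]; output element (n, h, w, c) is read
    // from input batch n * bh * bw + (h % bh) * bw + (w % bw) at spatial position (h / bh, w / bw).
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<float> BatchToSpaceNHWC(const std::vector<float> &in, int n_in, int h_in, int w_in, int c,
                                        int bh, int bw) {
      const int n_out = n_in / (bh * bw);
      const int h_out = h_in * bh;
      const int w_out = w_in * bw;
      std::vector<float> out(static_cast<std::size_t>(n_out) * h_out * w_out * c);
      for (int n = 0; n < n_out; ++n) {
        for (int h = 0; h < h_out; ++h) {
          for (int w = 0; w < w_out; ++w) {
            const int in_n = n * bh * bw + (h % bh) * bw + (w % bw);
            const int64_t in_base = ((static_cast<int64_t>(in_n) * h_in + h / bh) * w_in + w / bw) * c;
            const int64_t out_base = ((static_cast<int64_t>(n) * h_out + h) * w_out + w) * c;
            for (int ch = 0; ch < c; ++ch) {
              out[out_base + ch] = in[in_base + ch];
            }
          }
        }
      }
      return out;
    }

With the test configuration (4 x 100 x 100 x 5 input, 2x2 block, zero crops) this produces a 1 x 200 x 200 x 5 output, matching kOutBatch, kOutH, kOutW and kInC above.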