diff --git a/mindspore-lite/src/litert/lite_session.cc b/mindspore-lite/src/litert/lite_session.cc
index cf47bb98e81372e28fca5ca8e11ea74922417dc5..a35d022e74bc319f3c7a900675ea214e4e3055f8 100644
--- a/mindspore-lite/src/litert/lite_session.cc
+++ b/mindspore-lite/src/litert/lite_session.cc
@@ -1457,51 +1457,59 @@ int LiteSession::Resize(const std::vector<lite::Tensor *> &inputs,
     MS_LOG(ERROR) << "Not support multi-threading";
     return RET_ERROR;
   }
+  for (size_t i = 0; i < dims.size(); i++) {
+    auto model_input_rank = inputs[i]->ConvertToTensorC()->shape_size_;
+    if (dims[i].size() != model_input_rank && model_input_rank != 0) {
+      MS_LOG(ERROR) << "Tensor " << i << "'s rank: " << dims[i].size() << " must match the rank: " << model_input_rank
+                    << " of model input " << i;
+      return RET_ERROR;
+    }
+  }
   std::vector<std::vector<int>> old_dims;
   for (size_t i = 0; i < inputs_.size(); ++i) {
     old_dims.push_back(inputs_[i]->shape());
   }
   auto ret = ResizeInputs(inputs, dims);
   if (ret != RET_OK) {
     ResetInputsShape(old_dims);
     is_running_.store(false);
     return ret;
   }
   ret = UpdateInputShapeMap();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "update input shape map failed.";
     return RET_ERROR;
   }
   if (infer_along_running_) {
     is_running_.store(false);
     return ret;
   }
 
   ret = ReSizeKernels(kernels_, isolate_input_map_);
   if (ret != RET_OK) {
     ResetInputsShape(old_dims);
     auto resize_ret = ReSizeKernels(kernels_);
     if (resize_ret != RET_OK) {
       MS_LOG(ERROR) << "restore kernel size fail!ret: " << resize_ret;
     }
     is_running_.store(false);
     return ret;
   }
 
   if (InitRuntimeAllocator() != RET_OK) {
     MS_LOG(ERROR) << "Runtime allocator in resize failed.";
     is_running_.store(false);
     return RET_ERROR;
   }
 
   auto status = GraphOptimizePass(&kernels_);
   if (status != RET_OK) {
     MS_LOG(ERROR) << "GraphOptimizePass failed.";
     return RET_ERROR;
   }
 
   is_running_.store(false);
   return RET_OK;
 }
 
 int LiteSession::PreCheck(Model *model) {
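For reference, the hunk above makes LiteSession::Resize reject a request whose shape for input i has a different rank than the model records for that input; a recorded rank of 0 (fully unknown shape) accepts any rank. Below is a minimal standalone sketch of that rule, where FakeTensorC, RanksMatch, and the sample shapes are hypothetical stand-ins rather than the real lite::Tensor / TensorC types:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for TensorC: only the one field the check reads.
struct FakeTensorC {
  size_t shape_size_;  // rank recorded in the model; 0 means the rank is unknown
};

// Mirrors the validation loop added at the top of LiteSession::Resize.
bool RanksMatch(const std::vector<FakeTensorC> &model_inputs, const std::vector<std::vector<int>> &dims) {
  for (size_t i = 0; i < dims.size(); i++) {
    size_t model_input_rank = model_inputs[i].shape_size_;
    if (dims[i].size() != model_input_rank && model_input_rank != 0) {
      std::printf("Tensor %zu's rank: %zu must match the rank: %zu of model input %zu\n", i, dims[i].size(),
                  model_input_rank, i);
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<FakeTensorC> model_inputs = {{4}, {0}};  // a rank-4 input, then an unknown-rank input
  // Accepted: rank 4 matches, and the unknown-rank input takes any rank.
  std::printf("%d\n", RanksMatch(model_inputs, {{1, 3, 224, 224}, {8, 8}}));  // prints 1
  // Rejected: a rank-2 shape is offered for the rank-4 input.
  std::printf("%d\n", RanksMatch(model_inputs, {{224, 224}, {8, 8}}));  // prints 0
  return 0;
}

As in the hunk, the sketch indexes the model inputs with the same i used for dims, so it presumes the caller supplies at most one shape per model input; a dims vector longer than the input list would index out of range in both versions.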