diff --git a/.jenkins/check/config/filter_pylint.txt b/.jenkins/check/config/filter_pylint.txt
index ac7405289b13bf690f582cc1ec6938c2c6078423..cfe1b1b50b637bb2f82516b87d187595bf097327 100644
--- a/.jenkins/check/config/filter_pylint.txt
+++ b/.jenkins/check/config/filter_pylint.txt
@@ -18,6 +18,7 @@
 "mindspore-lite/mindspore-lite/tools/kernel_builder/ascend/akg/custom.py" "consider-using-in"
 "mindspore-lite/mindspore-lite/tools/kernel_builder/ascend/akg/custom.py" "unused-argument"
 "mindspore-lite/mindspore-lite/python/api/lite_infer.py" "arguments-differ"
+"mindspore-lite/mindspore-lite/test/st/python/test_inference_cloud.py" "unused-argument"
 
 # ascend samples
 "mindspore-lite/mindspore-lite/tools/kernel_builder/ascend/tbe_dsl/sample/" "wrong-import-order"
diff --git a/mindspore-lite/examples/cloud_infer/ascend_ge_distributed_java/src/main/java/com/mindspore/lite/demo/Main.java b/mindspore-lite/examples/cloud_infer/ascend_ge_distributed_java/src/main/java/com/mindspore/lite/demo/Main.java
index d50c55edfe49c406928ed68c56af4dea9e6feee0..89afe93f5e6d5fa7283a5f551923fa29547cc796 100644
--- a/mindspore-lite/examples/cloud_infer/ascend_ge_distributed_java/src/main/java/com/mindspore/lite/demo/Main.java
+++ b/mindspore-lite/examples/cloud_infer/ascend_ge_distributed_java/src/main/java/com/mindspore/lite/demo/Main.java
@@ -19,7 +19,6 @@ package com.mindspore.lite.demo;
 import com.mindspore.MSTensor;
 import com.mindspore.Model;
 import com.mindspore.config.DataType;
-import com.mindspore.config.DeviceType;
 import com.mindspore.config.MSContext;
 import com.mindspore.config.ModelType;
 import com.mindspore.config.Version;
@@ -27,7 +26,6 @@ import com.mindspore.config.AscendDeviceInfo;
 
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
-import java.sql.DataTruncation;
 import java.util.*;
 
 public class Main {
@@ -46,7 +44,7 @@ public class Main {
         Random rand = new Random();
         byte[] arr = new byte[len];
         for (int i = 0; i < arr.length; i++) {
-            arr[i] = (byte)0;
+            arr[i] = (byte) 0;
         }
         return arr;
     }
diff --git a/mindspore-lite/examples/cloud_infer/quick_start_parallel_java/src/main/java/com/mindspore/lite/demo/Main.java b/mindspore-lite/examples/cloud_infer/quick_start_parallel_java/src/main/java/com/mindspore/lite/demo/Main.java
index 96e1fa2ba07baf2c24dbe0255b416a45d4746787..184f21d44d9acd4ae79670f50321654b718af638 100644
--- a/mindspore-lite/examples/cloud_infer/quick_start_parallel_java/src/main/java/com/mindspore/lite/demo/Main.java
+++ b/mindspore-lite/examples/cloud_infer/quick_start_parallel_java/src/main/java/com/mindspore/lite/demo/Main.java
@@ -21,8 +21,6 @@ import com.mindspore.config.MSContext;
 import com.mindspore.config.RunnerConfig;
 import com.mindspore.ModelParallelRunner;
 import com.mindspore.MSTensor;
-import com.mindspore.Model;
-import com.mindspore.config.ModelType;
 import com.mindspore.config.Version;
 
 import java.nio.ByteBuffer;
@@ -32,9 +30,7 @@ import java.util.Random;
 
 import java.nio.ByteBuffer;
 import java.nio.FloatBuffer;
-import java.nio.IntBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 
 public class Main {
@@ -62,7 +58,7 @@ public class Main {
         return buffer;
     }
 
-    private static void freeTensor(){
+    private static void freeTensor() {
         for (int i = 0; i < inputs.size(); i++) {
             inputs.get(i).free();
         }
@@ -81,7 +77,7 @@ public class Main {
 
         // use default param init context
         MSContext context = new MSContext();
-        context.init(1,0);
+        context.init(1, 0);
         boolean ret = context.addDeviceInfo(DeviceType.DT_CPU, false, 0);
         if (!ret) {
             System.err.println("init context failed");
@@ -129,7 +125,7 @@ public class Main {
         List<MSTensor> outputs = new ArrayList<>();
 
         // runner do predict
-        ret = runner.predict(inputs,outputs);
+        ret = runner.predict(inputs, outputs);
         if (!ret) {
             System.err.println("MindSpore Lite predict failed.");
             freeTensor();
diff --git a/mindspore-lite/examples/export_models/models/densenet_train_export.py b/mindspore-lite/examples/export_models/models/densenet_train_export.py
index f4f59f69a63dadff898e8eb7d611c026792869da..0996091351c700da43700766dc0afb15f2ff317d 100644
--- a/mindspore-lite/examples/export_models/models/densenet_train_export.py
+++ b/mindspore-lite/examples/export_models/models/densenet_train_export.py
@@ -18,10 +18,10 @@ import sys
 import os
 import numpy as np
 from train_utils import save_inout, train_wrap
+from src.network.densenet import DenseNet121
 import mindspore.common.dtype as mstype
 from mindspore import context, Tensor, nn
 from mindspore.train.serialization import export
-from src.network.densenet import DenseNet121
 
 #pylint: disable=wrong-import-position
 sys.path.append(os.environ['CLOUD_MODEL_ZOO'] + 'official/cv/densenet121/')
diff --git a/mindspore-lite/examples/export_models/models/tinybert_train_export.py b/mindspore-lite/examples/export_models/models/tinybert_train_export.py
index 125f2f8cbb0a6665d9c1ecc84d5dca4ab858e7e1..5f6944f8f6d20b65f089078eb45ca765b6d8b4cd 100644
--- a/mindspore-lite/examples/export_models/models/tinybert_train_export.py
+++ b/mindspore-lite/examples/export_models/models/tinybert_train_export.py
@@ -18,6 +18,9 @@
 import os
 import sys
 import numpy as np
+from official.nlp.tinybert.src.tinybert_model import TinyBertModel  # noqa: 402
+from official.nlp.tinybert.src.model_utils.config import bert_student_net_cfg  # noqa: 402
+from train_utils import save_t  # noqa: 402
 import mindspore as M
 from mindspore.ops import operations as P
 from mindspore.ops import composite as C
@@ -32,9 +35,6 @@
 else:
     path = ''
 sys.path.append(os.environ['CLOUD_MODEL_ZOO'] + 'official/nlp/tinybert')
-from official.nlp.tinybert.src.tinybert_model import TinyBertModel  # noqa: 402
-from official.nlp.tinybert.src.model_utils.config import bert_student_net_cfg  # noqa: 402
-from train_utils import save_t  # noqa: 402
 
 
 class BertNetworkWithLossGenDistill(M.nn.Cell):
diff --git a/mindspore-lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java b/mindspore-lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
index fd124635a1a86b07a7c2b71577f3eea5c852942f..2cf7e4158f34494de69280a2ddb36fe635a4bb41 100644
--- a/mindspore-lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
+++ b/mindspore-lite/examples/quick_start_java/src/main/java/com/mindspore/lite/demo/Main.java
@@ -25,8 +25,6 @@ import com.mindspore.config.ModelType;
 import com.mindspore.config.Version;
 
 import java.io.File;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.nio.FloatBuffer;
diff --git a/mindspore-lite/examples/train_lenet_cpp/model/lenet_export.py b/mindspore-lite/examples/train_lenet_cpp/model/lenet_export.py
index cb03e8e6c71153502b3d3920b3cbc04315d89f47..900a0e5ff96231e52d3daf419fa09441110d60ed 100644
--- a/mindspore-lite/examples/train_lenet_cpp/model/lenet_export.py
+++ b/mindspore-lite/examples/train_lenet_cpp/model/lenet_export.py
@@ -17,11 +17,11 @@
 import sys
 import os
 import numpy as np
+from lenet import LeNet5
+from train_utils import train_wrap
 from mindspore import context, Tensor
 import mindspore.common.dtype as mstype
 from mindspore.train.serialization import export
-from lenet import LeNet5
-from train_utils import train_wrap
 
 
 n = LeNet5()
diff --git a/mindspore-lite/examples/train_lenet_java/model/lenet_export.py b/mindspore-lite/examples/train_lenet_java/model/lenet_export.py
index 6429f9e1882a6c5d01d5c1d099f389780e231282..c8e9f67a1899ea440d537e14269cf7d4fa63f8b7 100644
--- a/mindspore-lite/examples/train_lenet_java/model/lenet_export.py
+++ b/mindspore-lite/examples/train_lenet_java/model/lenet_export.py
@@ -16,11 +16,11 @@
 import os
 import numpy as np
+from lenet import LeNet5
+from train_utils import train_wrap
 from mindspore import context, Tensor
 import mindspore.common.dtype as mstype
 from mindspore.train.serialization import export
-from lenet import LeNet5
-from train_utils import train_wrap
 
 
 n = LeNet5()
 n.set_train()
diff --git a/mindspore-lite/examples/train_lenet_java/src/main/java/com/mindspore/lite/train_lenet/DataSet.java b/mindspore-lite/examples/train_lenet_java/src/main/java/com/mindspore/lite/train_lenet/DataSet.java
index f27c1de19c00e1df8915ad651e9efa7ee6063f41..31cfd7164fdcbf663a0a0a9862359b1a21b306db 100644
--- a/mindspore-lite/examples/train_lenet_java/src/main/java/com/mindspore/lite/train_lenet/DataSet.java
+++ b/mindspore-lite/examples/train_lenet_java/src/main/java/com/mindspore/lite/train_lenet/DataSet.java
@@ -35,8 +35,8 @@ public class DataSet {
         numOfClasses = 10;
         trainData = new Vector();
         testData = new Vector();
-        readMNISTFile(dpath + "/train/train-images-idx3-ubyte", dpath+"/train/train-labels-idx1-ubyte", trainData);
-        readMNISTFile(dpath + "/test/t10k-images-idx3-ubyte", dpath+"/test/t10k-labels-idx1-ubyte", testData);
+        readMNISTFile(dpath + "/train/train-images-idx3-ubyte", dpath + "/train/train-labels-idx1-ubyte", trainData);
+        readMNISTFile(dpath + "/test/t10k-images-idx3-ubyte", dpath + "/test/t10k-labels-idx1-ubyte", testData);
 
         System.out.println("train data cnt: " + trainData.size());
         System.out.println("test data cnt: " + testData.size());
diff --git a/mindspore-lite/examples/transfer_learning/model/transfer_learning_export.py b/mindspore-lite/examples/transfer_learning/model/transfer_learning_export.py
index 19082f4ea862562f3e7e93016370ec40789188cf..870d29cfd75669bfe821bab7c851cf897f80e685 100755
--- a/mindspore-lite/examples/transfer_learning/model/transfer_learning_export.py
+++ b/mindspore-lite/examples/transfer_learning/model/transfer_learning_export.py
@@ -16,11 +16,11 @@
 import os
 
 import numpy as np
+from effnet import effnet
+from train_utils import train_wrap
 import mindspore as M
 from mindspore.nn import Cell
 from mindspore.train.serialization import load_checkpoint, export
-from effnet import effnet
-from train_utils import train_wrap
 
 
 class TransferNet(Cell):
diff --git a/mindspore-lite/test/st/java/src/main/java/Benchmark.java b/mindspore-lite/test/st/java/src/main/java/Benchmark.java
index 683d749d219392258dde515077fbd29cdd3f17df..4fd4bc73fd239da34b4415cd9fba16e3d0422880 100644
--- a/mindspore-lite/test/st/java/src/main/java/Benchmark.java
+++ b/mindspore-lite/test/st/java/src/main/java/Benchmark.java
@@ -88,8 +88,8 @@ public class Benchmark {
                 } else {
                     meanError += absoluteError / (Math.abs(benchmarkData[i]) + Float.MIN_VALUE);
                     errorCount++;
-              }
-          }
+                }
+            }
         }
 
         if (meanError > 0.0f) {
@@ -109,7 +109,7 @@
         }
         return meanError < accuracy;
     }
-    
+
     public static void main(String[] args) {
         if (args.length < 4) {
             System.err.println("We must pass parameters such as modelPath, inDataFile, benchmarkDataFile and accuracy.");
@@ -123,7 +123,7 @@ public class Benchmark {
         if (args.length == 5 && args[4].equals("Runner")) {
             // use default param init context
             MSContext context = new MSContext();
-            context.init(1,0);
+            context.init(1, 0);
             boolean ret = context.addDeviceInfo(DeviceType.DT_CPU, false, 0);
             if (!ret) {
                 System.err.println("init context failed");
@@ -158,7 +158,7 @@ public class Benchmark {
             List<MSTensor> outputs = new ArrayList<>();
 
             // runner do predict
-            ret = runner.predict(inputs,outputs);
+            ret = runner.predict(inputs, outputs);
             if (!ret) {
                 System.err.println("MindSpore Lite predict failed.");
                 runner.free();
diff --git a/mindspore-lite/test/st/ops/frame/configs.py b/mindspore-lite/test/st/ops/frame/configs.py
index 2c174185325bdc14e433791e326fefcbb5147fb9..1eb04c0d48e967e9152cf466566f5cecc3a6722c 100644
--- a/mindspore-lite/test/st/ops/frame/configs.py
+++ b/mindspore-lite/test/st/ops/frame/configs.py
@@ -146,8 +146,6 @@
             for graph_input in each_model["graph_param"]["inputs"]:
                 each_input_name.append(graph_input["name"])
             self.graph_input_names[each_model["model_name"]] = each_input_name
-        # for graph_input in self.op_configs_["op_configs"][0]["graph_param"]["inputs"]:
-        #     self.graph_input_names.append(graph_input["name"])
 
     def modelconfigs(self):
         return self.op_configs_
diff --git a/mindspore-lite/test/st/ops/frame/optest.py b/mindspore-lite/test/st/ops/frame/optest.py
index a563318dda673e1a51f5db1d52a8187b5ae623e9..57d891fa6d75e7809e04e17b1f90f56613098ed2 100644
--- a/mindspore-lite/test/st/ops/frame/optest.py
+++ b/mindspore-lite/test/st/ops/frame/optest.py
@@ -51,8 +51,9 @@ class OpTest:
         os.mkdir(self.output_path)
         conf.logger.info("use mslite benchmark: %s", self.mslite_benchmark_path)
 
-## 因为ms默认输入格式为NHWC,将onnx模型的NCHW格式输入转为NHWC格式
-# 将所有标杆数据 gold_name:shapes 转换为 X1:dim1, dim2;X2:dim1, dim2...的格式
+
+# Because the default input format of MS is NHWC, convert the NCHW format input of the ONNX model to NHWC format
+# Convert all benchmark data gold_name:shapes to the format of X1:dim1, dim2;X2:dim1, dim2...
     def get_run_input_shapes(self, run_config):
         run_input_shapes_ = {}
         input_shapes = self.golden_confis.gold_input_name_dict[run_config["gold_in"]]
@@ -124,8 +125,6 @@ class OpTest:
             initializer=initializers,
         )
         model = helper.make_model(graph, producer_name="test", opset_imports=[helper.make_opsetid("", 18)])
-        # for opset in model.opset_import:
-        #     print(f"Domain: {opset.domain}, Version: {opset.version}")
 
         onnx.checker.check_model(model)
         model.ir_version = 8
@@ -148,7 +147,6 @@ class OpTest:
         )
         onnx.checker.check_model(model)
-        # conf.logger.debug(onnx.helper.printable_graph(model.graph))
 
         # dtypes default to fp32
         input_tensors = []
         input_feeds = {}
@@ -290,8 +288,6 @@ class OpTest:
 
     def run_models(self):
         run_configs = self.run_configs.run_configs_
 
-        # run_input_shapes = self.get_run_input_shapes()
-
         for run_config in run_configs:
             in_model_path = self.output_path + "/ms_models/" + run_config["in_model"]
@@ -304,7 +300,6 @@ class OpTest:
                 )
                 continue
 
-            # dtypes = type_map[run_config["dtypes"]]
             run_input_shapes = self.get_run_input_shapes(run_config)
             input_shapes = run_input_shapes[run_config["gold_in"]]
 
@@ -326,10 +321,6 @@ class OpTest:
                 f"--benchmarkDataFile={gold_out_param}",
                 f"--inputShape={input_shapes}",
             ]
-            # if input_shapes != "None":
-            #     args.append(
-            #         f"--inputShape={input_shapes}",
-            #     )
             run_command = ""
             for arg in args:
                 run_command += arg + " "
diff --git a/mindspore-lite/test/st/python/import_ms_and_mslite/test_api_import_mslite_and_ms.py b/mindspore-lite/test/st/python/import_ms_and_mslite/test_api_import_mslite_and_ms.py
index d52ddd901324146c6e412a59333006f18d082f62..6eb2ed5fb415f7be0b2a20427ebe81e121ca4885 100644
--- a/mindspore-lite/test/st/python/import_ms_and_mslite/test_api_import_mslite_and_ms.py
+++ b/mindspore-lite/test/st/python/import_ms_and_mslite/test_api_import_mslite_and_ms.py
@@ -20,7 +20,6 @@ import numpy as np
 import mindspore_lite as mslite
 import mindspore
 
-
 # ============================ ut testcases ============================
 # ============================ Context ============================
 def test_context_construct():
diff --git a/mindspore-lite/test/st/python/import_ms_and_mslite/test_predict_backend_lite_resnet50.py b/mindspore-lite/test/st/python/import_ms_and_mslite/test_predict_backend_lite_resnet50.py
index 419c000829d50f50b85799fe5d0d74ffd54d5f91..a3a0c9c84e3ce3406af8f235b569d9ae4b0e2da1 100644
--- a/mindspore-lite/test/st/python/import_ms_and_mslite/test_predict_backend_lite_resnet50.py
+++ b/mindspore-lite/test/st/python/import_ms_and_mslite/test_predict_backend_lite_resnet50.py
@@ -23,10 +23,10 @@
 import os
 
 import numpy as np
-import mindspore as ms
-from mindspore import context
 from lite_infer_predict_utils import predict_backend_lite, _get_max_index_from_res
 from resnet import resnet50
+import mindspore as ms
+from mindspore import context
 
 
 # pylint: disable=I1101
diff --git a/mindspore-lite/test/st/python/optimize_pass/test_concat_op_pass.py b/mindspore-lite/test/st/python/optimize_pass/test_concat_op_pass.py
index 50160abea8fca7361018bfcb7547cf7e46337769..b8229dea0686e0c09729115267e26e641b0badad 100644
--- a/mindspore-lite/test/st/python/optimize_pass/test_concat_op_pass.py
+++ b/mindspore-lite/test/st/python/optimize_pass/test_concat_op_pass.py
@@ -40,12 +40,14 @@ class ConcatOpPassNet(nn.Cell):
         key_paddings = self.concat((Tensor([0, 0, 0, 0, 0], mstype.int64), pad_length, Tensor([0, 0], mstype.int64)))
         return key_paddings
 
+
 def dummy_tensor(shape, dtype):
     """create dummy tensor"""
     if None in shape:
         return Tensor(shape=shape, dtype=dtype)
     return Tensor(np.ones(shape=tuple(shape)), dtype=dtype)
 
+
 def export_model():
     """
     export model
diff --git a/mindspore-lite/test/st/python/python_api/test_lite_llm_engine_api.py b/mindspore-lite/test/st/python/python_api/test_lite_llm_engine_api.py
index ae55e47ea6e73903dc2cc98ff500a66eb75b160f..93f378f0ecaaf89052fb668dd29e2d797eca910f 100644
--- a/mindspore-lite/test/st/python/python_api/test_lite_llm_engine_api.py
+++ b/mindspore-lite/test/st/python/python_api/test_lite_llm_engine_api.py
@@ -42,6 +42,7 @@ def test_lite_llm_engine_cluster_info_cluster_id_type_check():
         llm_cluster.remote_cluster_id = "0"
     assert "remote_cluster_id must be int, but got" in str(raise_info.value)
 
+
 # ============================ LLMEngine ============================
 def test_lite_llm_engine_llm_engine_role_type_check():
     with pytest.raises(TypeError) as raise_info:
@@ -168,6 +169,7 @@ def test_lite_llm_engine_llm_engine_fetch_status_check():
         llm_engine.fetch_status()
     assert "LLMEngine is not inited or init failed" in str(raise_info.value)
 
+
 # ============================ LLMModel ============================
 def test_lite_llm_engine_llm_model_predict_check():
     cluster_id = 0
diff --git a/mindspore-lite/test/st/python/test_inference_cloud.py b/mindspore-lite/test/st/python/test_inference_cloud.py
index da2006792f9aba3ce34cea2ec78899f16515bbfd..ee3c65c36e09dd35dd18f331549ed327fffeca6a 100644
--- a/mindspore-lite/test/st/python/test_inference_cloud.py
+++ b/mindspore-lite/test/st/python/test_inference_cloud.py
@@ -168,6 +168,7 @@ def test_model_group_for_ascend(model_path, in_data_path, input_shapes):
     # use model two for inference
     test_model_inference_ascend(model_file, in_data_file_list, shapes)
 
+
 def test_input_shape_for_ascend(model_path, input_shape_str):
     context = mslite.Context()
     context.target = ["ascend"]
@@ -179,6 +180,7 @@ def test_input_shape_for_ascend(model_path, input_shape_str):
     model.build_from_file(model_path, mslite.ModelType.MINDIR, context)
     input_shape_config = model.get_model_info("input_shape")
 
+
 def test_model_group_weight_workspace_for_ascend(model_path, in_data_path, input_shapes):
     # init model group context
     model_group_context = mslite.Context()
diff --git a/mindspore-lite/test/ut/python/test_inference_api.py b/mindspore-lite/test/ut/python/test_inference_api.py
index 69cf3d9d023ff73a1346f6356ad9f033656f1ad9..8bf9dd64e04872968da203fcf5c22130cb90e7e7 100644
--- a/mindspore-lite/test/ut/python/test_inference_api.py
+++ b/mindspore-lite/test/ut/python/test_inference_api.py
@@ -434,12 +434,14 @@ def test_model_predict_inputs_element_type_error():
         outputs = model.predict(["input"])
     assert "inputs element must be Tensor" in str(raise_info.value)
 
+
 def test_model_get_model_info_type_error():
     with pytest.raises(TypeError) as raise_info:
         model = get_model()
         inputs = model.get_model_info()
     assert "key must be str" in str(raise_info.value)
 
+
 def test_model_predict_runtime_error():
     with pytest.raises(RuntimeError) as raise_info:
         model = get_model()
@@ -622,6 +624,7 @@ def test_model_group_add_model_invalid_model_obj_type_error():
         model_group.add_model("model_path")
     assert "models must be list/tuple, but got" in str(raise_info.value)
 
+
 # If declare a model and don't build it may cause core dump.
 def test_update_weights():
     with pytest.raises(RuntimeError) as raise_info:
diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc
index 27fbb58b516c259d3321ac057c22a9d2c809ab34..8aac6a4b931c2ef18176b1d0c586389be40809a4 100644
--- a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc
+++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc
@@ -155,8 +155,9 @@ ParameterPtr FlashAttentionAntiquantFusion::ConcatParameter(const FuncGraphPtr &
     concat_parameter = opt::BuildFloatVecParameterNode(func_graph, concat_data, name);
   }
 
+  MS_CHECK_TRUE_RET(concat_parameter != nullptr, nullptr);
   auto abstract = concat_parameter->abstract();
-  MS_EXCEPTION_IF_NULL(abstract);
+  MS_CHECK_TRUE_RET(abstract != nullptr, nullptr);
   ShapeVector shape_vector = {2};
   shape_vector.insert(shape_vector.end(), ori_shape.begin() + 1, ori_shape.end());
   abstract->set_shape(std::make_shared<abstract::Shape>(shape_vector));