From 1112f35a7f642a14eb137dc77f05b1143b0fa7d3 Mon Sep 17 00:00:00 2001 From: Zhu Guodong Date: Thu, 10 Jul 2025 13:14:55 +0800 Subject: [PATCH] [Lite] fix coreml codecheck warnings --- .jenkins/check/config/filter_cpplint.txt | 1 + .../litert/delegate/coreml/coreml_executor.h | 18 ++-- .../litert/delegate/coreml/coreml_executor.mm | 90 ++++++++++--------- .../coreml/coreml_executor_wrapper.mm | 4 +- 4 files changed, 57 insertions(+), 56 deletions(-) diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt index e3dd5ca5..7b1dc36a 100644 --- a/.jenkins/check/config/filter_cpplint.txt +++ b/.jenkins/check/config/filter_cpplint.txt @@ -31,6 +31,7 @@ "mindspore-lite/mindspore-lite/src/litert/thread_pool.c" "readability/casting" "mindspore-lite/mindspore-lite/src/litert/thread_pool.c" "runtime/arrays" "mindspore-lite/mindspore-lite/src/litert/thread_pool.c" "runtime/int" +"mindspore-lite/mindspore-lite/src/litert/delegate/coreml/coreml_executor.h" "whitespace/parens" # MindData "mindspore-lite/mindspore-lite/minddata/dataset/kernels/image/cutmix_batch_op.cc" "build/include_what_you_use" diff --git a/mindspore-lite/src/litert/delegate/coreml/coreml_executor.h b/mindspore-lite/src/litert/delegate/coreml/coreml_executor.h index 30779627..42763105 100644 --- a/mindspore-lite/src/litert/delegate/coreml/coreml_executor.h +++ b/mindspore-lite/src/litert/delegate/coreml/coreml_executor.h @@ -22,27 +22,25 @@ #include #include "include/api/types.h" -API_AVAILABLE(ios(11)) -@interface InputFeatureProvider : NSObject { - const std::vector* _inputs; +API_AVAILABLE(ios(12)) +@interface MSFeatureProvider : NSObject { + const std::vector* _ms_tensors; NSSet* _featureNames; } -- (instancetype)initWithInputs:(const std::vector*)inputs +- (instancetype)initWithMSTensor:(const std::vector*)inputs coreMLVersion:(int)coreMLVersion; -- (NSSet*)featureNames; - (MLFeatureValue *)featureValueForName:(NSString *)featureName; - -@property(nonatomic, readonly) int 
coreMLVersion; +@property(nonatomic, readonly) NSSet *featureNames; @end -API_AVAILABLE(ios(11)) +API_AVAILABLE(ios(12)) @interface CoreMLExecutor : NSObject -- (bool)ExecuteWithInputs:(const std::vector&)inputs +- (bool)run:(const std::vector&)inputs outputs:(const std::vector&)outputs; -- (bool)loadModelC:(NSURL*)compileUrl; +- (bool)load:(NSURL*)compileUrl; @property MLModel* model; @property(nonatomic, readonly) int coreMLVersion; diff --git a/mindspore-lite/src/litert/delegate/coreml/coreml_executor.mm b/mindspore-lite/src/litert/delegate/coreml/coreml_executor.mm index df1b9bac..196d6a2a 100644 --- a/mindspore-lite/src/litert/delegate/coreml/coreml_executor.mm +++ b/mindspore-lite/src/litert/delegate/coreml/coreml_executor.mm @@ -20,7 +20,7 @@ namespace { // The subgraph split can cause the change of tensor name. This function is used to get the original name. -std::string GetOrgFeatureName(const std::string &input_name) { +std::string GetOrginalFeatureName(const std::string &input_name) { auto org_name = input_name; std::string pattern_1 = "_duplicate_"; auto pos_1 = input_name.find(pattern_1); @@ -38,17 +38,17 @@ std::string GetOrgFeatureName(const std::string &input_name) { } } // namespace -@implementation InputFeatureProvider +// Ref to: "https://developer.apple.com/documentation/coreml/mlfeatureprovider?language=objc" +@implementation MSFeatureProvider -- (instancetype)initWithInputs:(const std::vector*)inputs +- (instancetype)initWithMSTensor:(const std::vector*)ms_tensors coreMLVersion:(int)coreMLVersion { self = [super init]; - _inputs = inputs; - _coreMLVersion = coreMLVersion; + _ms_tensors = ms_tensors; NSMutableArray* names = [[NSMutableArray alloc] init]; - for (auto& input : *_inputs) { - auto input_name = GetOrgFeatureName(input.Name()); - [names addObject:[NSString stringWithCString:input_name.c_str() + for (auto& tensor : *_ms_tensors) { + auto name = GetOrginalFeatureName(tensor.Name()); + [names addObject:[NSString 
stringWithCString:name.c_str() encoding:[NSString defaultCStringEncoding]]]; } _featureNames = [NSSet setWithArray:names]; @@ -58,16 +58,16 @@ std::string GetOrgFeatureName(const std::string &input_name) { - (NSSet*)featureNames{ return _featureNames; } - (MLFeatureValue*)featureValueForName:(NSString*)featureName { - for (auto input : *_inputs) { - auto input_name = GetOrgFeatureName(input.Name()); - if ([featureName cStringUsingEncoding:NSUTF8StringEncoding] == input_name) { + for (auto tensor : *_ms_tensors) { + auto tensor_name = GetOrginalFeatureName(tensor.Name()); + if ([featureName cStringUsingEncoding:NSUTF8StringEncoding] == tensor_name) { NSArray* shape; NSArray* strides; - int tensorRank = input.Shape().size(); + int tensorRank = tensor.Shape().size(); switch(tensorRank) { case 1: shape = @[ - @(input.Shape()[0]) + @(tensor.Shape()[0]) ]; strides = @[ @1 @@ -75,46 +75,51 @@ std::string GetOrgFeatureName(const std::string &input_name) { break; case 2: shape = @[ - @(input.Shape()[0]), - @(input.Shape()[1]) + @(tensor.Shape()[0]), + @(tensor.Shape()[1]) ]; strides = @[ - @(input.Shape()[1]), + @(tensor.Shape()[1]), @1 ]; break; case 3: shape = @[ - @(input.Shape()[0]), - @(input.Shape()[1]), - @(input.Shape()[2]) + @(tensor.Shape()[0]), + @(tensor.Shape()[1]), + @(tensor.Shape()[2]) ]; strides = @[ - @(input.Shape()[2] * input.Shape()[1]), - @(input.Shape()[2]), + @(tensor.Shape()[2] * tensor.Shape()[1]), + @(tensor.Shape()[2]), @1 ]; break; case 4: shape = @[ - @(input.Shape()[0]), - @(input.Shape()[1]), - @(input.Shape()[2]), - @(input.Shape()[3]) + @(tensor.Shape()[0]), + @(tensor.Shape()[1]), + @(tensor.Shape()[2]), + @(tensor.Shape()[3]) ]; strides = @[ - @(input.Shape()[3] * input.Shape()[2] * input.Shape()[1]), - @(input.Shape()[3] * input.Shape()[2]), - @(input.Shape()[3]), + @(tensor.Shape()[3] * tensor.Shape()[2] * tensor.Shape()[1]), + @(tensor.Shape()[3] * tensor.Shape()[2]), + @(tensor.Shape()[3]), @1 ]; break; default: - NSLog(@"The rank of 
input tensor:%@ is unsupported!", featureName); + NSLog(@"The rank of input tensor:%@ is unsupported!", featureName); + } + + if (tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) { + NSLog(@"Only support tensor of datatype float32, but %@ is not!", featureName); + return nil; } NSError* error = nil; - MLMultiArray* mlArray = [[MLMultiArray alloc] initWithDataPointer:(float*)input.MutableData() + MLMultiArray* mlArray = [[MLMultiArray alloc] initWithDataPointer:(float*)tensor.MutableData() shape:shape dataType:MLMultiArrayDataTypeFloat32 strides:strides @@ -137,18 +142,18 @@ std::string GetOrgFeatureName(const std::string &input_name) { @implementation CoreMLExecutor -- (bool)ExecuteWithInputs:(const std::vector&)inputs +- (bool)run:(const std::vector&)inputs outputs:(const std::vector&)outputs { if (_model == nil) { return NO; } - _coreMLVersion = 3; + _coreMLVersion = 4; NSError* error = nil; //Initialize the CoreML feature provider with input MSTensor - InputFeatureProvider* inputFeature = - [[InputFeatureProvider alloc] initWithInputs:&inputs coreMLVersion:[self coreMLVersion]]; + MSFeatureProvider* inputFeature = + [[MSFeatureProvider alloc] initWithMSTensor:&inputs coreMLVersion:[self coreMLVersion]]; if (inputFeature == nil) { - NSLog(@"inputFeature initialization failed."); + NSLog(@"init inputFeature from MSTensor failed."); return NO; } //Inference configuration, auto use GPU by default @@ -171,7 +176,7 @@ std::string GetOrgFeatureName(const std::string &input_name) { [outputFeature featureValueForName:[outputFeatureNames member:outputName]]; auto* data = [outputValue multiArrayValue]; float* outputData = (float*)data.dataPointer; - if (outputData == nullptr) { + if (outputData == nil) { NSLog(@"Output data is null!"); return NO; } @@ -182,14 +187,11 @@ std::string GetOrgFeatureName(const std::string &input_name) { -- (bool)loadModelC:(NSURL*)compileUrl { +- (bool)load:(NSURL*)compileUrl { NSError* error = nil; - if (@available(iOS 12.0, *)) { - MLModelConfiguration* config = 
[MLModelConfiguration alloc]; - config.computeUnits = MLComputeUnitsAll; - _model = [MLModel modelWithContentsOfURL:compileUrl configuration:config error:&error]; - } else { - _model = [MLModel modelWithContentsOfURL:compileUrl error:&error]; - } - if (error != NULL) { + MLModelConfiguration* config = [[MLModelConfiguration alloc] init]; + config.computeUnits = MLComputeUnitsAll; + + _model = [MLModel modelWithContentsOfURL:compileUrl configuration:config error:&error]; + if (error != nil) { NSLog(@"Create MLModel failed, error code: %@", [error localizedDescription]); return NO; } diff --git a/mindspore-lite/src/litert/delegate/coreml/coreml_executor_wrapper.mm b/mindspore-lite/src/litert/delegate/coreml/coreml_executor_wrapper.mm index 4dffecc9..26d92051 100644 --- a/mindspore-lite/src/litert/delegate/coreml/coreml_executor_wrapper.mm +++ b/mindspore-lite/src/litert/delegate/coreml/coreml_executor_wrapper.mm @@ -43,7 +43,7 @@ int CoreMLExecutorWrapper::CompileMLModel(const std::string &modelPath) { return RET_ERROR; } mlmodelc_path_ = [[MLModelCURL path] UTF8String]; - bool success = [(__bridge id)coreml_executor_ loadModelC:MLModelCURL]; + bool success = [(__bridge id)coreml_executor_ load:MLModelCURL]; if (!success) { NSLog(@"Load MLModelC failed!"); (void)CleanTmpFile(); @@ -58,7 +58,7 @@ int CoreMLExecutorWrapper::CompileMLModel(const std::string &modelPath) { int CoreMLExecutorWrapper::Run(const std::vector &in_tensors, const std::vector &out_tensors){ - auto success = [(__bridge id)coreml_executor_ ExecuteWithInputs:in_tensors outputs:out_tensors]; + auto success = [(__bridge id)coreml_executor_ run:in_tensors outputs:out_tensors]; if (!success) { NSLog(@"coreML model execute failed!"); return RET_ERROR; -- Gitee